From noreply at buildbot.pypy.org Sun Dec 1 18:49:54 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 1 Dec 2013 18:49:54 +0100 (CET) Subject: [pypy-commit] pypy default: document less-stringly-ops branch (again) Message-ID: <20131201174954.513E01C0095@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68350:fb43548bd07a Date: 2013-12-01 17:49 +0000 http://bitbucket.org/pypy/pypy/changeset/fb43548bd07a/ Log: document less-stringly-ops branch (again) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -22,3 +22,6 @@ .. branch: osx-eci-frameworks-makefile OSX: Ensure frameworks end up in Makefile when specified in External compilation info +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. From noreply at buildbot.pypy.org Sun Dec 1 20:35:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 1 Dec 2013 20:35:24 +0100 (CET) Subject: [pypy-commit] pypy default: Fix Message-ID: <20131201193524.2FAF91C08A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68351:d95d0c9bb988 Date: 2013-12-01 20:34 +0100 http://bitbucket.org/pypy/pypy/changeset/d95d0c9bb988/ Log: Fix diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -149,6 +149,14 @@ else: del self.insns[i] + # the remaining instructions must have their 'previous_insns' list + # trimmed of dead previous instructions + all_remaining_insns = set(self.insns) + assert self.insns[0].previous_insns == () + for insn in self.insns[1:]: + insn.previous_insns = [previnsn for previnsn in insn.previous_insns + if previnsn in all_remaining_insns] + def find_noncollecting_calls(self): cannot_collect = {} for line in self.lines: @@ -286,6 +294,17 @@ else: insn1.framesize = size_at_insn1 + # trim: instructions with no framesize are removed from self.insns, + # and from the 'previous_insns' lists + assert hasattr(self.insns[0], 'framesize') + old = self.insns[1:] + del self.insns[1:] + for insn in old: + if hasattr(insn, 'framesize'): + self.insns.append(insn) + insn.previous_insns = [previnsn for previnsn in insn.previous_insns + if hasattr(previnsn, 'framesize')] + def fixlocalvars(self): def fixvar(localvar): if localvar is None: From noreply at buildbot.pypy.org Mon Dec 2 02:04:06 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 2 Dec 2013 02:04:06 +0100 (CET) Subject: [pypy-commit] pypy default: Removed some unused imports Message-ID: <20131202010406.A86F11C1161@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68352:dfbed72e62a8 Date: 2013-12-01 19:03 -0600 http://bitbucket.org/pypy/pypy/changeset/dfbed72e62a8/ Log: Removed some unused imports diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -3,18 +3,18 @@ This is transformed to become a JIT by code elsewhere: pypy/jit/* """ -from rpython.tool.pairtype import extendabletype from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside from rpython.rlib import jit from rpython.rlib.jit import current_trace_length, unroll_parameters import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import 
OperationError, operationerrfmt -from pypy.interpreter.pycode import PyCode, CO_GENERATOR +from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap + PyFrame._virtualizable_ = ['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', 'cells[*]', From noreply at buildbot.pypy.org Mon Dec 2 02:04:40 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 2 Dec 2013 02:04:40 +0100 (CET) Subject: [pypy-commit] pypy default: Updated a comment to reflect the RPython split Message-ID: <20131202010440.CBD761C1161@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68353:5377386566e1 Date: 2013-12-01 19:04 -0600 http://bitbucket.org/pypy/pypy/changeset/5377386566e1/ Log: Updated a comment to reflect the RPython split diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -1,6 +1,6 @@ """This is not the JIT :-) -This is transformed to become a JIT by code elsewhere: pypy/jit/* +This is transformed to become a JIT by code elsewhere: rpython/jit/* """ from rpython.rlib.rarithmetic import r_uint, intmask From noreply at buildbot.pypy.org Mon Dec 2 11:18:55 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 2 Dec 2013 11:18:55 +0100 (CET) Subject: [pypy-commit] pypy default: fix arange corner case Message-ID: <20131202101855.EC9261C0095@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68354:7683ee51bc38 Date: 2013-12-02 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/7683ee51bc38/ Log: fix arange corner case diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -381,6 +381,8 @@ assert a.dtype is dtype(int) a = arange(3, 7, 2) assert (a == [3, 5]).all() + a = arange(3, 8, 2) + assert (a == [3, 5, 7]).all() a = arange(3, dtype=float) assert (a == [0., 1., 2.]).all() assert a.dtype is dtype(float) From noreply at buildbot.pypy.org Mon Dec 2 16:10:49 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 2 Dec 2013 16:10:49 +0100 (CET) Subject: [pypy-commit] pypy default: mark ftruncate as macro, fixes TestFile.test_truncate on 32bit/ARM Message-ID: <20131202151049.424231C3223@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r68355:29ae725bea82 Date: 2013-12-02 08:58 -0600 http://bitbucket.org/pypy/pypy/changeset/29ae725bea82/ Log: mark ftruncate as macro, fixes TestFile.test_truncate on 32bit/ARM diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -14,8 +14,8 @@ eci = ExternalCompilationInfo(includes=['stdio.h', 'unistd.h', 'sys/types.h']) -def llexternal(*args): - return rffi.llexternal(*args, compilation_info=eci) +def 
llexternal(*args, **kwargs): + return rffi.llexternal(*args, compilation_info=eci, **kwargs) FILE = lltype.Struct('FILE') # opaque type maybe @@ -24,9 +24,9 @@ off_t = platform.SimpleType('off_t') + CC = platform.configure(CConfig) OFF_T = CC['off_t'] - c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) c_write = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, @@ -42,7 +42,8 @@ c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], lltype.Signed) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) -c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT) +c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) + c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) From noreply at buildbot.pypy.org Mon Dec 2 16:43:48 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 2 Dec 2013 16:43:48 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: raise ValueError if we try to write to a mmapped array which is ready-only Message-ID: <20131202154348.F42071C3264@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68356:9c8634e4593f Date: 2013-12-02 16:42 +0100 http://bitbucket.org/pypy/pypy/changeset/9c8634e4593f/ Log: raise ValueError if we try to write to a mmapped array which is ready-only diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. 
diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -400,7 +400,14 @@ def base(self): return self.orig_base - + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): raise OperationError(space.w_ValueError, space.wrap( diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -50,7 +50,7 @@ @staticmethod def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, - w_subtype=None, w_base=None): + w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -58,8 +58,14 @@ if owning: raise OperationError(space.w_ValueError, space.wrap("Cannot have owning=True when specifying a buffer")) - impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, - backstrides, storage, w_base) + if writable: + impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, + backstrides, storage, w_base) + else: + impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, + strides, backstrides, + storage, w_base) + elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1096,7 +1096,8 @@ storage = rffi.ptradd(storage, offset) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, w_subtype=w_subtype, - w_base=w_buffer) + w_base=w_buffer, + writable=buf.is_writable()) if not shape: return W_NDimArray.new_scalar(space, dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -218,7 +218,8 @@ assert get(1, 1) == 3 class AppTestNumArray(BaseNumpyAppTest): - spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii", "array"]) + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def w_CustomIndexObject(self, index): class CustomIndexObject(object): def __init__(self, index): @@ -2087,6 +2088,15 @@ a = np.ndarray([1], dtype=bool) assert a[0] == True + +class AppTestNumArrayFromBuffer(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) + + def setup_class(cls): + from rpython.tool.udir import udir + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + def test_ndarray_from_buffer(self): import numpypy as np import array @@ -2126,7 +2136,19 @@ assert str(info.value).startswith('buffer is too small') info = raises(TypeError, "np.ndarray((5,), buffer=buf, offset=15, dtype='i2')") assert str(info.value).startswith('buffer is too small') - + + def test_ndarray_from_readonly_buffer(self): + import numpypy as np + from mmap import mmap, ACCESS_READ + f = open(self.tmpname, "w+") + f.write("hello") + f.flush() + buf = 
mmap(f.fileno(), 5, access=ACCESS_READ) + a = np.ndarray((5,), buffer=buf, dtype='c') + raises(ValueError, "a[0] = 'X'") + buf.close() + f.close() + class AppTestMultiDim(BaseNumpyAppTest): diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -314,6 +314,14 @@ self.check_valid_writeable() self.mmap.setslice(start, string) + def is_writable(self): + try: + self.mmap.check_writeable() + except RMMapError: + return False + else: + return True + def get_raw_address(self): self.check_valid() return self.mmap.data From noreply at buildbot.pypy.org Mon Dec 2 16:43:51 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 2 Dec 2013 16:43:51 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: hg merge default Message-ID: <20131202154351.8D1F41C3264@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68357:b645748154f8 Date: 2013-12-02 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/b645748154f8/ Log: hg merge default diff too long, truncating to 2000 out of 4979 lines diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. 
+Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. 
_`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. - -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. 
+that (a) it works and (b) it gives important benefits.
 
 ----------------------
 How do I compile PyPy?
diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
--- a/pypy/doc/index.rst
+++ b/pypy/doc/index.rst
@@ -40,7 +40,7 @@
 
 * `FAQ`_: some frequently asked questions.
 
-* `Release 2.2.0`_: the latest official release
+* `Release 2.2.1`_: the latest official release
 
 * `PyPy Blog`_: news and status info about PyPy
 
@@ -110,7 +110,7 @@
 .. _`Getting Started`: getting-started.html
 .. _`Papers`: extradoc.html
 .. _`Videos`: video-index.html
-.. _`Release 2.2.0`: http://pypy.org/download.html
+.. _`Release 2.2.1`: http://pypy.org/download.html
 .. _`speed.pypy.org`: http://speed.pypy.org
 .. _`RPython toolchain`: translation.html
 .. _`potential project ideas`: project-ideas.html
diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-2.2.1.rst
@@ -0,0 +1,47 @@
+=======================================
+PyPy 2.2.1 - Incrementalism.1
+=======================================
+
+We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python
+language. This is a bugfix release over 2.2.
+
+You can download the PyPy 2.2.1 release here:
+
+    http://pypy.org/download.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows
+32, or ARM (ARMv6 or ARMv7, with VFPv3).
+
+Work on the native Windows 64 is still stalling, we would welcome a volunteer
+to handle that.
+
+.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org
+
+Highlights
+==========
+
+This is a bugfix release. The most important bugs fixed are:
+
+* an issue in sockets' reference counting emulation, showing up
+  notably when using the ssl module and calling ``makefile()``.
+
+* Tkinter support on Windows.
+
+* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json
+  decoder incorrectly decoded surrogate pairs.
+
+* some FreeBSD fixes.
+
+Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are
+compatible with both PyPy 2.2 and 2.2.1.
+
+
+Cheers,
+Armin Rigo & everybody
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -18,3 +18,10 @@
 
 .. branch: voidtype_strformat
 Better support for record numpy arrays
+
+.. branch: osx-eci-frameworks-makefile
+OSX: Ensure frameworks end up in Makefile when specified in External compilation info
+
+.. branch: less-stringly-ops
+Use subclasses of SpaceOperation instead of SpaceOperator objects.
+Random cleanups in flowspace and annotator.
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -424,6 +424,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3125,6 +3125,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -382,6 +382,8 @@ assert a.dtype is dtype(int) a = arange(3, 7, 2) assert (a == [3, 5]).all() + a = arange(3, 8, 2) + assert (a == [3, 5, 7]).all() a = arange(3, dtype=float) assert (a == [0., 1., 2.]).all() assert a.dtype is dtype(float) diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py --- a/pypy/module/posix/app_startfile.py +++ b/pypy/module/posix/app_startfile.py @@ -7,7 +7,6 @@ ffi.cdef(""" HINSTANCE ShellExecuteA(HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT); HINSTANCE ShellExecuteW(HWND, LPCWSTR, LPCWSTR, LPCWSTR, LPCWSTR, INT); - DWORD GetLastError(void); """) self.NULL = ffi.NULL self.cast = ffi.cast diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -1,20 +1,20 @@ """This is not the JIT :-) -This is transformed to become a JIT by code elsewhere: pypy/jit/* +This is transformed to become a JIT by code elsewhere: rpython/jit/* """ -from rpython.tool.pairtype import extendabletype from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside from rpython.rlib import jit from rpython.rlib.jit import current_trace_length, unroll_parameters import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.pycode import PyCode, CO_GENERATOR +from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap + PyFrame._virtualizable_ = 
['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', 'cells[*]', diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -10,6 +10,7 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change + '0.8.1': '0.8', # did not change } def test_version(): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -48,6 +48,7 @@ def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', copy_to_dir=None, override_pypy_c=None, nostrip=False, withouttk=False): + assert '/' not in rename_pypy_c basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -584,10 +584,6 @@ def consider_op(self, block, opindex): op = block.operations[opindex] argcells = [self.binding(a) for a in op.args] - consider_meth = getattr(self,'consider_op_'+op.opname, - None) - if not consider_meth: - raise Exception,"unknown op: %r" % op # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the @@ -599,7 +595,7 @@ if isinstance(arg, annmodel.SomeImpossibleValue): raise BlockedInference(self, op, opindex) try: - resultcell = consider_meth(*argcells) + resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] e.source = gather_error(self, graph, block, opindex) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -1,69 +1,25 @@ """ Arguments objects. """ -from rpython.annotator.model import SomeTuple, SomeObject +from rpython.annotator.model import SomeTuple +from rpython.flowspace.argument import CallSpec -# for parsing call arguments -class RPythonCallsSpace(object): - """Pseudo Object Space providing almost no real operation. - For the Arguments class: if it really needs other operations, it means - that the call pattern is too complex for R-Python. 
- """ - def newtuple(self, items_s): - if len(items_s) == 1 and items_s[0] is Ellipsis: - res = SomeObject() # hack to get a SomeObject as the *arg - res.from_ellipsis = True - return res - else: - return SomeTuple(items_s) - - def unpackiterable(self, s_obj, expected_length=None): - if isinstance(s_obj, SomeTuple): - return list(s_obj.items) - if (s_obj.__class__ is SomeObject and - getattr(s_obj, 'from_ellipsis', False)): # see newtuple() - return [Ellipsis] - raise CallPatternTooComplex("'*' argument must be SomeTuple") - - def bool(self, s_tup): - assert isinstance(s_tup, SomeTuple) - return bool(s_tup.items) - - -class CallPatternTooComplex(Exception): - pass - - -class ArgumentsForTranslation(object): - w_starstararg = None - def __init__(self, space, args_w, keywords=None, keywords_w=None, - w_stararg=None, w_starstararg=None): - self.w_stararg = w_stararg - assert w_starstararg is None - self.space = space - assert isinstance(args_w, list) - self.arguments_w = args_w - self.keywords = keywords - self.keywords_w = keywords_w - self.keyword_names_w = None - - def __repr__(self): - """ NOT_RPYTHON """ - name = self.__class__.__name__ - if not self.keywords: - return '%s(%s)' % (name, self.arguments_w,) - else: - return '%s(%s, %s, %s)' % (name, self.arguments_w, - self.keywords, self.keywords_w) - +class ArgumentsForTranslation(CallSpec): @property def positional_args(self): if self.w_stararg is not None: - args_w = self.space.unpackiterable(self.w_stararg) + args_w = self.unpackiterable(self.w_stararg) return self.arguments_w + args_w else: return self.arguments_w + def newtuple(self, items_s): + return SomeTuple(items_s) + + def unpackiterable(self, s_obj): + assert isinstance(s_obj, SomeTuple) + return list(s_obj.items) + def fixedunpack(self, argcount): """The simplest argument parsing: get the 'argcount' arguments, or raise a real ValueError if the length is wrong.""" @@ -77,14 +33,12 @@ def prepend(self, w_firstarg): # used often "Return a new Arguments with a new argument inserted first." 
- return ArgumentsForTranslation(self.space, [w_firstarg] + self.arguments_w, - self.keywords, self.keywords_w, self.w_stararg, - self.w_starstararg) + return ArgumentsForTranslation([w_firstarg] + self.arguments_w, + self.keywords, self.w_stararg) def copy(self): - return ArgumentsForTranslation(self.space, self.arguments_w, - self.keywords, self.keywords_w, self.w_stararg, - self.w_starstararg) + return ArgumentsForTranslation(self.arguments_w, self.keywords, + self.w_stararg) def _match_signature(self, scope_w, signature, defaults_w=None): """Parse args and kwargs according to the signature of a code object, @@ -97,7 +51,7 @@ args_w = self.positional_args num_args = len(args_w) - keywords = self.keywords or [] + keywords = self.keywords num_kwds = len(keywords) # put as many positional input arguments into place as available @@ -111,7 +65,7 @@ starargs_w = args_w[co_argcount:] else: starargs_w = [] - scope_w[co_argcount] = self.space.newtuple(starargs_w) + scope_w[co_argcount] = self.newtuple(starargs_w) elif num_args > co_argcount: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) @@ -119,22 +73,17 @@ # handle keyword arguments num_remainingkwds = 0 - keywords_w = self.keywords_w kwds_mapping = None if num_kwds: # kwds_mapping maps target indexes in the scope (minus input_argcount) - # to positions in the keywords_w list - kwds_mapping = [-1] * (co_argcount - input_argcount) + # to keyword names + kwds_mapping = [] # match the keywords given at the call site to the argument names # the called function takes # this function must not take a scope_w, to make the scope not # escape num_remainingkwds = len(keywords) - for i, name in enumerate(keywords): - # If name was not encoded as a string, it could be None. In that - # case, it's definitely not going to be in the signature. - if name is None: - continue + for name in keywords: j = signature.find_argname(name) # if j == -1 nothing happens if j < input_argcount: @@ -142,14 +91,14 @@ if j >= 0: raise ArgErrMultipleValues(name) else: - kwds_mapping[j - input_argcount] = i # map to the right index + kwds_mapping.append(name) num_remainingkwds -= 1 if num_remainingkwds: if co_argcount == 0: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) - raise ArgErrUnknownKwds(self.space, num_remainingkwds, keywords, - kwds_mapping, self.keyword_names_w) + raise ArgErrUnknownKwds(num_remainingkwds, keywords, + kwds_mapping) # check for missing arguments and fill them from the kwds, # or with defaults, if available @@ -157,14 +106,11 @@ if input_argcount < co_argcount: def_first = co_argcount - (0 if defaults_w is None else len(defaults_w)) j = 0 - kwds_index = -1 for i in range(input_argcount, co_argcount): - if kwds_mapping is not None: - kwds_index = kwds_mapping[j] - j += 1 - if kwds_index >= 0: - scope_w[i] = keywords_w[kwds_index] - continue + name = signature.argnames[i] + if name in keywords: + scope_w[i] = keywords[name] + continue defnum = i - def_first if defnum >= 0: scope_w[i] = defaults_w[defnum] @@ -175,8 +121,7 @@ def unpack(self): "Return a ([w1,w2...], {'kw':w3...}) pair." 
- kwds_w = dict(zip(self.keywords, self.keywords_w)) if self.keywords else {} - return self.positional_args, kwds_w + return self.positional_args, self.keywords def match_signature(self, signature, defaults_w): """Parse args and kwargs according to the signature of a code object, @@ -189,41 +134,29 @@ def unmatch_signature(self, signature, data_w): """kind of inverse of match_signature""" - need_cnt = len(self.positional_args) - need_kwds = self.keywords or [] - space = self.space argnames, varargname, kwargname = signature assert kwargname is None cnt = len(argnames) - data_args_w = data_w[:cnt] + need_cnt = len(self.positional_args) if varargname: - data_w_stararg = data_w[cnt] - cnt += 1 - else: - data_w_stararg = space.newtuple([]) + assert len(data_w) == cnt + 1 + stararg_w = self.unpackiterable(data_w[cnt]) + if stararg_w: + args_w = data_w[:cnt] + stararg_w + assert len(args_w) == need_cnt + assert not self.keywords + return ArgumentsForTranslation(args_w, {}) + else: + data_w = data_w[:-1] assert len(data_w) == cnt + assert len(data_w) >= need_cnt + args_w = data_w[:need_cnt] + _kwds_w = dict(zip(argnames[need_cnt:], data_w[need_cnt:])) + keywords_w = [_kwds_w[key] for key in self.keywords] + return ArgumentsForTranslation(args_w, dict(zip(self.keywords, keywords_w))) - unfiltered_kwds_w = {} - if len(data_args_w) >= need_cnt: - args_w = data_args_w[:need_cnt] - for argname, w_arg in zip(argnames[need_cnt:], data_args_w[need_cnt:]): - unfiltered_kwds_w[argname] = w_arg - assert not space.bool(data_w_stararg) - else: - stararg_w = space.unpackiterable(data_w_stararg) - args_w = data_args_w + stararg_w - assert len(args_w) == need_cnt - - keywords = [] - keywords_w = [] - for key in need_kwds: - keywords.append(key) - keywords_w.append(unfiltered_kwds_w[key]) - - return ArgumentsForTranslation(self.space, args_w, keywords, keywords_w) - - @staticmethod - def fromshape(space, (shape_cnt, shape_keys, shape_star, shape_stst), data_w): + @classmethod + def fromshape(cls, (shape_cnt, shape_keys, shape_star), data_w): args_w = data_w[:shape_cnt] p = end_keys = shape_cnt + len(shape_keys) if shape_star: @@ -231,40 +164,12 @@ p += 1 else: w_star = None - if shape_stst: - w_starstar = data_w[p] - p += 1 - else: - w_starstar = None - return ArgumentsForTranslation(space, args_w, list(shape_keys), - data_w[shape_cnt:end_keys], w_star, - w_starstar) + return cls(args_w, dict(zip(shape_keys, data_w[shape_cnt:end_keys])), + w_star) - def flatten(self): - """ Argument <-> list of w_objects together with "shape" information """ - shape_cnt, shape_keys, shape_star, shape_stst = self._rawshape() - data_w = self.arguments_w + [self.keywords_w[self.keywords.index(key)] - for key in shape_keys] - if shape_star: - data_w.append(self.w_stararg) - if shape_stst: - data_w.append(self.w_starstararg) - return (shape_cnt, shape_keys, shape_star, shape_stst), data_w - def _rawshape(self, nextra=0): - shape_cnt = len(self.arguments_w) + nextra # Number of positional args - if self.keywords: - shape_keys = self.keywords[:] # List of keywords (strings) - shape_keys.sort() - else: - shape_keys = [] - shape_star = self.w_stararg is not None # Flag: presence of *arg - shape_stst = self.w_starstararg is not None # Flag: presence of **kwds - return shape_cnt, tuple(shape_keys), shape_star, shape_stst # shape_keys are sorted - - -def rawshape(args, nextra=0): - return args._rawshape(nextra) +def rawshape(args): + return args._rawshape() # @@ -336,31 +241,12 @@ class ArgErrUnknownKwds(ArgErr): - def __init__(self, space, 
num_remainingkwds, keywords, kwds_mapping, - keyword_names_w): + def __init__(self, num_remainingkwds, keywords, kwds_mapping): name = '' self.num_kwds = num_remainingkwds if num_remainingkwds == 1: - for i in range(len(keywords)): - if i not in kwds_mapping: - name = keywords[i] - if name is None: - # We'll assume it's unicode. Encode it. - # Careful, I *think* it should not be possible to - # get an IndexError here but you never know. - try: - if keyword_names_w is None: - raise IndexError - # note: negative-based indexing from the end - w_name = keyword_names_w[i - len(keywords)] - except IndexError: - name = '?' - else: - w_enc = space.wrap(space.sys.defaultencoding) - w_err = space.wrap("replace") - w_name = space.call_method(w_name, "encode", w_enc, - w_err) - name = space.str_w(w_name) + for name in keywords: + if name not in kwds_mapping: break self.kwd_name = name diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -5,19 +5,15 @@ import py import operator from rpython.tool.pairtype import pair, pairtype -from rpython.annotator.model import SomeObject, SomeInteger, SomeBool, s_Bool -from rpython.annotator.model import SomeString, SomeChar, SomeList, SomeDict,\ - SomeOrderedDict -from rpython.annotator.model import SomeUnicodeCodePoint, SomeUnicodeString -from rpython.annotator.model import SomeTuple, SomeImpossibleValue, s_ImpossibleValue -from rpython.annotator.model import SomeInstance, SomeBuiltin, SomeIterator -from rpython.annotator.model import SomePBC, SomeFloat, s_None, SomeByteArray -from rpython.annotator.model import SomeWeakRef -from rpython.annotator.model import SomeAddress, SomeTypedAddressAccess -from rpython.annotator.model import SomeSingleFloat, SomeLongFloat, SomeType -from rpython.annotator.model import unionof, UnionError, missing_operation -from rpython.annotator.model import read_can_only_throw -from rpython.annotator.model import add_knowntypedata, merge_knowntypedata +from rpython.annotator.model import ( + SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, + SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, + SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, + SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, + SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, + missing_operation, read_can_only_throw, add_knowntypedata, + merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.rlib import rarithmetic @@ -197,7 +193,9 @@ getitem_key = getitem_idx_key -class __extend__(pairtype(SomeType, SomeType)): +class __extend__(pairtype(SomeType, SomeType), + pairtype(SomeType, SomeConstantType), + pairtype(SomeConstantType, SomeType),): def union((obj1, obj2)): result = SomeType() diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -12,13 +12,13 @@ SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray) + SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) from rpython.annotator.classdef 
import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import ArgumentsForTranslation, RPythonCallsSpace +from rpython.annotator.argument import ArgumentsForTranslation from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype, llmemory @@ -436,11 +436,7 @@ elif isinstance(x, llmemory.fakeaddress): result = SomeAddress() elif tp is type: - if (x is type(None) or # add cases here if needed - x.__module__ == 'rpython.rtyper.lltypesystem.lltype'): - result = SomeType() - else: - result = SomePBC([self.getdesc(x)]) + result = SomeConstantType(x, self) elif callable(x): if hasattr(x, 'im_self') and hasattr(x, 'im_func'): # on top of PyPy, for cases like 'l.append' where 'l' is a @@ -700,12 +696,11 @@ return op def build_args(self, op, args_s): - space = RPythonCallsSpace() if op == "simple_call": - return ArgumentsForTranslation(space, list(args_s)) + return ArgumentsForTranslation(list(args_s)) elif op == "call_args": return ArgumentsForTranslation.fromshape( - space, args_s[0].const, # shape + args_s[0].const, # shape list(args_s[1:])) def ondegenerated(self, what, s_value, where=None, called_from_graph=None): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -312,7 +312,7 @@ r_func, nimplicitarg = s_repr.const.get_r_implfunc() nbargs = len(args_s) + nimplicitarg - s_sigs = r_func.get_s_signatures((nbargs, (), False, False)) + s_sigs = r_func.get_s_signatures((nbargs, (), False)) if len(s_sigs) != 1: raise TyperError("cannot hlinvoke callable %r with not uniform" "annotations: %r" % (s_repr.const, diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -879,11 +879,12 @@ self.name, flags) + @staticmethod def consider_call_site(bookkeeper, family, descs, args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): # we are computing call families and call tables that always contain @@ -1039,11 +1040,12 @@ args = args.prepend(s_self) return self.funcdesc.pycall(schedule, args, s_previous_result, op) + @staticmethod def consider_call_site(bookkeeper, family, descs, args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): return self.funcdesc diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -502,6 +502,14 @@ else: return kt.__name__ +class SomeConstantType(SomePBC): + can_be_None = False + subset_of = None + def __init__(self, x, bk): + self.descriptions = set([bk.getdesc(x)]) + self.knowntype = type(x) + self.const = x + class 
SomeBuiltin(SomeObject): "Stands for a built-in function or method with special-cased analysis." diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -1,31 +1,19 @@ # base annotation policy for specialization from rpython.annotator.specialize import default_specialize as default -from rpython.annotator.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype, specialize_arg_or_var -from rpython.annotator.specialize import memo, specialize_call_location +from rpython.annotator.specialize import ( + specialize_argvalue, specialize_argtype, specialize_arglistitemtype, + specialize_arg_or_var, memo, specialize_call_location) -class BasicAnnotatorPolicy(object): +class AnnotatorPolicy(object): + """ + Possibly subclass and pass an instance to the annotator to control + special-casing during annotation + """ def event(pol, bookkeeper, what, *args): pass - def get_specializer(pol, tag): - return pol.no_specialization - - def no_specialization(pol, funcdesc, args_s): - return funcdesc.cachedgraph(None) - - def no_more_blocks_to_annotate(pol, annotator): - # hint to all pending specializers that we are done - for callback in annotator.bookkeeper.pending_specializations: - callback() - del annotator.bookkeeper.pending_specializations[:] - -class AnnotatorPolicy(BasicAnnotatorPolicy): - """ - Possibly subclass and pass an instance to the annotator to control special casing during annotation - """ - def get_specializer(pol, directive): if directive is None: return pol.default_specialize @@ -74,3 +62,9 @@ def specialize__ll_and_arg(pol, *args): from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy return LowLevelAnnotatorPolicy.specialize__ll_and_arg(*args) + + def no_more_blocks_to_annotate(pol, annotator): + # hint to all pending specializers that we are done + for callback in annotator.bookkeeper.pending_specializations: + callback() + del annotator.bookkeeper.pending_specializations[:] diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -5,6 +5,7 @@ from rpython.tool.algo.unionfind import UnionFind from rpython.flowspace.model import Block, Link, Variable, SpaceOperation from rpython.flowspace.model import checkgraph +from rpython.flowspace.operation import op from rpython.annotator import model as annmodel from rpython.flowspace.argument import Signature @@ -33,7 +34,8 @@ argscopy = [Variable(v) for v in graph.getargs()] starargs = [Variable('stararg%d'%i) for i in range(nb_extra_args)] newstartblock = Block(argscopy[:-1] + starargs) - newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) + newtup = op.newtuple(*starargs) + newtup.result = argscopy[-1] newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) graph.startblock = newstartblock diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -15,6 +15,7 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import objectmodel from rpython.flowspace.objspace import build_flow, FlowingError +from rpython.flowspace.operation import op from rpython.translator.test import snippet @@ -69,12 +70,11 @@ return x+1 """ x = Variable("x") - result = Variable("result") - op = SpaceOperation("add", [x, Constant(1)], result) + 
oper = op.add(x, Constant(1)) block = Block([x]) fun = FunctionGraph("f", block) - block.operations.append(op) - block.closeblock(Link([result], fun.returnblock)) + block.operations.append(oper) + block.closeblock(Link([oper.result], fun.returnblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) a.complete() @@ -90,20 +90,18 @@ """ i1 = Variable("i1") i2 = Variable("i2") - i3 = Variable("i3") - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) - decop = SpaceOperation("add", [i2, Constant(-1)], i3) + conditionop = op.gt(i1, Constant(0)) + decop = op.add(i2, Constant(-1)) headerblock = Block([i1]) whileblock = Block([i2]) fun = FunctionGraph("f", headerblock) headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres + headerblock.exitswitch = conditionop.result headerblock.closeblock(Link([i1], fun.returnblock, False), Link([i1], whileblock, True)) whileblock.operations.append(decop) - whileblock.closeblock(Link([i3], headerblock)) + whileblock.closeblock(Link([decop.result], headerblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) @@ -123,15 +121,12 @@ i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") - i4 = Variable("i4") sum2 = Variable("sum2") sum3 = Variable("sum3") - sum4 = Variable("sum4") - - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i2, Constant(0)], conditionres) - decop = SpaceOperation("add", [i3, Constant(-1)], i4) - addop = SpaceOperation("add", [i3, sum3], sum4) + + conditionop = op.gt(i2, Constant(0)) + decop = op.add(i3, Constant(-1)) + addop = op.add(i3, sum3) startblock = Block([i1]) headerblock = Block([i2, sum2]) whileblock = Block([i3, sum3]) @@ -139,12 +134,12 @@ fun = FunctionGraph("f", startblock) startblock.closeblock(Link([i1, Constant(0)], headerblock)) headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres + headerblock.exitswitch = conditionop.result headerblock.closeblock(Link([sum2], fun.returnblock, False), Link([i2, sum2], whileblock, True)) whileblock.operations.append(addop) whileblock.operations.append(decop) - whileblock.closeblock(Link([i4, sum4], headerblock)) + whileblock.closeblock(Link([decop.result, addop.result], headerblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) @@ -1065,8 +1060,9 @@ gf2 = graphof(a, f2) gf3 = graphof(a, f3) - assert fam1.calltables == {(2, (), False, False): [{fdesc1: gf1}], (1, (), False, False): [{fdesc1: gf1}]} - assert fam2.calltables == {(1, (), False, False): [{fdesc2: gf2, fdesc3: gf3}]} + assert fam1.calltables == {(2, (), False): [{fdesc1: gf1}], + (1, (), False): [{fdesc1: gf1}]} + assert fam2.calltables == {(1, (), False): [{fdesc2: gf2, fdesc3: gf3}]} def test_pbc_call_ins(self): class A(object): @@ -1117,14 +1113,14 @@ gfA_m = graphof(a, A.m.im_func) gfC_m = graphof(a, C.m.im_func) - assert famB_n.calltables == {(1, (), False, False): [{mdescB_n.funcdesc: gfB_n}] } - assert famA_m.calltables == {(1, (), False, False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } + assert famB_n.calltables == {(1, (), False): [{mdescB_n.funcdesc: gfB_n}] } + assert famA_m.calltables == {(1, (), False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } mdescCinit = getmdesc(C().__init__) famCinit = mdescCinit.getcallfamily() gfCinit = graphof(a, C.__init__.im_func) - assert famCinit.calltables == {(1, (), 
False, False): [{mdescCinit.funcdesc: gfCinit}] } + assert famCinit.calltables == {(1, (), False): [{mdescCinit.funcdesc: gfCinit}] } def test_isinstance_usigned(self): def f(x): @@ -2053,7 +2049,7 @@ someint = annmodel.SomeInteger() - assert (fdesc.get_s_signatures((2,(),False,False)) + assert (fdesc.get_s_signatures((2, (), False)) == [([someint,someint],someint)]) def test_emulated_pbc_call_callback(self): diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -1,43 +1,30 @@ # -*- coding: utf-8 -*- import py from rpython.annotator.argument import ArgumentsForTranslation, rawshape -from rpython.flowspace.argument import Signature +from rpython.flowspace.argument import Signature, CallSpec -class DummySpace(object): +class MockArgs(ArgumentsForTranslation): def newtuple(self, items): return tuple(items) - def bool(self, obj): - return bool(obj) - def unpackiterable(self, it): return list(it) -def make_arguments_for_translation(space, args_w, keywords_w={}, - w_stararg=None, w_starstararg=None): - return ArgumentsForTranslation(space, args_w, keywords_w.keys(), - keywords_w.values(), w_stararg, - w_starstararg) - class TestArgumentsForTranslation(object): def test_prepend(self): - space = DummySpace() - args = ArgumentsForTranslation(space, ["0"]) + args = MockArgs(["0"]) args1 = args.prepend("thingy") assert args1 is not args assert args1.arguments_w == ["thingy", "0"] - assert args1.keywords is args.keywords - assert args1.keywords_w is args.keywords_w + assert args1.keywords == args.keywords def test_fixedunpacked(self): - space = DummySpace() - - args = ArgumentsForTranslation(space, [], ["k"], [1]) + args = MockArgs([], {"k": 1}) py.test.raises(ValueError, args.fixedunpack, 1) - args = ArgumentsForTranslation(space, ["a", "b"]) + args = MockArgs(["a", "b"]) py.test.raises(ValueError, args.fixedunpack, 0) py.test.raises(ValueError, args.fixedunpack, 1) py.test.raises(ValueError, args.fixedunpack, 3) @@ -46,122 +33,89 @@ assert args.fixedunpack(2) == ['a', 'b'] def test_unmatch_signature(self): - space = DummySpace() - args = make_arguments_for_translation(space, [1,2,3]) + args = MockArgs([1, 2, 3]) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1]) + args = MockArgs([1]) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, [2, 3]) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1,2,3,4,5]) + args = MockArgs([1, 2, 3, 4, 5]) sig = Signature(['a', 'b', 'c'], 'r', None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1], {'c': 3, 'b': 2}) + args = MockArgs([1], {'c': 3, 'b': 2}) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1], {'c': 5}) + args = MockArgs([1], {'c': 5}) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, [2, 3]) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() def test_rawshape(self): - space = DummySpace() - 
args = make_arguments_for_translation(space, [1,2,3]) - assert rawshape(args) == (3, (), False, False) + args = MockArgs([1, 2, 3]) + assert rawshape(args) == (3, (), False) - args = make_arguments_for_translation(space, [1]) - assert rawshape(args, 2) == (3, (), False, False) + args = MockArgs([1, 2, 3, 4, 5]) + assert rawshape(args) == (5, (), False) - args = make_arguments_for_translation(space, [1,2,3,4,5]) - assert rawshape(args) == (5, (), False, False) + args = MockArgs([1], {'c': 3, 'b': 2}) + assert rawshape(args) == (1, ('b', 'c'), False) - args = make_arguments_for_translation(space, [1], {'c': 3, 'b': 2}) - assert rawshape(args) == (1, ('b', 'c'), False, False) + args = MockArgs([1], {'c': 5}) + assert rawshape(args) == (1, ('c', ), False) - args = make_arguments_for_translation(space, [1], {'c': 5}) - assert rawshape(args) == (1, ('c', ), False, False) + args = MockArgs([1], {'c': 5, 'd': 7}) + assert rawshape(args) == (1, ('c', 'd'), False) - args = make_arguments_for_translation(space, [1], {'c': 5, 'd': 7}) - assert rawshape(args) == (1, ('c', 'd'), False, False) - - args = make_arguments_for_translation(space, [1,2,3,4,5], {'e': 5, 'd': 7}) - assert rawshape(args) == (5, ('d', 'e'), False, False) - - - def test_flatten(self): - space = DummySpace() - args = make_arguments_for_translation(space, [1,2,3]) - assert args.flatten() == ((3, (), False, False), [1, 2, 3]) - - args = make_arguments_for_translation(space, [1]) - assert args.flatten() == ((1, (), False, False), [1]) - - args = make_arguments_for_translation(space, [1,2,3,4,5]) - assert args.flatten() == ((5, (), False, False), [1,2,3,4,5]) - - args = make_arguments_for_translation(space, [1], {'c': 3, 'b': 2}) - assert args.flatten() == ((1, ('b', 'c'), False, False), [1, 2, 3]) - - args = make_arguments_for_translation(space, [1], {'c': 5}) - assert args.flatten() == ((1, ('c', ), False, False), [1, 5]) - - args = make_arguments_for_translation(space, [1], {'c': 5, 'd': 7}) - assert args.flatten() == ((1, ('c', 'd'), False, False), [1, 5, 7]) - - args = make_arguments_for_translation(space, [1,2,3,4,5], {'e': 5, 'd': 7}) - assert args.flatten() == ((5, ('d', 'e'), False, False), [1, 2, 3, 4, 5, 7, 5]) + args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) + assert rawshape(args) == (5, ('d', 'e'), False) def test_stararg_flowspace_variable(self): - space = DummySpace() var = object() - shape = ((2, ('g', ), True, False), [1, 2, 9, var]) - args = make_arguments_for_translation(space, [1,2], {'g': 9}, - w_stararg=var) + shape = ((2, ('g', ), True), [1, 2, 9, var]) + args = MockArgs([1, 2], {'g': 9}, w_stararg=var) assert args.flatten() == shape - args = ArgumentsForTranslation.fromshape(space, *shape) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - def test_fromshape(self): - space = DummySpace() - shape = ((3, (), False, False), [1, 2, 3]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((3, (), False), [1, 2, 3]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, (), False, False), [1]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, (), False), [1]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((5, (), False, False), [1,2,3,4,5]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((5, (), False), [1, 2, 3, 4, 5]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('b', 'c'), False, False), [1, 2, 3]) - args = 
ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, ('b', 'c'), False), [1, 2, 3]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('c', ), False, False), [1, 5]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, ('c', ), False), [1, 5]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('c', 'd'), False, False), [1, 5, 7]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, ('c', 'd'), False), [1, 5, 7]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((5, ('d', 'e'), False, False), [1, 2, 3, 4, 5, 7, 5]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((5, ('d', 'e'), False), [1, 2, 3, 4, 5, 7, 5]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -35,33 +35,33 @@ class __extend__(SomeObject): - def type(obj, *moreargs): + def type(self, *moreargs): if moreargs: raise Exception('type() called with more than one argument') r = SomeType() bk = getbookkeeper() - op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=obj) + op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=self) r.is_type_of = [op.args[0]] return r - def issubtype(obj, s_cls): - if hasattr(obj, 'is_type_of'): - vars = obj.is_type_of + def issubtype(self, s_cls): + if hasattr(self, 'is_type_of'): + vars = self.is_type_of annotator = getbookkeeper().annotator return builtin.builtin_isinstance(annotator.binding(vars[0]), s_cls, vars) - if obj.is_constant() and s_cls.is_constant(): - return immutablevalue(issubclass(obj.const, s_cls.const)) + if self.is_constant() and s_cls.is_constant(): + return immutablevalue(issubclass(self.const, s_cls.const)) return s_Bool - def len(obj): + def len(self): return SomeInteger(nonneg=True) - def bool_behavior(obj, s): - if obj.is_immutable_constant(): - s.const = bool(obj.const) + def bool_behavior(self, s): + if self.is_immutable_constant(): + s.const = bool(self.const) else: - s_len = obj.len() + s_len = self.len() if s_len.is_immutable_constant(): s.const = s_len.const > 0 @@ -80,83 +80,83 @@ r.set_knowntypedata(knowntypedata) return r - def hash(obj): + def hash(self): raise AnnotatorError("cannot use hash() in RPython") - def str(obj): - getbookkeeper().count('str', obj) + def str(self): + getbookkeeper().count('str', self) return SomeString() - def unicode(obj): - getbookkeeper().count('unicode', obj) + def unicode(self): + getbookkeeper().count('unicode', self) return SomeUnicodeString() - def repr(obj): - getbookkeeper().count('repr', obj) + def repr(self): + getbookkeeper().count('repr', self) return SomeString() - def hex(obj): - getbookkeeper().count('hex', obj) + def hex(self): + getbookkeeper().count('hex', self) return SomeString() - def oct(obj): - getbookkeeper().count('oct', obj) + def oct(self): + getbookkeeper().count('oct', self) return SomeString() - def id(obj): + def id(self): raise Exception("cannot use id() in RPython; " "see objectmodel.compute_xxx()") - def int(obj): + def int(self): return SomeInteger() - def float(obj): + def float(self): return SomeFloat() - def delattr(obj, s_attr): - if obj.__class__ != SomeObject or obj.knowntype != object: + def delattr(self, s_attr): + if self.__class__ != SomeObject or self.knowntype != object: getbookkeeper().warning( ("delattr on potentally non-SomeObjects is not RPythonic: 
delattr(%r,%r)" % - (obj, s_attr))) + (self, s_attr))) - def find_method(obj, name): + def find_method(self, name): "Look for a special-case implementation for the named method." try: - analyser = getattr(obj.__class__, 'method_' + name) + analyser = getattr(self.__class__, 'method_' + name) except AttributeError: return None else: - return SomeBuiltin(analyser, obj, name) + return SomeBuiltin(analyser, self, name) - def getattr(obj, s_attr): + def getattr(self, s_attr): # get a SomeBuiltin if the SomeObject has # a corresponding method to handle it if not s_attr.is_constant() or not isinstance(s_attr.const, str): raise AnnotatorError("getattr(%r, %r) has non-constant argument" - % (obj, s_attr)) + % (self, s_attr)) attr = s_attr.const - s_method = obj.find_method(attr) + s_method = self.find_method(attr) if s_method is not None: return s_method # if the SomeObject is itself a constant, allow reading its attrs - if obj.is_immutable_constant() and hasattr(obj.const, attr): - return immutablevalue(getattr(obj.const, attr)) - raise AnnotatorError("Cannot find attribute %r on %r" % (attr, obj)) + if self.is_immutable_constant() and hasattr(self.const, attr): + return immutablevalue(getattr(self.const, attr)) + raise AnnotatorError("Cannot find attribute %r on %r" % (attr, self)) getattr.can_only_throw = [] - def bind_callables_under(obj, classdef, name): - return obj # default unbound __get__ implementation + def bind_callables_under(self, classdef, name): + return self # default unbound __get__ implementation - def simple_call(obj, *args_s): - return obj.call(getbookkeeper().build_args("simple_call", args_s)) + def simple_call(self, *args_s): + return self.call(getbookkeeper().build_args("simple_call", args_s)) - def call_args(obj, *args_s): - return obj.call(getbookkeeper().build_args("call_args", args_s)) + def call_args(self, *args_s): + return self.call(getbookkeeper().build_args("call_args", args_s)) - def call(obj, args, implicit_init=False): + def call(self, args, implicit_init=False): raise AnnotatorError("Cannot prove that the object is callable") - def op_contains(obj, s_element): + def op_contains(self, s_element): return s_Bool op_contains.can_only_throw = [] @@ -165,10 +165,10 @@ class __extend__(SomeFloat): - def pos(flt): - return flt + def pos(self): + return self - def neg(flt): + def neg(self): return SomeFloat() abs = neg @@ -233,105 +233,105 @@ class __extend__(SomeTuple): - def len(tup): - return immutablevalue(len(tup.items)) + def len(self): + return immutablevalue(len(self.items)) - def iter(tup): - getbookkeeper().count("tuple_iter", tup) - return SomeIterator(tup) + def iter(self): + getbookkeeper().count("tuple_iter", self) + return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(tup): - return unionof(*tup.items) + def getanyitem(self): + return unionof(*self.items) - def getslice(tup, s_start, s_stop): + def getslice(self, s_start, s_stop): assert s_start.is_immutable_constant(),"tuple slicing: needs constants" assert s_stop.is_immutable_constant(), "tuple slicing: needs constants" - items = tup.items[s_start.const:s_stop.const] + items = self.items[s_start.const:s_stop.const] return SomeTuple(items) class __extend__(SomeList): - def method_append(lst, s_value): - lst.listdef.resize() - lst.listdef.generalize(s_value) + def method_append(self, s_value): + self.listdef.resize() + self.listdef.generalize(s_value) - def method_extend(lst, s_iterable): - lst.listdef.resize() + def method_extend(self, s_iterable): + self.listdef.resize() if 
isinstance(s_iterable, SomeList): # unify the two lists - lst.listdef.agree(s_iterable.listdef) + self.listdef.agree(s_iterable.listdef) else: s_iter = s_iterable.iter() - lst.method_append(s_iter.next()) + self.method_append(s_iter.next()) - def method_reverse(lst): - lst.listdef.mutate() + def method_reverse(self): + self.listdef.mutate() - def method_insert(lst, s_index, s_value): - lst.method_append(s_value) + def method_insert(self, s_index, s_value): + self.method_append(s_value) - def method_remove(lst, s_value): - lst.listdef.resize() - lst.listdef.generalize(s_value) + def method_remove(self, s_value): + self.listdef.resize() + self.listdef.generalize(s_value) - def method_pop(lst, s_index=None): - lst.listdef.resize() - return lst.listdef.read_item() + def method_pop(self, s_index=None): + self.listdef.resize() + return self.listdef.read_item() method_pop.can_only_throw = [IndexError] - def method_index(lst, s_value): + def method_index(self, s_value): getbookkeeper().count("list_index") - lst.listdef.generalize(s_value) + self.listdef.generalize(s_value) return SomeInteger(nonneg=True) - def len(lst): - s_item = lst.listdef.read_item() + def len(self): + s_item = self.listdef.read_item() if isinstance(s_item, SomeImpossibleValue): return immutablevalue(0) - return SomeObject.len(lst) + return SomeObject.len(self) - def iter(lst): - return SomeIterator(lst) + def iter(self): + return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(lst): - return lst.listdef.read_item() + def getanyitem(self): + return self.listdef.read_item() - def op_contains(lst, s_element): - lst.listdef.generalize(s_element) + def op_contains(self, s_element): + self.listdef.generalize(s_element) return s_Bool op_contains.can_only_throw = [] - def hint(lst, *args_s): + def hint(self, *args_s): hints = args_s[-1].const if 'maxlength' in hints: # only for iteration over lists or dicts at the moment, # not over an iterator object (because it has no known length) s_iterable = args_s[0] if isinstance(s_iterable, (SomeList, SomeDict)): - lst = SomeList(lst.listdef) # create a fresh copy - lst.listdef.resize() - lst.listdef.listitem.hint_maxlength = True + self = SomeList(self.listdef) # create a fresh copy + self.listdef.resize() + self.listdef.listitem.hint_maxlength = True elif 'fence' in hints: - lst = lst.listdef.offspring() - return lst + self = self.listdef.offspring() + return self - def getslice(lst, s_start, s_stop): + def getslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - return lst.listdef.offspring() + return self.listdef.offspring() - def setslice(lst, s_start, s_stop, s_iterable): + def setslice(self, s_start, s_stop, s_iterable): check_negative_slice(s_start, s_stop) if not isinstance(s_iterable, SomeList): raise Exception("list[start:stop] = x: x must be a list") - lst.listdef.mutate() - lst.listdef.agree(s_iterable.listdef) + self.listdef.mutate() + self.listdef.agree(s_iterable.listdef) # note that setslice is not allowed to resize a list in RPython - def delslice(lst, s_start, s_stop): + def delslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - lst.listdef.resize() + self.listdef.resize() def check_negative_slice(s_start, s_stop, error="slicing"): if isinstance(s_start, SomeInteger) and not s_start.nonneg: @@ -344,29 +344,29 @@ class __extend__(SomeDict): - def _is_empty(dct): - s_key = dct.dictdef.read_key() - s_value = dct.dictdef.read_value() + def _is_empty(self): + s_key = self.dictdef.read_key() + s_value = self.dictdef.read_value() 
return (isinstance(s_key, SomeImpossibleValue) or isinstance(s_value, SomeImpossibleValue)) - def len(dct): - if dct._is_empty(): + def len(self): + if self._is_empty(): return immutablevalue(0) - return SomeObject.len(dct) + return SomeObject.len(self) - def iter(dct): - return SomeIterator(dct) + def iter(self): + return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(dct, variant='keys'): + def getanyitem(self, variant='keys'): if variant == 'keys': - return dct.dictdef.read_key() + return self.dictdef.read_key() elif variant == 'values': - return dct.dictdef.read_value() + return self.dictdef.read_value() elif variant == 'items': - s_key = dct.dictdef.read_key() - s_value = dct.dictdef.read_value() + s_key = self.dictdef.read_key() + s_value = self.dictdef.read_value() if (isinstance(s_key, SomeImpossibleValue) or isinstance(s_value, SomeImpossibleValue)): return s_ImpossibleValue @@ -375,59 +375,59 @@ else: raise ValueError - def method_get(dct, key, dfl): - dct.dictdef.generalize_key(key) - dct.dictdef.generalize_value(dfl) - return dct.dictdef.read_value() + def method_get(self, key, dfl): + self.dictdef.generalize_key(key) + self.dictdef.generalize_value(dfl) + return self.dictdef.read_value() method_setdefault = method_get - def method_copy(dct): - return SomeDict(dct.dictdef) + def method_copy(self): + return SomeDict(self.dictdef) def method_update(dct1, dct2): if s_None.contains(dct2): return SomeImpossibleValue() dct1.dictdef.union(dct2.dictdef) - def method_keys(dct): - return getbookkeeper().newlist(dct.dictdef.read_key()) + def method_keys(self): + return getbookkeeper().newlist(self.dictdef.read_key()) - def method_values(dct): - return getbookkeeper().newlist(dct.dictdef.read_value()) + def method_values(self): + return getbookkeeper().newlist(self.dictdef.read_value()) - def method_items(dct): - return getbookkeeper().newlist(dct.getanyitem('items')) + def method_items(self): + return getbookkeeper().newlist(self.getanyitem('items')) - def method_iterkeys(dct): - return SomeIterator(dct, 'keys') + def method_iterkeys(self): + return SomeIterator(self, 'keys') - def method_itervalues(dct): - return SomeIterator(dct, 'values') + def method_itervalues(self): + return SomeIterator(self, 'values') - def method_iteritems(dct): - return SomeIterator(dct, 'items') + def method_iteritems(self): + return SomeIterator(self, 'items') - def method_clear(dct): + def method_clear(self): pass - def method_popitem(dct): - return dct.getanyitem('items') + def method_popitem(self): + return self.getanyitem('items') - def method_pop(dct, s_key, s_dfl=None): - dct.dictdef.generalize_key(s_key) + def method_pop(self, s_key, s_dfl=None): + self.dictdef.generalize_key(s_key) if s_dfl is not None: - dct.dictdef.generalize_value(s_dfl) - return dct.dictdef.read_value() + self.dictdef.generalize_value(s_dfl) + return self.dictdef.read_value() - def _can_only_throw(dic, *ignore): - if dic.dictdef.dictkey.custom_eq_hash: + def _can_only_throw(self, *ignore): + if self.dictdef.dictkey.custom_eq_hash: return None # r_dict: can throw anything return [] # else: no possible exception - def op_contains(dct, s_element): - dct.dictdef.generalize_key(s_element) - if dct._is_empty(): + def op_contains(self, s_element): + self.dictdef.generalize_key(s_element) + if self._is_empty(): s_bool = SomeBool() s_bool.const = False return s_bool @@ -438,89 +438,89 @@ class __extend__(SomeString, SomeUnicodeString): - def method_startswith(str, frag): - if str.is_constant() and frag.is_constant(): - return 
immutablevalue(str.const.startswith(frag.const)) + def method_startswith(self, frag): + if self.is_constant() and frag.is_constant(): + return immutablevalue(self.const.startswith(frag.const)) return s_Bool - def method_endswith(str, frag): - if str.is_constant() and frag.is_constant(): - return immutablevalue(str.const.endswith(frag.const)) + def method_endswith(self, frag): + if self.is_constant() and frag.is_constant(): + return immutablevalue(self.const.endswith(frag.const)) return s_Bool - def method_find(str, frag, start=None, end=None): + def method_find(self, frag, start=None, end=None): check_negative_slice(start, end, "find") return SomeInteger() - def method_rfind(str, frag, start=None, end=None): + def method_rfind(self, frag, start=None, end=None): check_negative_slice(start, end, "rfind") return SomeInteger() - def method_count(str, frag, start=None, end=None): + def method_count(self, frag, start=None, end=None): check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(str, chr=None): - return str.basestringclass(no_nul=str.no_nul) + def method_strip(self, chr=None): + return self.basestringclass(no_nul=self.no_nul) - def method_lstrip(str, chr=None): - return str.basestringclass(no_nul=str.no_nul) + def method_lstrip(self, chr=None): + return self.basestringclass(no_nul=self.no_nul) - def method_rstrip(str, chr=None): - return str.basestringclass(no_nul=str.no_nul) + def method_rstrip(self, chr=None): + return self.basestringclass(no_nul=self.no_nul) - def method_join(str, s_list): + def method_join(self, s_list): if s_None.contains(s_list): return SomeImpossibleValue() - getbookkeeper().count("str_join", str) + getbookkeeper().count("str_join", self) s_item = s_list.listdef.read_item() if s_None.contains(s_item): - if isinstance(str, SomeUnicodeString): + if isinstance(self, SomeUnicodeString): return immutablevalue(u"") return immutablevalue("") - no_nul = str.no_nul and s_item.no_nul - return str.basestringclass(no_nul=no_nul) + no_nul = self.no_nul and s_item.no_nul + return self.basestringclass(no_nul=no_nul) - def iter(str): - return SomeIterator(str) + def iter(self): + return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(str): - return str.basecharclass() + def getanyitem(self): + return self.basecharclass() - def method_split(str, patt, max=-1): - getbookkeeper().count("str_split", str, patt) + def method_split(self, patt, max=-1): + getbookkeeper().count("str_split", self, patt) if max == -1 and patt.is_constant() and patt.const == "\0": no_nul = True else: - no_nul = str.no_nul - s_item = str.basestringclass(no_nul=no_nul) + no_nul = self.no_nul + s_item = self.basestringclass(no_nul=no_nul) return getbookkeeper().newlist(s_item) - def method_rsplit(str, patt, max=-1): - getbookkeeper().count("str_rsplit", str, patt) - s_item = str.basestringclass(no_nul=str.no_nul) + def method_rsplit(self, patt, max=-1): + getbookkeeper().count("str_rsplit", self, patt) + s_item = self.basestringclass(no_nul=self.no_nul) return getbookkeeper().newlist(s_item) - def method_replace(str, s1, s2): - return str.basestringclass(no_nul=str.no_nul and s2.no_nul) + def method_replace(self, s1, s2): + return self.basestringclass(no_nul=self.no_nul and s2.no_nul) - def getslice(str, s_start, s_stop): + def getslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - result = str.basestringclass(no_nul=str.no_nul) + result = self.basestringclass(no_nul=self.no_nul) return result - def op_contains(str, s_element): + def 
op_contains(self, s_element): if s_element.is_constant() and s_element.const == "\0": r = SomeBool() bk = getbookkeeper() - op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=str) + op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) knowntypedata = {} - add_knowntypedata(knowntypedata, False, [op.args[0]], str.nonnulify()) + add_knowntypedata(knowntypedata, False, [op.args[0]], self.nonnulify()) r.set_knowntypedata(knowntypedata) return r else: - return SomeObject.op_contains(str, s_element) + return SomeObject.op_contains(self, s_element) op_contains.can_only_throw = [] def method_format(self, *args): @@ -533,7 +533,7 @@ return SomeByteArray() class __extend__(SomeUnicodeString): - def method_encode(uni, s_enc): + def method_encode(self, s_enc): if not s_enc.is_constant(): raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const @@ -544,29 +544,29 @@ class __extend__(SomeString): - def method_isdigit(str): + def method_isdigit(self): return s_Bool - def method_isalpha(str): + def method_isalpha(self): return s_Bool - def method_isalnum(str): + def method_isalnum(self): return s_Bool - def method_upper(str): + def method_upper(self): return SomeString() - def method_lower(str): + def method_lower(self): return SomeString() - def method_splitlines(str, s_keep_newlines=None): - s_list = getbookkeeper().newlist(str.basestringclass()) + def method_splitlines(self, s_keep_newlines=None): + s_list = getbookkeeper().newlist(self.basestringclass()) From noreply at buildbot.pypy.org Mon Dec 2 17:50:29 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 2 Dec 2013 17:50:29 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: close to be merged branch Message-ID: <20131202165029.1C50F1C3264@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68358:f1c3df0222ff Date: 2013-12-02 17:48 +0100 http://bitbucket.org/pypy/pypy/changeset/f1c3df0222ff/ Log: close to be merged branch From noreply at buildbot.pypy.org Mon Dec 2 17:50:30 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 2 Dec 2013 17:50:30 +0100 (CET) Subject: [pypy-commit] pypy default: merge the ndarray-buffer branch, which adds support for the buffer= argument to the ndarray ctor. This is needed e.g. to use numpy.memmap() Message-ID: <20131202165030.6D1DB1C3264@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68359:71a0ee32f321 Date: 2013-12-02 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/71a0ee32f321/ Log: merge the ndarray-buffer branch, which adds support for the buffer= argument to the ndarray ctor. This is needed e.g. to use numpy.memmap() diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. 
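As a usage illustration of the merged ndarray-buffer branch (not part of the commit itself), the new buffer= argument builds an ndarray as a view over any object that exposes a raw, addressable buffer. The sketch below is modelled on the tests added further down in this diff; numpypy is PyPy's numpy module ('numpy' elsewhere), and the variable names are just for the example:

    import array
    import numpypy as np                          # PyPy's numpy module; 'numpy' on CPython

    buf = array.array('c', ['\x00'] * 6)          # six writable bytes
    a = np.ndarray((3,), buffer=buf, dtype='i2')  # view them as three int16 values
    a[0] = ord('b')                               # writes go straight through into buf
    assert a.base is buf                          # the array keeps its buffer alive

Read-only buffers (for example an mmap opened with ACCESS_READ) are accepted as well, but assigning into the resulting array raises ValueError; that is what the is_writable() hook added above is for, and it is the piece numpy.memmap needs.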
diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -49,11 +49,24 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, + w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) - if owning: + if w_base is not None: + if owning: + raise OperationError(space.w_ValueError, + space.wrap("Cannot have owning=True when specifying a buffer")) + if writable: + impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, + backstrides, storage, w_base) + else: + impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, + strides, backstrides, + storage, w_base) + + elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, backstrides, storage=storage) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,3 +1,5 @@ +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ @@ -20,6 +22,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): @@ -1067,13 +1070,35 @@ offset=0, w_strides=None, order='C'): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_buffer)): - raise OperationError(space.w_NotImplementedError, - space.wrap("unsupported param")) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) + + if not space.is_none(w_buffer): + if (not space.is_none(w_strides)): + raise 
OperationError(space.w_NotImplementedError, + space.wrap("unsupported param")) + + buf = space.buffer_w(w_buffer) + try: + raw_ptr = buf.get_raw_address() + except ValueError: + raise OperationError(space.w_TypeError, space.wrap( + "Only raw buffers are supported")) + if not shape: + raise OperationError(space.w_TypeError, space.wrap( + "numpy scalars from buffers not supported yet")) + totalsize = support.product(shape) * dtype.get_size() + if totalsize+offset > buf.getlength(): + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) + storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) + storage = rffi.ptradd(storage, offset) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + w_subtype=w_subtype, + w_base=w_buffer, + writable=buf.is_writable()) + if not shape: return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): @@ -1093,8 +1118,6 @@ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. """ - from rpython.rtyper.lltypesystem import rffi - from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -219,6 +219,7 @@ class AppTestNumArray(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def w_CustomIndexObject(self, index): class CustomIndexObject(object): def __init__(self, index): @@ -2089,6 +2090,69 @@ a = np.ndarray([1], dtype=bool) assert a[0] == True + +class AppTestNumArrayFromBuffer(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) + + def setup_class(cls): + from rpython.tool.udir import udir + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + + def test_ndarray_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + a = np.ndarray((3,), buffer=buf, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] + assert a.base is buf + + def test_ndarray_subclass_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + class X(np.ndarray): + pass + a = X((3,), buffer=buf, dtype='i2') + assert type(a) is X + + def test_ndarray_from_buffer_and_offset(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*7) + buf[0] = 'X' + a = np.ndarray((3,), buffer=buf, offset=1, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['X', 'b', '\x00', 'a', '\x00', 'r', '\x00'] + + def test_ndarray_from_buffer_out_of_bounds(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*10) # 20 bytes + info = raises(TypeError, "np.ndarray((11,), buffer=buf, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + info = raises(TypeError, "np.ndarray((5,), buffer=buf, offset=15, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + + def test_ndarray_from_readonly_buffer(self): + import numpypy as np + from mmap import mmap, ACCESS_READ + f = open(self.tmpname, "w+") + f.write("hello") + f.flush() + buf = 
mmap(f.fileno(), 5, access=ACCESS_READ) + a = np.ndarray((5,), buffer=buf, dtype='c') + raises(ValueError, "a[0] = 'X'") + buf.close() + f.close() + + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -314,6 +314,14 @@ self.check_valid_writeable() self.mmap.setslice(start, string) + def is_writable(self): + try: + self.mmap.check_writeable() + except RMMapError: + return False + else: + return True + def get_raw_address(self): self.check_valid() return self.mmap.data From noreply at buildbot.pypy.org Mon Dec 2 17:50:31 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 2 Dec 2013 17:50:31 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131202165031.924B81C3264@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68360:d91af9e61a76 Date: 2013-12-02 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/d91af9e61a76/ Log: merge heads diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -14,8 +14,8 @@ eci = ExternalCompilationInfo(includes=['stdio.h', 'unistd.h', 'sys/types.h']) -def llexternal(*args): - return rffi.llexternal(*args, compilation_info=eci) +def llexternal(*args, **kwargs): + return rffi.llexternal(*args, compilation_info=eci, **kwargs) FILE = lltype.Struct('FILE') # opaque type maybe @@ -24,9 +24,9 @@ off_t = platform.SimpleType('off_t') + CC = platform.configure(CConfig) OFF_T = CC['off_t'] - c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) c_write = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, @@ -42,7 +42,8 @@ c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], lltype.Signed) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) -c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT) +c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) + c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) From noreply at buildbot.pypy.org Mon Dec 2 18:38:11 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 2 Dec 2013 18:38:11 +0100 (CET) Subject: [pypy-commit] pypy default: update path Message-ID: <20131202173811.0EB9D1C3035@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r68361:f16f95ba4f6a Date: 2013-12-02 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/f16f95ba4f6a/ Log: update path diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py --- a/pypy/testrunner_cfg.py +++ b/pypy/testrunner_cfg.py @@ -3,7 +3,7 @@ DIRS_SPLIT = [ 'translator/c', 'rlib', - 'rpython/memory', 'jit/metainterp', 'rpython/test', + 'memory/test', 'jit/metainterp', 'jit/backend/arm', 'jit/backend/x86', ] From noreply at buildbot.pypy.org Mon Dec 2 21:27:40 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 2 Dec 2013 21:27:40 +0100 (CET) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20131202202740.72AD01C3223@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r68362:cca37ad7e31c Date: 2013-11-30 06:18 +0200 http://bitbucket.org/pypy/pypy/changeset/cca37ad7e31c/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst 
+++ b/pypy/doc/whatsnew-head.rst @@ -25,3 +25,6 @@ .. branch: less-stringly-ops Use subclasses of SpaceOperation instead of SpaceOperator objects. Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor From noreply at buildbot.pypy.org Mon Dec 2 22:30:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 2 Dec 2013 22:30:00 +0100 (CET) Subject: [pypy-commit] pypy default: Systematically rename the C API functions of cpyext, from PyXxx Message-ID: <20131202213000.7AEB81C3298@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68363:cd3b6b2b06fd Date: 2013-12-02 20:31 +0000 http://bitbucket.org/pypy/pypy/changeset/cd3b6b2b06fd/ Log: Systematically rename the C API functions of cpyext, from PyXxx to PyPyXxx. Fix some details that are unhappy about the new macros. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -687,11 +687,11 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('init_bufferobject', [], lltype.Void, + init_buffer = rffi.llexternal('_PyPy_init_bufferobject', [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, + init_pycobject = rffi.llexternal('_PyPy_init_pycobject', [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_PyPy_init_capsule', [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -871,6 +871,7 @@ continue name = name.replace("#", "") newname = name.replace('Py', 'PyPy') + assert newname != name if not rename: newname = name pypy_macros.append('#define %s %s' % (name, newname)) @@ -1041,7 +1042,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, rename=True, do_deref=False) functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void 
init_bufferobject(void); +void _Py_init_bufferobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void _Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,7 +783,7 @@ return size; } -void init_bufferobject(void) +void _Py_init_bufferobject(void) { PyType_Ready(&PyBuffer_Type); } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/rpython/translator/c/src/exception.h b/rpython/translator/c/src/exception.h --- a/rpython/translator/c/src/exception.h +++ b/rpython/translator/c/src/exception.h @@ -36,7 +36,6 @@ /* prototypes */ -#define RPyRaiseSimpleException(exc, msg) _RPyRaiseSimpleException(R##exc) void _RPyRaiseSimpleException(RPYTHON_EXCEPTION rexc); #endif diff --git a/rpython/translator/c/src/support.h b/rpython/translator/c/src/support.h --- a/rpython/translator/c/src/support.h +++ b/rpython/translator/c/src/support.h @@ -5,13 +5,9 @@ #define RUNNING_ON_LLINTERP 0 #define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ -#define FAIL_EXCEPTION(exc, msg) \ - { \ - RPyRaiseSimpleException(exc, msg); \ - } -#define FAIL_OVF(msg) FAIL_EXCEPTION(PyExc_OverflowError, msg) -#define FAIL_VAL(msg) FAIL_EXCEPTION(PyExc_ValueError, msg) -#define FAIL_ZER(msg) FAIL_EXCEPTION(PyExc_ZeroDivisionError, msg) +#define FAIL_OVF(msg) _RPyRaiseSimpleException(RPyExc_OverflowError) +#define FAIL_VAL(msg) _RPyRaiseSimpleException(RPyExc_ValueError) +#define FAIL_ZER(msg) _RPyRaiseSimpleException(RPyExc_ZeroDivisionError) /* Extra checks can be enabled with the RPY_ASSERT or RPY_LL_ASSERT * macros. They differ in the level at which the tests are made. From noreply at buildbot.pypy.org Mon Dec 2 22:30:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 2 Dec 2013 22:30:01 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131202213001.DB1261C329B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68364:374c311e3d9d Date: 2013-12-02 21:29 +0000 http://bitbucket.org/pypy/pypy/changeset/374c311e3d9d/ Log: merge heads diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -25,3 +25,6 @@ .. branch: less-stringly-ops Use subclasses of SpaceOperation instead of SpaceOperator objects. 
Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -49,11 +49,24 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, + w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) - if owning: + if w_base is not None: + if owning: + raise OperationError(space.w_ValueError, + space.wrap("Cannot have owning=True when specifying a buffer")) + if writable: + impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, + backstrides, storage, w_base) + else: + impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, + strides, backstrides, + storage, w_base) + + elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, backstrides, storage=storage) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,3 +1,5 @@ +from rpython.rtyper.lltypesystem import rffi +from 
rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ @@ -20,6 +22,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): @@ -1067,13 +1070,35 @@ offset=0, w_strides=None, order='C'): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_buffer)): - raise OperationError(space.w_NotImplementedError, - space.wrap("unsupported param")) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) + + if not space.is_none(w_buffer): + if (not space.is_none(w_strides)): + raise OperationError(space.w_NotImplementedError, + space.wrap("unsupported param")) + + buf = space.buffer_w(w_buffer) + try: + raw_ptr = buf.get_raw_address() + except ValueError: + raise OperationError(space.w_TypeError, space.wrap( + "Only raw buffers are supported")) + if not shape: + raise OperationError(space.w_TypeError, space.wrap( + "numpy scalars from buffers not supported yet")) + totalsize = support.product(shape) * dtype.get_size() + if totalsize+offset > buf.getlength(): + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) + storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) + storage = rffi.ptradd(storage, offset) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + w_subtype=w_subtype, + w_base=w_buffer, + writable=buf.is_writable()) + if not shape: return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): @@ -1093,8 +1118,6 @@ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
""" - from rpython.rtyper.lltypesystem import rffi - from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -219,6 +219,7 @@ class AppTestNumArray(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def w_CustomIndexObject(self, index): class CustomIndexObject(object): def __init__(self, index): @@ -381,6 +382,8 @@ assert a.dtype is dtype(int) a = arange(3, 7, 2) assert (a == [3, 5]).all() + a = arange(3, 8, 2) + assert (a == [3, 5, 7]).all() a = arange(3, dtype=float) assert (a == [0., 1., 2.]).all() assert a.dtype is dtype(float) @@ -2087,6 +2090,69 @@ a = np.ndarray([1], dtype=bool) assert a[0] == True + +class AppTestNumArrayFromBuffer(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) + + def setup_class(cls): + from rpython.tool.udir import udir + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + + def test_ndarray_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + a = np.ndarray((3,), buffer=buf, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] + assert a.base is buf + + def test_ndarray_subclass_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + class X(np.ndarray): + pass + a = X((3,), buffer=buf, dtype='i2') + assert type(a) is X + + def test_ndarray_from_buffer_and_offset(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*7) + buf[0] = 'X' + a = np.ndarray((3,), buffer=buf, offset=1, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['X', 'b', '\x00', 'a', '\x00', 'r', '\x00'] + + def test_ndarray_from_buffer_out_of_bounds(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*10) # 20 bytes + info = raises(TypeError, "np.ndarray((11,), buffer=buf, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + info = raises(TypeError, "np.ndarray((5,), buffer=buf, offset=15, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + + def test_ndarray_from_readonly_buffer(self): + import numpypy as np + from mmap import mmap, ACCESS_READ + f = open(self.tmpname, "w+") + f.write("hello") + f.flush() + buf = mmap(f.fileno(), 5, access=ACCESS_READ) + a = np.ndarray((5,), buffer=buf, dtype='c') + raises(ValueError, "a[0] = 'X'") + buf.close() + f.close() + + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -314,6 +314,14 @@ self.check_valid_writeable() self.mmap.setslice(start, string) + def is_writable(self): + try: + self.mmap.check_writeable() + except RMMapError: + return False + else: + return True + def get_raw_address(self): self.check_valid() return self.mmap.data diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -1,20 
+1,20 @@ """This is not the JIT :-) -This is transformed to become a JIT by code elsewhere: pypy/jit/* +This is transformed to become a JIT by code elsewhere: rpython/jit/* """ -from rpython.tool.pairtype import extendabletype from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside from rpython.rlib import jit from rpython.rlib.jit import current_trace_length, unroll_parameters import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.pycode import PyCode, CO_GENERATOR +from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap + PyFrame._virtualizable_ = ['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', 'cells[*]', diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py --- a/pypy/testrunner_cfg.py +++ b/pypy/testrunner_cfg.py @@ -3,7 +3,7 @@ DIRS_SPLIT = [ 'translator/c', 'rlib', - 'rpython/memory', 'jit/metainterp', 'rpython/test', + 'memory/test', 'jit/metainterp', 'jit/backend/arm', 'jit/backend/x86', ] diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -14,8 +14,8 @@ eci = ExternalCompilationInfo(includes=['stdio.h', 'unistd.h', 'sys/types.h']) -def llexternal(*args): - return rffi.llexternal(*args, compilation_info=eci) +def llexternal(*args, **kwargs): + return rffi.llexternal(*args, compilation_info=eci, **kwargs) FILE = lltype.Struct('FILE') # opaque type maybe @@ -24,9 +24,9 @@ off_t = platform.SimpleType('off_t') + CC = platform.configure(CConfig) OFF_T = CC['off_t'] - c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) c_write = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, @@ -42,7 +42,8 @@ c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], lltype.Signed) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) -c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT) +c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) + c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) From noreply at buildbot.pypy.org Tue Dec 3 11:04:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 3 Dec 2013 11:04:40 +0100 (CET) Subject: [pypy-commit] pypy default: Fix Windows translation, and also possibly fix running Message-ID: <20131203100440.8B6EA1C05B5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68365:32b194501e90 Date: 2013-12-03 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/32b194501e90/ Log: Fix Windows translation, and also possibly fix running untranslated tests diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -687,11 +687,15 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('_PyPy_init_bufferobject', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('_PyPy_init_pycobject', [], lltype.Void, + init_pycobject = 
rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('_PyPy_init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -699,12 +703,8 @@ lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - if translating: - reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) - else: - reinit_tls = rffi.llexternal('PyPyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, + compilation_info=eci) add_fork_hook('child', reinit_tls) def init_function(func): @@ -746,7 +746,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -812,7 +812,7 @@ INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -824,7 +824,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -859,29 +859,23 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - assert newname != name - if not rename: - newname = name + if name.startswith('Py'): + newname = prefix + name[2:] + elif name.startswith('_Py'): + newname = '_' + prefix + name[3:] + else: + assert False, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1042,7 +1036,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyInt_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing 
PyPy's functions" +#endif + return PyInt_FromLong(PyThread_get_thread_ident()); """), ]) import thread, threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), From noreply at buildbot.pypy.org Tue Dec 3 14:26:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 3 Dec 2013 14:26:41 +0100 (CET) Subject: [pypy-commit] pypy default: Next try Message-ID: <20131203132641.88BBD1C32BC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68366:5d45e9c2036b Date: 2013-12-03 14:24 +0100 http://bitbucket.org/pypy/pypy/changeset/5d45e9c2036b/ Log: Next try diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -859,18 +859,22 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') +def mangle_name(prefix, name): + if name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: name = name.replace("#", "") - if name.startswith('Py'): - newname = prefix + name[2:] - elif name.startswith('_Py'): - newname = '_' + prefix + name[3:] - else: - assert False, name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) @@ -1066,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) From noreply at buildbot.pypy.org Tue Dec 3 17:26:57 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 3 Dec 2013 17:26:57 +0100 (CET) Subject: [pypy-commit] pypy default: Handle empty cells in flowspace: Message-ID: 
<20131203162657.B4D291C3282@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68367:bb3fa3d8d35b Date: 2013-12-03 16:26 +0000 http://bitbucket.org/pypy/pypy/changeset/bb3fa3d8d35b/ Log: Handle empty cells in flowspace: don't explode if they are not actually used, give a nice error message if they are. diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -329,7 +329,7 @@ if closure is None: self.closure = [] else: - self.closure = [const(c.cell_contents) for c in closure] + self.closure = list(closure) assert len(self.closure) == len(self.pycode.co_freevars) def init_locals_stack(self, code): @@ -846,7 +846,13 @@ LOOKUP_METHOD = LOAD_ATTR def LOAD_DEREF(self, varindex): - self.pushvalue(self.closure[varindex]) + cell = self.closure[varindex] + try: + content = cell.cell_contents + except ValueError: + name = self.pycode.co_freevars[varindex] + raise FlowingError("Undefined closure variable '%s'" % name) + self.pushvalue(const(content)) def STORE_FAST(self, varindex): w_newvalue = self.popvalue() diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1212,6 +1212,39 @@ graph = self.codetest(f) assert 'getattr' in self.all_operations(graph) + def test_empty_cell_unused(self): + def test(flag): + if flag: + b = 5 + def g(): + if flag: + return b + else: + return 1 + return g + g1 = test(False) + graph = self.codetest(g1) + assert not self.all_operations(graph) + g2 = test(True) + graph = self.codetest(g2) + assert not self.all_operations(graph) + + def test_empty_cell_error(self): + def test(flag): + if not flag: + b = 5 + def g(): + if flag: + return b + else: + return 1 + return g + g = test(True) + with py.test.raises(FlowingError) as excinfo: + graph = self.codetest(g) + assert "Undefined closure variable 'b'" in str(excinfo.value) + + DATA = {'x': 5, 'y': 6} From noreply at buildbot.pypy.org Tue Dec 3 19:39:51 2013 From: noreply at buildbot.pypy.org (vext01) Date: Tue, 3 Dec 2013 19:39:51 +0100 (CET) Subject: [pypy-commit] pypy better_ftime_detect2: On OpenBSD do not pull in libcompat.a as it is about to be removed. Message-ID: <20131203183951.6CEBA1C0189@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: better_ftime_detect2 Changeset: r68368:f0031ea56095 Date: 2013-12-03 18:02 +0000 http://bitbucket.org/pypy/pypy/changeset/f0031ea56095/ Log: On OpenBSD do not pull in libcompat.a as it is about to be removed. And more generally, if you have gettimeofday(2) you will not need ftime(3). diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -86,16 +86,18 @@ c_gettimeofday = self.llexternal('gettimeofday', [self.TIMEVALP, rffi.VOIDP], rffi.INT, _nowrapper=True, releasegil=False) + c_ftime = None # We have gettimeofday(2), so force ftime(3) OFF. else: c_gettimeofday = None - if self.HAVE_FTIME: - self.configure(CConfigForFTime) - c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], - lltype.Void, - _nowrapper=True, releasegil=False) - else: - c_ftime = None # to not confuse the flow space + # Only look for ftime(3) if gettimeofday(2) was not found. 
+ if self.HAVE_FTIME: + self.configure(CConfigForFTime) + c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], + lltype.Void, + _nowrapper=True, releasegil=False) + else: + c_ftime = None # to not confuse the flow space c_time = self.llexternal('time', [rffi.VOIDP], rffi.TIME_T, _nowrapper=True, releasegil=False) @@ -115,9 +117,9 @@ if rffi.cast(rffi.LONG, errcode) == 0: result = decode_timeval(t) lltype.free(t, flavor='raw') - if result != -1: - return result - if self.HAVE_FTIME: + if result != -1: + return result + else: # assume using ftime(3) t = lltype.malloc(self.TIMEB, flavor='raw') c_ftime(t) result = (float(intmask(t.c_time)) + diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -13,7 +13,7 @@ ] + os.environ.get("CFLAGS", "").split() def _libs(self, libraries): - libraries=set(libraries + ("intl", "iconv", "compat")) + libraries=set(libraries + ("intl", "iconv")) return ['-l%s' % lib for lib in libraries if lib not in ["crypt", "dl", "rt"]] class OpenBSD_64(OpenBSD): From noreply at buildbot.pypy.org Tue Dec 3 19:39:52 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 3 Dec 2013 19:39:52 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in vext01/pypy/better_ftime_detect2 (pull request #201) Message-ID: <20131203183952.BA5031C0189@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68369:d545ceccdf72 Date: 2013-12-03 18:39 +0000 http://bitbucket.org/pypy/pypy/changeset/d545ceccdf72/ Log: Merged in vext01/pypy/better_ftime_detect2 (pull request #201) On OpenBSD do not pull in libcompat.a as it is about to be removed. diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -86,16 +86,18 @@ c_gettimeofday = self.llexternal('gettimeofday', [self.TIMEVALP, rffi.VOIDP], rffi.INT, _nowrapper=True, releasegil=False) + c_ftime = None # We have gettimeofday(2), so force ftime(3) OFF. else: c_gettimeofday = None - if self.HAVE_FTIME: - self.configure(CConfigForFTime) - c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], - lltype.Void, - _nowrapper=True, releasegil=False) - else: - c_ftime = None # to not confuse the flow space + # Only look for ftime(3) if gettimeofday(2) was not found. 
+ if self.HAVE_FTIME: + self.configure(CConfigForFTime) + c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], + lltype.Void, + _nowrapper=True, releasegil=False) + else: + c_ftime = None # to not confuse the flow space c_time = self.llexternal('time', [rffi.VOIDP], rffi.TIME_T, _nowrapper=True, releasegil=False) @@ -115,9 +117,9 @@ if rffi.cast(rffi.LONG, errcode) == 0: result = decode_timeval(t) lltype.free(t, flavor='raw') - if result != -1: - return result - if self.HAVE_FTIME: + if result != -1: + return result + else: # assume using ftime(3) t = lltype.malloc(self.TIMEB, flavor='raw') c_ftime(t) result = (float(intmask(t.c_time)) + diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -13,7 +13,7 @@ ] + os.environ.get("CFLAGS", "").split() def _libs(self, libraries): - libraries=set(libraries + ("intl", "iconv", "compat")) + libraries=set(libraries + ("intl", "iconv")) return ['-l%s' % lib for lib in libraries if lib not in ["crypt", "dl", "rt"]] class OpenBSD_64(OpenBSD): From noreply at buildbot.pypy.org Wed Dec 4 11:09:02 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 4 Dec 2013 11:09:02 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131204100902.2B4CF1C05B5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68371:3b9f540a7ba1 Date: 2013-12-04 12:04 +0200 http://bitbucket.org/pypy/pypy/changeset/3b9f540a7ba1/ Log: cleanup diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -71,7 +71,6 @@ final_strides = arr.get_strides() + strides final_backstrides = arr.get_backstrides() + backstrides final_dtype = subdtype - print self.name,'strides',arr.get_strides(),strides if subdtype.subdtype: final_dtype = subdtype.subdtype return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3124,9 +3124,6 @@ exc = raises(IndexError, "a[0][None]") assert exc.value.message == "invalid index" - exc = raises(IndexError, "a[0][None]") - assert exc.value.message == 'invalid index' - a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 From noreply at buildbot.pypy.org Wed Dec 4 11:09:03 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 4 Dec 2013 11:09:03 +0100 (CET) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20131204100903.92A5D1C1500@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68372:2e9fca498e95 Date: 2013-12-04 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/2e9fca498e95/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -28,3 +28,7 @@ .. branch: ndarray-buffer adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). 
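To make the ftime/gettimeofday preference in the better_ftime_detect2 changesets above concrete: the idea is simply that when the libc provides gettimeofday(2), ftime(3) and its header are never needed. Below is a minimal, hedged sketch of that same fallback order in plain Python via ctypes; it is only an illustration, not code from any changeset above, and it assumes a POSIX libc and that time_t and suseconds_t both fit in a C long (the real struct layout differs on some platforms).

import ctypes
import ctypes.util

class Timeval(ctypes.Structure):
    # struct timeval; field types are an assumption (see note above)
    _fields_ = [("tv_sec", ctypes.c_long),
                ("tv_usec", ctypes.c_long)]

# "libc.so.6" is only a fallback guess if find_library() returns nothing
_libc = ctypes.CDLL(ctypes.util.find_library("c") or "libc.so.6",
                    use_errno=True)

def floattime():
    """Seconds since the epoch as a float, preferring gettimeofday(2)."""
    if hasattr(_libc, "gettimeofday"):
        _libc.gettimeofday.argtypes = [ctypes.POINTER(Timeval),
                                       ctypes.c_void_p]
        _libc.gettimeofday.restype = ctypes.c_int
        tv = Timeval()
        if _libc.gettimeofday(ctypes.byref(tv), None) == 0:
            return tv.tv_sec + tv.tv_usec * 1e-6
    # Fallback: time(2) gives whole seconds only, but is always there;
    # ftime(3) is never required on this path.
    _libc.time.argtypes = [ctypes.c_void_p]
    _libc.time.restype = ctypes.c_long
    return float(_libc.time(None))

if __name__ == "__main__":
    print(floattime())
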
From noreply at buildbot.pypy.org Wed Dec 4 15:45:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 4 Dec 2013 15:45:14 +0100 (CET) Subject: [pypy-commit] pypy default: Document that ctypes.pythonapi is not really working. Message-ID: <20131204144514.22CAF1C156C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68373:da5b52720446 Date: 2013-12-04 15:44 +0100 http://bitbucket.org/pypy/pypy/changeset/da5b52720446/ Log: Document that ctypes.pythonapi is not really working. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... Note that some modules are both in there and in the list above; @@ -316,5 +316,9 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + e.g. the GIL. Since PyPy 2.3, the functions are called with an extra + "Py", for example ``PyPyInt_FromLong()``. .. include:: _ref.txt From noreply at buildbot.pypy.org Wed Dec 4 15:59:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 4 Dec 2013 15:59:31 +0100 (CET) Subject: [pypy-commit] pypy default: Move this doc into its proper place and expand a bit. Message-ID: <20131204145931.61A3D1C05B5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68374:f7b797f79170 Date: 2013-12-04 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/f7b797f79170/ Log: Move this doc into its proper place and expand a bit. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -316,9 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - e.g. the GIL. Since PyPy 2.3, the functions are called with an extra - "Py", for example ``PyPyInt_FromLong()``. - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) 
* We copy Python strings instead of having pointers to raw buffers From noreply at buildbot.pypy.org Wed Dec 4 18:27:54 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 4 Dec 2013 18:27:54 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add Christian Clauss Message-ID: <20131204172754.0992C1C05B5@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5113:14ae32e9371f Date: 2013-12-04 18:27 +0100 http://bitbucket.org/pypy/extradoc/changeset/14ae32e9371f/ Log: Add Christian Clauss diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -12,6 +12,7 @@ ==================== ============== ======================= Armin Rigo private Romain Guillebert 11-19 Ermina +Christian Clauss 11-12 & 18-19 I live nearby ==================== ============== ======================= From noreply at buildbot.pypy.org Wed Dec 4 21:28:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 4 Dec 2013 21:28:13 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add this to remember Message-ID: <20131204202813.D72541C0189@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5114:f9c09ff7e6a7 Date: 2013-12-04 21:28 +0100 http://bitbucket.org/pypy/extradoc/changeset/f9c09ff7e6a7/ Log: Add this to remember diff --git a/talk/pycon2014/language-summit.rst b/talk/pycon2014/language-summit.rst new file mode 100644 --- /dev/null +++ b/talk/pycon2014/language-summit.rst @@ -0,0 +1,7 @@ +---------------------------- +Language summit presentation +---------------------------- + +We should give a ~10 minute presentation about the status of PyPy. + +(Asked by Michael Foord) From noreply at buildbot.pypy.org Thu Dec 5 01:15:53 2013 From: noreply at buildbot.pypy.org (vext01) Date: Thu, 5 Dec 2013 01:15:53 +0100 (CET) Subject: [pypy-commit] pypy timeb_h: OpenBSD will no longer have soon. Message-ID: <20131205001553.D73501C02C7@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: timeb_h Changeset: r68375:9e9c24bc41fa Date: 2013-12-04 13:54 +0000 http://bitbucket.org/pypy/pypy/changeset/9e9c24bc41fa/ Log: OpenBSD will no longer have soon. This diff fixes the build for an OpenBSD system with the header missing. In the long term we should look for a way to make timeb.h optional regardless of platform. Most platforms will have gettimeofday(2), so timeb.h would not be required anyway. 
diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -24,8 +24,12 @@ FTIME = 'ftime' STRUCT_TIMEB = 'struct timeb' includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h', - 'sys/types.h', 'unistd.h', 'sys/timeb.h', + 'sys/types.h', 'unistd.h', 'sys/time.h', 'sys/resource.h'] + + if not sys.platform.startswith("openbsd"): + includes.append('sys/timeb.h') + need_rusage = True From noreply at buildbot.pypy.org Thu Dec 5 01:15:55 2013 From: noreply at buildbot.pypy.org (vext01) Date: Thu, 5 Dec 2013 01:15:55 +0100 (CET) Subject: [pypy-commit] pypy timeb_h: merge default Message-ID: <20131205001555.5338E1C02C7@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: timeb_h Changeset: r68376:260cdc065a06 Date: 2013-12-04 22:35 +0000 http://bitbucket.org/pypy/pypy/changeset/260cdc065a06/ Log: merge default diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -28,3 +28,7 @@ .. branch: ndarray-buffer adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). 
diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -71,7 +71,6 @@ final_strides = arr.get_strides() + strides final_backstrides = arr.get_backstrides() + backstrides final_dtype = subdtype - print self.name,'strides',arr.get_strides(),strides if subdtype.subdtype: final_dtype = subdtype.subdtype return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3124,9 +3124,6 @@ exc = raises(IndexError, "a[0][None]") assert exc.value.message == "invalid index" - exc = raises(IndexError, "a[0][None]") - assert exc.value.message == 'invalid index' - a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 From noreply at buildbot.pypy.org Thu Dec 5 01:15:56 2013 From: noreply at buildbot.pypy.org (vext01) Date: Thu, 5 Dec 2013 01:15:56 +0100 (CET) Subject: [pypy-commit] pypy timeb_h: Document change in whatsnew-head.rst. Message-ID: <20131205001556.755671C02C7@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: timeb_h Changeset: r68377:390d8f89f47d Date: 2013-12-05 00:09 +0000 http://bitbucket.org/pypy/pypy/changeset/390d8f89f47d/ Log: Document change in whatsnew-head.rst. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -32,3 +32,7 @@ .. branch: better_ftime_detect2 On OpenBSD do not pull in libcompat.a as it is about to be removed. And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. From noreply at buildbot.pypy.org Thu Dec 5 01:15:57 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 5 Dec 2013 01:15:57 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in vext01/pypy/timeb_h (pull request #202) Message-ID: <20131205001557.A6EC61C02C7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68378:26582ea5fc1d Date: 2013-12-05 00:14 +0000 http://bitbucket.org/pypy/pypy/changeset/26582ea5fc1d/ Log: Merged in vext01/pypy/timeb_h (pull request #202) OpenBSD will no longer have soon. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -32,3 +32,7 @@ .. branch: better_ftime_detect2 On OpenBSD do not pull in libcompat.a as it is about to be removed. And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. 
diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -24,8 +24,12 @@ FTIME = 'ftime' STRUCT_TIMEB = 'struct timeb' includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h', - 'sys/types.h', 'unistd.h', 'sys/timeb.h', + 'sys/types.h', 'unistd.h', 'sys/time.h', 'sys/resource.h'] + + if not sys.platform.startswith("openbsd"): + includes.append('sys/timeb.h') + need_rusage = True From noreply at buildbot.pypy.org Thu Dec 5 12:07:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 5 Dec 2013 12:07:20 +0100 (CET) Subject: [pypy-commit] pypy default: xfail this test too Message-ID: <20131205110720.E7BF51C14FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68380:f9daeaa5f3c7 Date: 2013-12-05 12:06 +0100 http://bitbucket.org/pypy/pypy/changeset/f9daeaa5f3c7/ Log: xfail this test too diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p From noreply at buildbot.pypy.org Thu Dec 5 14:52:32 2013 From: noreply at buildbot.pypy.org (OlivierBlanvillain) Date: Thu, 5 Dec 2013 14:52:32 +0100 (CET) Subject: [pypy-commit] pypy OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215: Fix 3 broken links on PyPy published papers. Message-ID: <20131205135232.A957D1C13DF@cobra.cs.uni-duesseldorf.de> Author: OlivierBlanvillain Branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 Changeset: r68381:ccbf54dd3498 Date: 2013-12-05 13:48 +0000 http://bitbucket.org/pypy/pypy/changeset/ccbf54dd3498/ Log: Fix 3 broken links on PyPy published papers. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://www.stups.uni-duesseldorf.de/mediawiki/images/b/b0/Pub-BoCuFiLePeRi2011.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. 
_`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf From noreply at buildbot.pypy.org Thu Dec 5 14:52:33 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 5 Dec 2013 14:52:33 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in OlivierBlanvillain/pypy/OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 (pull request #203) Message-ID: <20131205135233.E5E141C13DF@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r68382:b9f9b061c1f2 Date: 2013-12-05 14:51 +0100 http://bitbucket.org/pypy/pypy/changeset/b9f9b061c1f2/ Log: Merged in OlivierBlanvillain/pypy/OlivierBlanvillain/fix-3-broken- links-on-pypy-published-pap-1386250839215 (pull request #203) Fix 3 broken links on PyPy published papers. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://www.stups.uni-duesseldorf.de/mediawiki/images/b/b0/Pub-BoCuFiLePeRi2011.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. 
_`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf From noreply at buildbot.pypy.org Thu Dec 5 21:48:32 2013 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 5 Dec 2013 21:48:32 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: virtualizable2 is now virtualizable Message-ID: <20131205204832.B9C9D1C1160@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r515:a17cd38881fe Date: 2013-12-05 21:48 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a17cd38881fe/ Log: virtualizable2 is now virtualizable diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -443,7 +443,7 @@ _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] - _virtualizable2_ = [ + _virtualizable_ = [ "_s_sender", "_pc", "_temps_and_stack[*]", "_stack_ptr", "_w_self", "_w_self_size" @@ -1254,7 +1254,7 @@ def check_overlap(self): self.h_dir = 1 self.v_dir = 1 - if (self.source_form is not None and + if (self.source_form is not None and self.source_form.w_self().is_same_object(self.dest_form.w_self()) and self.dy >= self.sy): if self.dy > self.sy: From noreply at buildbot.pypy.org Thu Dec 5 23:36:04 2013 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 5 Dec 2013 23:36:04 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: try to fix nextInstance if not called in the same frame as someInstance Message-ID: <20131205223604.DE9CF1C0F12@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r516:e06588225b5a Date: 2013-12-05 23:22 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/e06588225b5a/ Log: try to fix nextInstance if not called in the same frame as someInstance diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -515,54 +515,70 @@ w_frame.store(interp.space, constants.CTXPART_STACKP_INDEX, interp.space.wrap_int(stackp)) return w_frame - at expose_primitive(SOME_INSTANCE, unwrap_spec=[object]) -def func(interp, s_frame, w_class): +def get_instances_array(space, s_frame, w_class): # This primitive returns some instance of the class on the stack. # Not sure quite how to do this; maintain a weak list of all # existing instances or something? 
- from rpython.rlib import rgc - match_w = [] - roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] - pending = roots[:] - while pending: - gcref = pending.pop() - if not rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - w_obj = rgc.try_cast_gcref_to_instance(model.W_Object, gcref) - if (w_obj is not None and w_obj.has_class() - and w_obj.getclass(interp.space) is w_class): - match_w.append(w_obj) - pending.extend(rgc.get_rpy_referents(gcref)) + match_w = s_frame.instances_array(w_class) + if match_w is None: + from rpython.rlib import rgc - while roots: - gcref = roots.pop() - if rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - roots.extend(rgc.get_rpy_referents(gcref)) + match_w = [] + roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] + pending = roots[:] + while pending: + gcref = pending.pop() + if not rgc.get_gcflag_extra(gcref): + rgc.toggle_gcflag_extra(gcref) + w_obj = rgc.try_cast_gcref_to_instance(model.W_Object, gcref) + if (w_obj is not None and w_obj.has_class() + and w_obj.getclass(space) is w_class): + match_w.append(w_obj) + pending.extend(rgc.get_rpy_referents(gcref)) - s_frame.store_instances_array(match_w) + while roots: + gcref = roots.pop() + if rgc.get_gcflag_extra(gcref): + rgc.toggle_gcflag_extra(gcref) + roots.extend(rgc.get_rpy_referents(gcref)) + s_frame.store_instances_array(w_class, match_w) + return match_w + + at expose_primitive(SOME_INSTANCE, unwrap_spec=[object]) +def func(interp, s_frame, w_class): + match_w = get_instances_array(interp.space, s_frame, w_class) try: - return match_w.pop() + return match_w[0] except IndexError: raise PrimitiveFailedError() def next_instance(space, list_of_objects, w_obj): + retval = None try: - retval = list_of_objects.pop() - # just in case, that one of the objects in the list changes its class - if retval.getclass(space).is_same_object(w_obj.getclass(space)): - return retval - else: - return next_instance(space, list_of_objects, w_obj) + idx = list_of_objects.index(w_obj) + except ValueError: + idx = -1 + try: + retval = list_of_objects[idx + 1] except IndexError: raise PrimitiveFailedError() + # just in case, that one of the objects in the list changes its class + if retval.getclass(space).is_same_object(w_obj.getclass(space)): + return retval + else: + list_of_objects.pop(idx + 1) + return next_instance(space, list_of_objects, w_obj) @expose_primitive(NEXT_INSTANCE, unwrap_spec=[object]) def func(interp, s_frame, w_obj): # This primitive is used to iterate through all instances of a class: # it returns the "next" instance after w_obj. 
- return next_instance(interp.space, s_frame.instances_array(), w_obj) + return next_instance( + interp.space, + get_instances_array(interp.space, s_frame, w_obj.getclass(interp.space)), + w_obj + ) @expose_primitive(NEW_METHOD, unwrap_spec=[object, int, int]) def func(interp, s_frame, w_class, bytecount, header): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -452,7 +452,7 @@ def __init__(self, space, w_self): self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self) - self.instances_w = None + self.instances_w = {} @staticmethod def is_block_context(w_pointers, space): @@ -709,12 +709,13 @@ self._w_self_size = w_self.size() return w_self - def store_instances_array(self, list_w): + def store_instances_array(self, w_class, match_w): # used for primitives 77 & 78 - self.instances_w = list_w + self.instances_w[w_class] = match_w - def instances_array(self): - return self.instances_w + @jit.elidable + def instances_array(self, w_class): + return self.instances_w.get(w_class, None) # ______________________________________________________________________ # Debugging printout diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -652,6 +652,23 @@ assert w_2.getclass(space) is space.w_Array assert w_1 is not w_2 +def test_primitive_next_instance_wo_some_instance_in_same_frame(): + someInstances = map(space.wrap_list, [[2], [3]]) + from test_interpreter import new_frame + w_frame, s_context = new_frame("", + space=space) + + s_context.push(space.w_Array) + interp = interpreter.Interpreter(space) + w_1 = someInstances[0] + assert w_1.getclass(space) is space.w_Array + + s_context.push(w_1) + prim_table[primitives.NEXT_INSTANCE](interp, s_context, 0) + w_2 = s_context.pop() + assert w_2.getclass(space) is space.w_Array + assert w_1 is not w_2 + def test_primitive_value_no_context_switch(monkeypatch): class Context_switched(Exception): pass From noreply at buildbot.pypy.org Thu Dec 5 23:39:46 2013 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 5 Dec 2013 23:39:46 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: commit Squeak4.5 image with browser open. we can click! Message-ID: <20131205223946.714AA1C0F12@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r517:3c66c621593f Date: 2013-12-05 23:38 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/3c66c621593f/ Log: commit Squeak4.5 image with browser open. we can click! diff too long, truncating to 2000 out of 80830 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! 
scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. 
excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. 
^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. 
self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! 
init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! 
context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! 
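To see how the cue machinery above fits together, here is a rough workspace-style sketch; it is not taken from the changeset itself, it assumes the Environments support this changeset targets (Environment default) plus an arbitrary '3 + 4' doit, and the results noted in the comments are what the methods shown here imply rather than captured output.

    | env cue |
    env := Environment default.
    "Convenience constructor: everything except the source and the environment
     defaults to nil, and the class defaults to UndefinedObject."
    cue := CompilationCue source: '3 + 4' environment: env.
    cue getClass.   "UndefinedObject"
    cue requestor.  "nil, so compilation runs non-interactively"

    "The Compiler class-side entry point defined further down builds the same
     kind of cue internally and passes it to evaluate:cue:ifFail:logged:."
    Compiler evaluate: '3 + 4' environment: env.   "7"

Bundling source, context, receiver, class, environment, category and requestor into a single cue object, instead of threading them through as separate arguments, is also what lets the various setCue: methods in this changeset keep the legacy instance variables in sync while newer code reads everything from the cue.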
!CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. 
Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). 
TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! 
evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. 
First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. 
This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! 
superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." | oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. (aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! 
superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. "Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. 
cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. ^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. 
cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! 
pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! 
removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). 
If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! 
classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. 
***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). 
newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). "Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. 
TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Fri Dec 6 01:27:42 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 6 Dec 2013 01:27:42 +0100 (CET) Subject: [pypy-commit] pypy default: fix axis reduce on numpy arrays with zero shape (issue1650) Message-ID: <20131206002742.C76821C1160@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68384:3fca57d5b11e Date: 2013-12-05 19:26 -0500 http://bitbucket.org/pypy/pypy/changeset/3fca57d5b11e/ Log: fix axis reduce on numpy arrays with zero shape (issue1650) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -238,13 +238,16 @@ while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - w_val = arr_iter.getitem().convert_to(dtype) - if out_iter.first_line: - if identity is not None: - w_val = func(dtype, identity, w_val) + if arr_iter.done(): + w_val = identity else: - cur = temp_iter.getitem() - w_val = func(dtype, cur, w_val) + w_val = arr_iter.getitem().convert_to(dtype) + if out_iter.first_line: + if identity is not None: + w_val = func(dtype, identity, w_val) + else: + cur = temp_iter.getitem() + w_val = func(dtype, cur, w_val) out_iter.setitem(w_val) if cumulative: temp_iter.setitem(w_val) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1300,6 +1300,8 @@ assert a.sum() == 105 assert a.max() == 14 assert array([]).sum() == 0.0 + assert array([]).reshape(0, 2).sum() == 0. 
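# Note on the do_axis_reduce() change above: when the reduced axis has
# length 0, the inner array iterator is exhausted from the start, so every
# output cell is filled with the ufunc's identity (0 for add, 1 for
# multiply), which is exactly what the asserts added just below check.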
+ assert (array([]).reshape(0, 2).sum(0) == [0., 0.]).all() raises(ValueError, 'array([]).max()') assert (a.sum(0) == [30, 35, 40]).all() assert (a.sum(axis=0) == [30, 35, 40]).all() From noreply at buildbot.pypy.org Fri Dec 6 01:29:43 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 6 Dec 2013 01:29:43 +0100 (CET) Subject: [pypy-commit] pypy default: test this too Message-ID: <20131206002943.E2DF61C1160@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68385:1980907e6bad Date: 2013-12-05 19:29 -0500 http://bitbucket.org/pypy/pypy/changeset/1980907e6bad/ Log: test this too diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1302,6 +1302,7 @@ assert array([]).sum() == 0.0 assert array([]).reshape(0, 2).sum() == 0. assert (array([]).reshape(0, 2).sum(0) == [0., 0.]).all() + assert (array([]).reshape(0, 2).prod(0) == [1., 1.]).all() raises(ValueError, 'array([]).max()') assert (a.sum(0) == [30, 35, 40]).all() assert (a.sum(axis=0) == [30, 35, 40]).all() From noreply at buildbot.pypy.org Fri Dec 6 08:52:54 2013 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 6 Dec 2013 08:52:54 +0100 (CET) Subject: [pypy-commit] pypy default: document merged PR Message-ID: <20131206075254.0EBF21C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r68386:dcddb68da5e0 Date: 2013-12-06 08:51 +0100 http://bitbucket.org/pypy/pypy/changeset/dcddb68da5e0/ Log: document merged PR diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -36,3 +36,6 @@ .. branch: timeb_h Remove dependency upon on OpenBSD. This will be disappearing along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. From noreply at buildbot.pypy.org Fri Dec 6 09:54:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 6 Dec 2013 09:54:00 +0100 (CET) Subject: [pypy-commit] pypy default: Link directly to the original bitbucket raw data, in hope that the link Message-ID: <20131206085400.486381C1160@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68387:0d0806188b21 Date: 2013-12-06 09:52 +0100 http://bitbucket.org/pypy/pypy/changeset/0d0806188b21/ Log: Link directly to the original bitbucket raw data, in hope that the link will remain valid longer. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,7 +72,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://www.stups.uni-duesseldorf.de/mediawiki/images/b/b0/Pub-BoCuFiLePeRi2011.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. 
_`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf From noreply at buildbot.pypy.org Fri Dec 6 17:23:13 2013 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 6 Dec 2013 17:23:13 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: make sure float hashes are within 31bit Message-ID: <20131206162313.A126E1C3282@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r518:41c1b605fccf Date: 2013-12-06 09:51 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/41c1b605fccf/ Log: make sure float hashes are within 31bit diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -355,7 +355,7 @@ return space.w_Float def gethash(self): - return compute_hash(self.value) + return intmask(compute_hash(self.value)) // 2 def invariant(self): return isinstance(self.value, float) From noreply at buildbot.pypy.org Fri Dec 6 17:23:15 2013 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 6 Dec 2013 17:23:15 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: non variable-sized classes can be created through prim 71 if size is 0 Message-ID: <20131206162315.3068A1C2FBC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r519:a8441ef9359e Date: 2013-12-06 09:52 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a8441ef9359e/ Log: non variable-sized classes can be created through prim 71 if size is 0 diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -470,7 +470,7 @@ def func(interp, s_frame, w_cls, size): assert isinstance(w_cls, model.W_PointersObject) s_class = w_cls.as_class_get_shadow(interp.space) - if not s_class.isvariable(): + if not s_class.isvariable() and size != 0: raise PrimitiveFailedError() try: return s_class.new(size) @@ -632,7 +632,7 @@ combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) if combinationRule > 41: raise PrimitiveFailedError - + space = interp.space s_bitblt = w_rcvr.as_bitblt_get_shadow(space) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -300,6 +300,14 @@ assert w_res.getclass(space).is_same_object(space.w_String) assert w_res.size() == 20 +def test_new_with_arg_for_non_variable_sized(): + prim_fails(primitives.NEW_WITH_ARG, [space.classtable['w_ArrayedCollection'], 10]) + +def test_new_with_arg_for_non_variable_sized0(): + w_res = prim(primitives.NEW_WITH_ARG, [space.classtable['w_ArrayedCollection'], 0]) + assert w_res.getclass(space).is_same_object(space.classtable['w_ArrayedCollection']) + assert w_res.size() == 0 + def test_invalid_new_with_arg(): w_Object = space.classtable['w_Object'] prim_fails(primitives.NEW_WITH_ARG, [w_Object, 20]) From noreply at buildbot.pypy.org Fri Dec 6 17:23:16 2013 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 6 Dec 2013 17:23:16 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: selectors may not be ByteSymbols Message-ID: <20131206162316.6F21D1C3141@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r520:7c9cb17fa6e9 Date: 2013-12-06 12:31 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/7c9cb17fa6e9/ Log: selectors may not be ByteSymbols diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -341,7 +341,7 @@ assert isinstance(w_selector, model.W_BytesObject) if 
interp.should_trace(): print "%sSending selector %r to %r with: %r" % ( - interp._last_indent, w_selector.as_string(), receiver, + interp._last_indent, w_selector.as_repr_string(), receiver, [self.peek(argcount-1-i) for i in range(argcount)]) assert argcount >= 0 @@ -385,7 +385,7 @@ # ###################################################################### if interp.trace: - print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.as_string()) + print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.as_repr_string()) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() @@ -400,7 +400,7 @@ if interp.trace: print "%s-> primitive %d \t(in #%s, named #%s)" % ( ' ' * (interp.max_stack_depth - interp.remaining_stack_depth), - code, self.w_method()._likely_methodname, w_selector.as_string()) + code, self.w_method()._likely_methodname, w_selector.as_repr_string()) try: # note: argcount does not include rcvr return func(interp, self, argcount, s_method) @@ -410,7 +410,7 @@ ' ' * (interp.max_stack_depth - interp.remaining_stack_depth),) if interp.should_trace(True): - print "PRIMITIVE FAILED: %d %s" % (s_method.primitive, w_selector.as_string(),) + print "PRIMITIVE FAILED: %d %s" % (s_method.primitive, w_selector.as_repr_string()) raise e @@ -878,19 +878,19 @@ receiver, receiverclassshadow): options = [False] def next(): interp.message_stepping = True; print 'Now continue (c).' - def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' % w_selector.as_string() + def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' % w_selector.as_repr_string() def pstack(): print s_context.print_stack() if interp.message_stepping: if argcount == 0: print "-> %s %s" % (receiver.as_repr_string(), - w_selector.as_string()) + w_selector.as_repr_string()) elif argcount == 1: print "-> %s %s %s" % (receiver.as_repr_string(), - w_selector.as_string(), + w_selector.as_repr_string(), s_context.peek(0).as_repr_string()) else: print "-> %s %s %r" % (receiver.as_repr_string(), - w_selector.as_string(), + w_selector.as_repr_string(), [s_context.peek(argcount-1-i) for i in range(argcount)]) import pdb; pdb.set_trace() if options[0]: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -823,6 +823,7 @@ EXIT_TO_DEBUGGER = 114 CHANGE_CLASS = 115 # Blue Book: primitiveOopsLeft EXTERNAL_CALL = 117 +SYMBOL_FLUSH_CACHE = 119 @expose_primitive(EQUIVALENT, unwrap_spec=[object, object]) def func(interp, s_frame, w_arg, w_rcvr): @@ -905,6 +906,10 @@ return IProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError + at expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) +def func(interp, s_frame, w_rcvr): + raise PrimitiveFailedError() + # ___________________________________________________________________________ # Miscellaneous Primitives (120-127) CALLOUT_TO_FFI = 120 @@ -1321,8 +1326,6 @@ argcount = len(args_w) s_frame.pop_n(2) # removing our arguments - assert isinstance(w_selector, model.W_BytesObject) - try: s_method = w_rcvr.shadow_of_my_class(interp.space).lookup(w_selector) except MethodNotFound: diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -354,6 +354,8 @@ AbstractShadow.__init__(self, space, w_self) def find_selector(self, w_selector): + if self.invalid: + self.sync_cache() assert not self.invalid return self.methoddict.get(w_selector, None) @@ -382,7 +384,11 @@ 
w_selector = self.w_self()._fetch(constants.METHODDICT_NAMES_INDEX+i) if not w_selector.is_same_object(self.space.w_nil): if not isinstance(w_selector, model.W_BytesObject): - raise ClassShadowError("bogus selector in method dict") + pass + # TODO: Check if there's more assumptions about this. + # Putting any key in the methodDict and running with + # perform is actually supported in Squeak + # raise ClassShadowError("bogus selector in method dict") w_compiledmethod = w_values._fetch(i) if not isinstance(w_compiledmethod, model.W_CompiledMethod): raise ClassShadowError("The methoddict must contain " @@ -390,7 +396,10 @@ "If the value observed is nil, our " "invalidating mechanism may be broken.") self.methoddict[w_selector] = w_compiledmethod.as_compiledmethod_get_shadow(self.space) - selector = w_selector.as_string() + if isinstance(w_selector, model.W_BytesObject): + selector = w_selector.as_string() + else: + selector = w_selector.as_repr_string() w_compiledmethod._likely_methodname = selector if self.s_class: self.s_class.changed() diff --git a/spyvm/tool/analyseimage.py b/spyvm/tool/analyseimage.py --- a/spyvm/tool/analyseimage.py +++ b/spyvm/tool/analyseimage.py @@ -9,6 +9,7 @@ mini_image = image_dir.join('mini.image') minitest_image = image_dir.join('minitest.image') +s45_image = image_dir.join('Squeak4.5-12568.image') def get_miniimage(space): return squeakimage.reader_for_image(space, squeakimage.Stream(mini_image.open(mode="rb"))) @@ -16,6 +17,9 @@ def get_minitestimage(space): return squeakimage.reader_for_image(space, squeakimage.Stream(minitest_image.open(mode="rb"))) +def get_45image(space): + return squeakimage.reader_for_image(space, squeakimage.Stream(s45_image.open(mode="rb"))) + def create_image(space, image_reader): image_reader.initialize() @@ -23,13 +27,14 @@ image.from_reader(space, image_reader) return image - def create_squeakimage(space): return create_image(space, get_miniimage(space)) def create_testimage(space): return create_image(space, get_minitestimage(space)) +def create_45image(space): + return create_image(space, get_45image(space)) def printStringsInImage(): image = create_squeakimage() From noreply at buildbot.pypy.org Fri Dec 6 17:23:17 2013 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 6 Dec 2013 17:23:17 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: workaround: cursor setting doesn't currently work in modern images Message-ID: <20131206162317.9BFB41C3298@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r521:e848891e0606 Date: 2013-12-06 17:22 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/e848891e0606/ Log: workaround: cursor setting doesn't currently work in modern images diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -679,7 +679,7 @@ # mask is a form object w_contents = w_mask.fetch(interp.space, 0) if isinstance(w_contents, model.W_WordsObject): - w_mask = w_contents.words + mask_words = w_contents.words else: raise PrimitiveFailedError else: @@ -691,14 +691,18 @@ height = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 2)) depth = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) hotpt = wrapper.PointWrapper(interp.space, w_rcvr.fetch(interp.space, 4)) - display.SDLCursor.set( - w_bitmap.words, - width, - height, - hotpt.x(), - hotpt.y(), - mask_words=mask_words - ) + if not interp.image.is_modern: + display.SDLCursor.set( + w_bitmap.words, + width, + height, + hotpt.x(), + hotpt.y(), + mask_words=mask_words + ) + else: + 
# TODO: Implement + pass interp.space.objtable['w_cursor'] = w_rcvr return w_rcvr diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -355,8 +355,7 @@ def find_selector(self, w_selector): if self.invalid: - self.sync_cache() - assert not self.invalid + return None return self.methoddict.get(w_selector, None) def update(self): return self.sync_cache() diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -375,6 +375,7 @@ self.w_simulateCopyBits = self.find_symbol(space, reader, "simulateCopyBits") self.lastWindowSize = reader.lastWindowSize self.version = reader.version + self.is_modern = reader.version.magic > 6502 def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -306,6 +306,14 @@ assert w_result is not None assert w_result.as_string() == "someString" +def test_create_new_symbol_new_with_arg0(): + w_dnu = get_image().special(constants.SO_DOES_NOT_UNDERSTAND) + w_Symbol = w_dnu.getclass(space) + w_res = perform(w_Symbol, "new:", w(0)) + assert w_res.getclass(space).is_same_object(w_Symbol) + assert isinstance(w_res, model.W_BytesObject) + assert w_res.size() == 0 + def test_pi_as_w_float(): import math w_result = perform(interp.space.w_Float, "pi") From noreply at buildbot.pypy.org Fri Dec 6 19:02:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 6 Dec 2013 19:02:06 +0100 (CET) Subject: [pypy-commit] cffi default: Include on Solaris. Uses the same hack as CPython's Message-ID: <20131206180206.79CFF1C153F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1436:1a01fbf20090 Date: 2013-12-06 19:01 +0100 http://bitbucket.org/cffi/cffi/changeset/1a01fbf20090/ Log: Include on Solaris. Uses the same hack as CPython's ctypes. Fixes issue #128. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -13,6 +13,9 @@ #include #include #include +#if defined (__SVR4) && defined (__sun) +# include +#endif #endif #include "malloc_closure.h" From noreply at buildbot.pypy.org Fri Dec 6 19:52:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 6 Dec 2013 19:52:35 +0100 (CET) Subject: [pypy-commit] cffi default: Fix for issue #129: 'bytes(buffer)' needs to work on Python 2, Message-ID: <20131206185235.9A65A1C015D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1437:65a3bcbcb054 Date: 2013-12-06 19:52 +0100 http://bitbucket.org/cffi/cffi/changeset/65a3bcbcb054/ Log: Fix for issue #129: 'bytes(buffer)' needs to work on Python 2, because it works on Python 3 and cannot be prevented to. diff --git a/c/minibuffer.h b/c/minibuffer.h --- a/c/minibuffer.h +++ b/c/minibuffer.h @@ -93,6 +93,26 @@ *lenp = self->mb_size; return 1; } + +static PyObject *mb_str(MiniBufferObj *self) +{ + /* Python 2: we want str(buffer) to behave like buffer[:], because + that's what bytes(buffer) does on Python 3 and there is no way + we can prevent this. */ + return PyString_FromStringAndSize(self->mb_data, self->mb_size); +} + +static PyObject *MiniBuffer_unicode(PyObject *self, PyObject *ignored) +{ + /* Python 2: we *don't* want unicode(buffer) to return the + unicodified str(buffer)! 
*/ + return PyObject_Repr(self); +} + +static PyMethodDef MiniBuffer_methods[] = { + {"__unicode__", MiniBuffer_unicode, METH_NOARGS}, + {0} +}; #endif static int mb_getbuf(MiniBufferObj *self, Py_buffer *view, int flags) @@ -249,7 +269,11 @@ #endif 0, /* tp_hash */ 0, /* tp_call */ +#if PY_MAJOR_VERSION < 3 + (reprfunc)mb_str, /* tp_str */ +#else 0, /* tp_str */ +#endif PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ &mb_as_buffer, /* tp_as_buffer */ @@ -260,6 +284,13 @@ (inquiry)mb_clear, /* tp_clear */ 0, /* tp_richcompare */ offsetof(MiniBufferObj, mb_weakreflist), /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ +#if PY_MAJOR_VERSION < 3 + MiniBuffer_methods, /* tp_methods */ +#else + 0, /* tp_methods */ +#endif }; static PyObject *minibuffer_new(char *data, Py_ssize_t size, diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2154,7 +2154,8 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert unicode(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1172,7 +1172,7 @@ because these objects' API changes too much across Python versions. Instead it has the following Python API (a subset of ``buffer``): -- ``buf[:]``: fetch a copy as a regular byte string (or +- ``buf[:]`` or ``bytes(buf)``: fetch a copy as a regular byte string (or ``buf[start:end]`` for a part) - ``buf[:] = newstr``: change the original content (or ``buf[start:end] @@ -1187,6 +1187,10 @@ owned memory will not be freed as long as the buffer is alive. Moreover buffer objects now support weakrefs to them. +.. versionchanged:: 0.9 + Before version 0.9, ``bytes(buf)`` was supported in Python 3 but not + Python 2. + ``ffi.typeof("C type" or cdata object)``: return an object of type ```` corresponding to the parsed string, or to the C type of the From noreply at buildbot.pypy.org Fri Dec 6 20:37:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 6 Dec 2013 20:37:20 +0100 (CET) Subject: [pypy-commit] cffi default: Document more this change Message-ID: <20131206193720.5539A1C153F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1438:4581bf09fb68 Date: 2013-12-06 20:37 +0100 http://bitbucket.org/cffi/cffi/changeset/4581bf09fb68/ Log: Document more this change diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1188,8 +1188,12 @@ Moreover buffer objects now support weakrefs to them. .. versionchanged:: 0.9 - Before version 0.9, ``bytes(buf)`` was supported in Python 3 but not - Python 2. + Before version 0.9, ``bytes(buf)`` was supported in Python 3 to get + the content of the buffer, but on Python 2 it would return the repr + ``<_cffi_backend.buffer object>``. This has been fixed. But you + should avoid using ``str(buf)``: it now gives inconsistent results + between Python 2 and Python 3 (this is similar to how ``str()`` + gives inconsistent results on regular byte strings). 
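A quick illustration of the distinction documented above, mirroring the test added in this changeset (the variable names here are only for the example):

    import cffi
    ffi = cffi.FFI()
    p = ffi.new("char[]", b"hi")       # 3 bytes: 'h', 'i', '\0'
    buf = ffi.buffer(p)
    assert buf[:] == b"hi\x00"         # works on both Python 2 and 3
    assert bytes(buf) == b"hi\x00"     # on Python 2 too, once this fix is in
    # str(buf): the raw bytes on Python 2, but only the repr on Python 3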
``ffi.typeof("C type" or cdata object)``: return an object of type From noreply at buildbot.pypy.org Fri Dec 6 21:25:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 6 Dec 2013 21:25:53 +0100 (CET) Subject: [pypy-commit] cffi default: Kill the __unicode__ method and let it behave as it does normally Message-ID: <20131206202553.EB0841C0F12@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1439:96939e956ff6 Date: 2013-12-06 21:25 +0100 http://bitbucket.org/cffi/cffi/changeset/96939e956ff6/ Log: Kill the __unicode__ method and let it behave as it does normally even if it's one more (obscure) difference between Py2 and 3. diff --git a/c/minibuffer.h b/c/minibuffer.h --- a/c/minibuffer.h +++ b/c/minibuffer.h @@ -101,18 +101,6 @@ we can prevent this. */ return PyString_FromStringAndSize(self->mb_data, self->mb_size); } - -static PyObject *MiniBuffer_unicode(PyObject *self, PyObject *ignored) -{ - /* Python 2: we *don't* want unicode(buffer) to return the - unicodified str(buffer)! */ - return PyObject_Repr(self); -} - -static PyMethodDef MiniBuffer_methods[] = { - {"__unicode__", MiniBuffer_unicode, METH_NOARGS}, - {0} -}; #endif static int mb_getbuf(MiniBufferObj *self, Py_buffer *view, int flags) @@ -284,13 +272,6 @@ (inquiry)mb_clear, /* tp_clear */ 0, /* tp_richcompare */ offsetof(MiniBufferObj, mb_weakreflist), /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ -#if PY_MAJOR_VERSION < 3 - MiniBuffer_methods, /* tp_methods */ -#else - 0, /* tp_methods */ -#endif }; static PyObject *minibuffer_new(char *data, Py_ssize_t size, diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2154,8 +2154,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert unicode(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- From noreply at buildbot.pypy.org Fri Dec 6 21:28:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 6 Dec 2013 21:28:16 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/65a3bcbcb054 Message-ID: <20131206202816.9F0C81C0F12@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68388:d03f7f2b76da Date: 2013-12-06 19:57 +0100 http://bitbucket.org/pypy/pypy/changeset/d03f7f2b76da/ Log: Update to cffi/65a3bcbcb054 diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -59,6 +59,12 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + + def descr_unicode(self, space): + return space.repr(space.wrap(self)) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +74,8 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), + __unicode__ = interp2app(MiniBuffer.descr_unicode), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ 
b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,8 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert unicode(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- From noreply at buildbot.pypy.org Fri Dec 6 21:28:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 6 Dec 2013 21:28:18 +0100 (CET) Subject: [pypy-commit] pypy default: update to cffi/96939e956ff6 Message-ID: <20131206202818.073B71C0F12@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68389:8d92f7cf8c55 Date: 2013-12-06 21:27 +0100 http://bitbucket.org/pypy/pypy/changeset/8d92f7cf8c55/ Log: update to cffi/96939e956ff6 diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -62,9 +62,6 @@ def descr_str(self, space): return space.wrap(self.buffer.as_str()) - def descr_unicode(self, space): - return space.repr(space.wrap(self)) - MiniBuffer.typedef = TypeDef( "buffer", @@ -75,7 +72,6 @@ __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), __str__ = interp2app(MiniBuffer.descr_str), - __unicode__ = interp2app(MiniBuffer.descr_unicode), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,8 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert unicode(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- From noreply at buildbot.pypy.org Fri Dec 6 23:07:27 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 6 Dec 2013 23:07:27 +0100 (CET) Subject: [pypy-commit] pypy default: These are elidable Message-ID: <20131206220727.7266A1C00B3@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68390:25f2cb38e0d7 Date: 2013-12-06 16:06 -0600 http://bitbucket.org/pypy/pypy/changeset/25f2cb38e0d7/ Log: These are elidable diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -72,8 +72,8 @@ [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_floor = llexternal('floor', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) math_sqrt = llexternal('sqrt', [rffi.DOUBLE], rffi.DOUBLE) -math_sin = llexternal('sin', [rffi.DOUBLE], rffi.DOUBLE) -math_cos = llexternal('cos', [rffi.DOUBLE], rffi.DOUBLE) +math_sin = llexternal('sin', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) +math_cos = llexternal('cos', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) @jit.elidable def sqrt_nonneg(x): From noreply at buildbot.pypy.org Fri Dec 6 23:07:29 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 6 Dec 2013 23:07:29 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: 
<20131206220729.EBCF21C00B3@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68391:e07994044752 Date: 2013-12-06 16:06 -0600 http://bitbucket.org/pypy/pypy/changeset/e07994044752/ Log: merged upstream diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. 
_`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -25,3 +25,17 @@ .. branch: less-stringly-ops Use subclasses of SpaceOperation instead of SpaceOperator objects. Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. 
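The new is_writable() hook added to Buffer/RWBuffer above is what lets the ndarray-buffer branch decide whether an array built on top of a foreign buffer may be written to. A condensed, illustrative sketch of how the micronumpy constructor further down in this merge consumes it (names and imports follow that change; offset handling and error checks are omitted):

    from rpython.rtyper.lltypesystem import rffi
    from rpython.rlib.rawstorage import RAW_STORAGE_PTR
    from pypy.module.micronumpy.base import W_NDimArray

    def array_from_buffer(space, w_buffer, shape, dtype):
        buf = space.buffer_w(w_buffer)
        raw_ptr = buf.get_raw_address()   # ValueError if not a raw buffer
        storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr)
        return W_NDimArray.from_shape_and_storage(
            space, shape, storage, dtype,
            w_base=w_buffer,              # keeps the underlying buffer alive
            writable=buf.is_writable())   # read-only buffer -> read-only array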
diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -59,6 +59,9 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +71,7 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -687,11 +687,15 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('init_bufferobject', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, + init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -699,12 +703,8 @@ lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - if translating: - reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) - else: - reinit_tls = 
rffi.llexternal('PyPyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, + compilation_info=eci) add_fork_hook('child', reinit_tls) def init_function(func): @@ -746,7 +746,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -812,7 +812,7 @@ INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -824,7 +824,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -859,28 +859,27 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def mangle_name(prefix, name): + if name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - if not rename: - newname = name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1041,7 +1040,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) @@ -1071,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void init_bufferobject(void); +void _Py_init_bufferobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void 
_Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,7 +783,7 @@ return size; } -void init_bufferobject(void) +void _Py_init_bufferobject(void) { PyType_Ready(&PyBuffer_Type); } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyInt_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing PyPy's functions" +#endif + return PyInt_FromLong(PyThread_get_thread_ident()); """), ]) import thread, threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ 
b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -49,11 +49,24 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, + w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) - if owning: + if w_base is not None: + if owning: + raise OperationError(space.w_ValueError, + space.wrap("Cannot have owning=True when specifying a buffer")) + if writable: + impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, + backstrides, storage, w_base) + else: + impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, + strides, backstrides, + storage, w_base) + + elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, backstrides, storage=storage) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,3 +1,5 @@ +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ @@ -20,6 +22,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): @@ -1067,13 +1070,35 @@ offset=0, w_strides=None, order='C'): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_buffer)): - raise OperationError(space.w_NotImplementedError, - space.wrap("unsupported param")) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) + + if not space.is_none(w_buffer): + if (not space.is_none(w_strides)): + raise OperationError(space.w_NotImplementedError, + space.wrap("unsupported param")) + + buf = space.buffer_w(w_buffer) + try: + raw_ptr = buf.get_raw_address() + except ValueError: + raise 
OperationError(space.w_TypeError, space.wrap( + "Only raw buffers are supported")) + if not shape: + raise OperationError(space.w_TypeError, space.wrap( + "numpy scalars from buffers not supported yet")) + totalsize = support.product(shape) * dtype.get_size() + if totalsize+offset > buf.getlength(): + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) + storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) + storage = rffi.ptradd(storage, offset) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + w_subtype=w_subtype, + w_base=w_buffer, + writable=buf.is_writable()) + if not shape: return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): @@ -1093,8 +1118,6 @@ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. """ - from rpython.rtyper.lltypesystem import rffi - from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -71,7 +71,6 @@ final_strides = arr.get_strides() + strides final_backstrides = arr.get_backstrides() + backstrides final_dtype = subdtype - print self.name,'strides',arr.get_strides(),strides if subdtype.subdtype: final_dtype = subdtype.subdtype return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -238,13 +238,16 @@ while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - w_val = arr_iter.getitem().convert_to(dtype) - if out_iter.first_line: - if identity is not None: - w_val = func(dtype, identity, w_val) + if arr_iter.done(): + w_val = identity else: - cur = temp_iter.getitem() - w_val = func(dtype, cur, w_val) + w_val = arr_iter.getitem().convert_to(dtype) + if out_iter.first_line: + if identity is not None: + w_val = func(dtype, identity, w_val) + else: + cur = temp_iter.getitem() + w_val = func(dtype, cur, w_val) out_iter.setitem(w_val) if cumulative: temp_iter.setitem(w_val) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -219,6 +219,7 @@ class AppTestNumArray(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def w_CustomIndexObject(self, index): class CustomIndexObject(object): def __init__(self, index): @@ -1299,6 +1300,9 @@ assert a.sum() == 105 assert a.max() == 14 assert array([]).sum() == 0.0 + assert array([]).reshape(0, 2).sum() == 0. 
+ assert (array([]).reshape(0, 2).sum(0) == [0., 0.]).all() + assert (array([]).reshape(0, 2).prod(0) == [1., 1.]).all() raises(ValueError, 'array([]).max()') assert (a.sum(0) == [30, 35, 40]).all() assert (a.sum(axis=0) == [30, 35, 40]).all() @@ -2089,6 +2093,69 @@ a = np.ndarray([1], dtype=bool) assert a[0] == True + +class AppTestNumArrayFromBuffer(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) + + def setup_class(cls): + from rpython.tool.udir import udir + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + + def test_ndarray_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + a = np.ndarray((3,), buffer=buf, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] + assert a.base is buf + + def test_ndarray_subclass_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + class X(np.ndarray): + pass + a = X((3,), buffer=buf, dtype='i2') + assert type(a) is X + + def test_ndarray_from_buffer_and_offset(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*7) + buf[0] = 'X' + a = np.ndarray((3,), buffer=buf, offset=1, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['X', 'b', '\x00', 'a', '\x00', 'r', '\x00'] + + def test_ndarray_from_buffer_out_of_bounds(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*10) # 20 bytes + info = raises(TypeError, "np.ndarray((11,), buffer=buf, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + info = raises(TypeError, "np.ndarray((5,), buffer=buf, offset=15, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + + def test_ndarray_from_readonly_buffer(self): + import numpypy as np + from mmap import mmap, ACCESS_READ + f = open(self.tmpname, "w+") + f.write("hello") + f.flush() + buf = mmap(f.fileno(), 5, access=ACCESS_READ) + a = np.ndarray((5,), buffer=buf, dtype='c') + raises(ValueError, "a[0] = 'X'") + buf.close() + f.close() + + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -3060,9 +3127,6 @@ exc = raises(IndexError, "a[0][None]") assert exc.value.message == "invalid index" - exc = raises(IndexError, "a[0][None]") - assert exc.value.message == 'invalid index' - a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -314,6 +314,14 @@ self.check_valid_writeable() self.mmap.setslice(start, string) + def is_writable(self): + try: + self.mmap.check_writeable() + except RMMapError: + return False + else: + return True + def get_raw_address(self): self.check_valid() return self.mmap.data diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py --- a/pypy/testrunner_cfg.py +++ b/pypy/testrunner_cfg.py @@ -3,7 +3,7 @@ DIRS_SPLIT = [ 'translator/c', 'rlib', - 'rpython/memory', 'jit/metainterp', 'rpython/test', + 'memory/test', 'jit/metainterp', 'jit/backend/arm', 'jit/backend/x86', ] diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -329,7 +329,7 @@ if closure is None: self.closure = [] else: - self.closure = [const(c.cell_contents) for c in closure] + self.closure = list(closure) 
assert len(self.closure) == len(self.pycode.co_freevars) def init_locals_stack(self, code): @@ -846,7 +846,13 @@ LOOKUP_METHOD = LOAD_ATTR def LOAD_DEREF(self, varindex): - self.pushvalue(self.closure[varindex]) + cell = self.closure[varindex] + try: + content = cell.cell_contents + except ValueError: + name = self.pycode.co_freevars[varindex] + raise FlowingError("Undefined closure variable '%s'" % name) + self.pushvalue(const(content)) def STORE_FAST(self, varindex): w_newvalue = self.popvalue() diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1212,6 +1212,39 @@ graph = self.codetest(f) assert 'getattr' in self.all_operations(graph) + def test_empty_cell_unused(self): + def test(flag): + if flag: + b = 5 + def g(): + if flag: + return b + else: + return 1 + return g + g1 = test(False) + graph = self.codetest(g1) + assert not self.all_operations(graph) + g2 = test(True) + graph = self.codetest(g2) + assert not self.all_operations(graph) + + def test_empty_cell_error(self): + def test(flag): + if not flag: + b = 5 + def g(): + if flag: + return b + else: + return 1 + return g + g = test(True) + with py.test.raises(FlowingError) as excinfo: + graph = self.codetest(g) + assert "Undefined closure variable 'b'" in str(excinfo.value) + + DATA = {'x': 5, 'y': 6} diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -24,8 +24,12 @@ FTIME = 'ftime' STRUCT_TIMEB = 'struct timeb' includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h', - 'sys/types.h', 'unistd.h', 'sys/timeb.h', + 'sys/types.h', 'unistd.h', 'sys/time.h', 'sys/resource.h'] + + if not sys.platform.startswith("openbsd"): + includes.append('sys/timeb.h') + need_rusage = True @@ -86,16 +90,18 @@ c_gettimeofday = self.llexternal('gettimeofday', [self.TIMEVALP, rffi.VOIDP], rffi.INT, _nowrapper=True, releasegil=False) + c_ftime = None # We have gettimeofday(2), so force ftime(3) OFF. else: c_gettimeofday = None - if self.HAVE_FTIME: - self.configure(CConfigForFTime) - c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], - lltype.Void, - _nowrapper=True, releasegil=False) - else: - c_ftime = None # to not confuse the flow space + # Only look for ftime(3) if gettimeofday(2) was not found. 
+ if self.HAVE_FTIME: + self.configure(CConfigForFTime) + c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], + lltype.Void, + _nowrapper=True, releasegil=False) + else: + c_ftime = None # to not confuse the flow space c_time = self.llexternal('time', [rffi.VOIDP], rffi.TIME_T, _nowrapper=True, releasegil=False) @@ -115,9 +121,9 @@ if rffi.cast(rffi.LONG, errcode) == 0: result = decode_timeval(t) lltype.free(t, flavor='raw') - if result != -1: - return result - if self.HAVE_FTIME: + if result != -1: + return result + else: # assume using ftime(3) t = lltype.malloc(self.TIMEB, flavor='raw') c_ftime(t) result = (float(intmask(t.c_time)) + diff --git a/rpython/translator/c/src/exception.h b/rpython/translator/c/src/exception.h --- a/rpython/translator/c/src/exception.h +++ b/rpython/translator/c/src/exception.h @@ -36,7 +36,6 @@ /* prototypes */ -#define RPyRaiseSimpleException(exc, msg) _RPyRaiseSimpleException(R##exc) void _RPyRaiseSimpleException(RPYTHON_EXCEPTION rexc); #endif diff --git a/rpython/translator/c/src/support.h b/rpython/translator/c/src/support.h --- a/rpython/translator/c/src/support.h +++ b/rpython/translator/c/src/support.h @@ -5,13 +5,9 @@ #define RUNNING_ON_LLINTERP 0 #define OP_JIT_RECORD_KNOWN_CLASS(i, c, r) /* nothing */ -#define FAIL_EXCEPTION(exc, msg) \ - { \ - RPyRaiseSimpleException(exc, msg); \ - } -#define FAIL_OVF(msg) FAIL_EXCEPTION(PyExc_OverflowError, msg) -#define FAIL_VAL(msg) FAIL_EXCEPTION(PyExc_ValueError, msg) -#define FAIL_ZER(msg) FAIL_EXCEPTION(PyExc_ZeroDivisionError, msg) +#define FAIL_OVF(msg) _RPyRaiseSimpleException(RPyExc_OverflowError) +#define FAIL_VAL(msg) _RPyRaiseSimpleException(RPyExc_ValueError) +#define FAIL_ZER(msg) _RPyRaiseSimpleException(RPyExc_ZeroDivisionError) /* Extra checks can be enabled with the RPY_ASSERT or RPY_LL_ASSERT * macros. They differ in the level at which the tests are made. diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -13,7 +13,7 @@ ] + os.environ.get("CFLAGS", "").split() def _libs(self, libraries): - libraries=set(libraries + ("intl", "iconv", "compat")) + libraries=set(libraries + ("intl", "iconv")) return ['-l%s' % lib for lib in libraries if lib not in ["crypt", "dl", "rt"]] class OpenBSD_64(OpenBSD): From noreply at buildbot.pypy.org Sat Dec 7 07:31:45 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 7 Dec 2013 07:31:45 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Document another task Message-ID: <20131207063145.1EB7B1C31C5@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r5115:e25ae1d37f1c Date: 2013-12-06 23:29 -0700 http://bitbucket.org/pypy/extradoc/changeset/e25ae1d37f1c/ Log: Document another task diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -48,6 +48,18 @@ - ovfcheck(a << b) will do ``result >> b`` and check that the result is equal to ``a``, instead of looking at the x86 flags. 
+- Track whether floats in the JIT could possibly have overflowed into + ``inf``/``nan`` + + f81 = cast_int_to_float(i79) + f82 = float_add(f81, 11235582092889474423308157442431404585112356118389416079589380072358292237843810195794279832650471001320007117491962084853674360550901038905802964414967132773610493339054092829768888725077880882465817684505312860552384417646403930092119569408801702322709406917786643639996702871154982269052209770601514008576.000000) + i83 = float_eq(f82, f81) + guard_false(i83, descr=) + + For example, here this is the test for ``isinf(i81)``, but it's impossible + for ``i81`` to be ``inf`` because ``float(sys.maxint)`` is a finite value. + + OPTIMIZATIONS ------------- From noreply at buildbot.pypy.org Sat Dec 7 07:31:46 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 7 Dec 2013 07:31:46 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: merged upstream Message-ID: <20131207063146.576B51C31CA@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r5116:bf910daa9c8a Date: 2013-12-06 23:31 -0700 http://bitbucket.org/pypy/extradoc/changeset/bf910daa9c8a/ Log: merged upstream diff --git a/blog/draft/py3k-status-update-12.rst b/blog/draft/py3k-status-update-12.rst new file mode 100644 --- /dev/null +++ b/blog/draft/py3k-status-update-12.rst @@ -0,0 +1,45 @@ +Py3k status update #12 +---------------------- + +This is the 12th status update about our work on the `py3k branch`_, which we +can work on thanks to all of the people who donated_ to the `py3k proposal`_. + +Here's an update on the recent progress: + +* Thank you to everyone who has provided initial feedback on the PyPy3 2.1 beta + 1 release. We've gotten a number of bug reports, most of which have been + fixed. + +* As usual, we're continually keeping up with changes from the default + branch. Oftentimes these merges come at a cost (conflicts and or + reintegration of py3k changes) but occasionally we get goodies for free, such + as the `recent JIT optimizations`_ and `incremental garbage collection`_. + +* We've been focusing on re-optimizing Python 2 int sized (machine sized) + integers: + +We have a couple of known, notable speed regressions in the PyPy3 beta release +vs regular PyPy. The major one being with Python 2.x int sized (or machine +sized) integers. + +Python 3 drops the distinction between int and long types. CPython 3.x +accomplishes this by removing the old int type entirely and renaming the long +type to int. Initially, we've done the same for PyPy3 for the sake of +simplicity and getting everything working. + +However PyPy's JIT is capable of heavily optimizing these machine sized integer +operations, so this came with a regression in performance in this area. + +We're now in the process of solving this. Part of this work also involves some +house cleaning on these numeric types which will also benefit the default +branch. + +cheers, +Phil + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`py3k branch`: https://bitbucket.org/pypy/pypy/commits/all/tip/branch%28%22py3k%22%29 + +.. _`recent JIT optimizations`: http://morepypy.blogspot.com/2013/10/making-coveragepy-faster-under-pypy.html +.. 
_`incremental garbage collection`: http://morepypy.blogspot.com/2013/10/incremental-garbage-collector-in-pypy.html diff --git a/sprintinfo/leysin-winter-2014/announcement.txt b/sprintinfo/leysin-winter-2014/announcement.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/announcement.txt @@ -0,0 +1,62 @@ +===================================================================== + PyPy Leysin Winter Sprint (11-19st January 2014) +===================================================================== + +The next PyPy sprint will be in Leysin, Switzerland, for the ninth time. +This is a fully public sprint: newcomers and topics other than those +proposed below are welcome. + +------------------------------ +Goals and topics of the sprint +------------------------------ + +* Py3k: work towards supporting Python 3 in PyPy + +* NumPyPy: work towards supporting the numpy module in PyPy + +* STM: work towards supporting Software Transactional Memory + +* And as usual, the main side goal is to have fun in winter sports :-) + We can take a day off for ski. + +----------- +Exact times +----------- + +For a change, and as an attempt to simplify things, I specified the +dates as 11-19 January 2014, where 11 and 19 are travel days. We will +work full days between the 12 and the 18. You are of course allowed to +show up for a part of that time only, too. + +----------------------- +Location & Accomodation +----------------------- + +Leysin, Switzerland, "same place as before". Let me refresh your +memory: both the sprint venue and the lodging will be in a very spacious +pair of chalets built specifically for bed & breakfast: +http://www.ermina.ch/. The place has a good ADSL Internet connexion +with wireless installed. You can of course arrange your own lodging +anywhere (as long as you are in Leysin, you cannot be more than a 15 +minutes walk away from the sprint venue), but I definitely recommend +lodging there too -- you won't find a better view anywhere else (though +you probably won't get much worse ones easily, either :-) + +Please *confirm* that you are coming so that we can adjust the +reservations as appropriate. The rate so far has been around 60 CHF a +night all included in 2-person rooms, with breakfast. There are larger +rooms too (less expensive per person) and maybe the possibility to get a +single room if you really want to. + +Please register by Mercurial:: + + https://bitbucket.org/pypy/extradoc/ + https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/leysin-winter-2014 + +or on the pypy-dev mailing list if you do not yet have check-in rights: + + http://mail.python.org/mailman/listinfo/pypy-dev + +You need a Swiss-to-(insert country here) power adapter. There will be +some Swiss-to-EU adapters around -- bring a EU-format power strip if you +have one. diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -0,0 +1,60 @@ + +People coming to the Leysin sprint Winter 2014 +================================================== + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available yet from them. 
+ + +==================== ============== ======================= + Name Arrive/Depart Accomodation +==================== ============== ======================= +Armin Rigo private +Romain Guillebert 11-19 Ermina +Christian Clauss 11-12 & 18-19 I live nearby +==================== ============== ======================= + + +People on the following list were present at previous sprints: + +==================== ============== ===================== + Name Arrive/Depart Accomodation +==================== ============== ===================== +Romain Guillebert ? ? +Antonio Cuni ? ? +Michael Foord ? ? +Maciej Fijalkowski ? ? +David Schneider ? ? +Jacob Hallen ? ? +Laura Creighton ? ? +Hakan Ardo ? ? +Carl Friedrich Bolz ? ? +Samuele Pedroni ? ? +Anders Hammarquist ? ? +Christian Tismer ? ? +Niko Matsakis ? ? +Toby Watson ? ? +Paul deGrandis ? ? +Michael Hudson ? ? +Anders Lehmann ? ? +Niklaus Haldimann ? ? +Lene Wagner ? ? +Amaury Forgeot d'Arc ? ? +Valentino Volonghi ? ? +Boris Feigin ? ? +Andrew Thompson ? ? +Bert Freudenberg ? ? +Beatrice Duering ? ? +Richard Emslie ? ? +Johan Hahn ? ? +Stephan Diehl ? ? +Alexander Schremmer ? ? +Anders Chrigstroem ? ? +Eric van Riet Paap ? ? +Holger Krekel ? ? +Guido Wesdorp ? ? +Leonardo Santagada ? ? +Alexandre Fayolle ? ? +Sylvain Th�nault ? ? +==================== ============== ===================== diff --git a/talk/pycon2014/language-summit.rst b/talk/pycon2014/language-summit.rst new file mode 100644 --- /dev/null +++ b/talk/pycon2014/language-summit.rst @@ -0,0 +1,7 @@ +---------------------------- +Language summit presentation +---------------------------- + +We should give a ~10 minute presentation about the status of PyPy. + +(Asked by Michael Foord) From noreply at buildbot.pypy.org Sat Dec 7 07:38:00 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 7 Dec 2013 07:38:00 +0100 (CET) Subject: [pypy-commit] pypy default: test fix Message-ID: <20131207063800.D3CDD1C31CA@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68392:111bb9f5bec5 Date: 2013-12-06 23:37 -0700 http://bitbucket.org/pypy/pypy/changeset/111bb9f5bec5/ Log: test fix diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) From noreply at buildbot.pypy.org Sat Dec 7 10:41:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 7 Dec 2013 10:41:12 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131207094112.4B40F1C31CA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68394:cfdd81db7953 Date: 2013-12-07 10:22 +0100 http://bitbucket.org/pypy/pypy/changeset/cfdd81db7953/ Log: merge heads diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) 
- f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -72,8 +72,8 @@ [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_floor = llexternal('floor', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) math_sqrt = llexternal('sqrt', [rffi.DOUBLE], rffi.DOUBLE) -math_sin = llexternal('sin', [rffi.DOUBLE], rffi.DOUBLE) -math_cos = llexternal('cos', [rffi.DOUBLE], rffi.DOUBLE) +math_sin = llexternal('sin', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) +math_cos = llexternal('cos', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) @jit.elidable def sqrt_nonneg(x): From noreply at buildbot.pypy.org Sat Dec 7 10:41:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 7 Dec 2013 10:41:11 +0100 (CET) Subject: [pypy-commit] pypy default: Issue1654: Improvement to math.factorial() mostly by "anon" Message-ID: <20131207094111.152581C31C5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68393:73fd0ad3e2e2 Date: 2013-12-07 10:22 +0100 http://bitbucket.org/pypy/pypy/changeset/73fd0ad3e2e2/ Log: Issue1654: Improvement to math.factorial() mostly by "anon" diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -5,9 +5,24 @@ if fl != x: raise ValueError("float arguments must be integral") x = fl - if x < 0: - raise ValueError("x must be >= 0") - res = 1 - for i in range(1, x + 1): - res *= i - return res + if x <= 100: + if x < 0: + raise ValueError("x must be >= 0") + res = 1 + for i in range(2, x + 1): + res *= i + return res + + #Experimentally this gap seems good + gap = max(100, x>>7) + def _fac(low, high): + if low+gap >= high: + t = 1 + for i in range(low, high): + t *= i + return t + + mid = (low + high) >> 1 + return _fac(low, mid) * _fac(mid, high) + + return _fac(1, x+1) From noreply at buildbot.pypy.org Sat Dec 7 10:42:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 7 Dec 2013 10:42:21 +0100 (CET) Subject: [pypy-commit] pypy default: The test file I wrote to make very sure that factorial() works Message-ID: <20131207094221.2444E1C31C5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68395:0046a4c5c086 Date: 2013-12-07 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/0046a4c5c086/ Log: The test file I wrote to make very sure that factorial() works diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py new file mode 100644 --- /dev/null +++ b/pypy/module/math/test/test_factorial.py @@ -0,0 +1,22 @@ +import py +import math +from pypy.module.math import app_math + +def test_factorial_extra(): + for x in range(1000): + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + assert type(r1) == type(r2) + +def test_timing(): + py.test.skip("for manual running only") + x = 59999 + t1 = time.time() + r1 = app_math.factorial(x) + t2 = time.time() + r2 = math.factorial(x) + t3 = time.time() + assert r1 == r2 + print t2 - t1 + print t3 - t2 From noreply at buildbot.pypy.org Sat Dec 7 12:08:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 7 Dec 2013 12:08:11 +0100 (CET) Subject: [pypy-commit] pypy 
default: Improve to decompose into odd factors only. It gives another Message-ID: <20131207110811.F03381C116D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68396:37f58b4c73ad Date: 2013-12-07 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/37f58b4c73ad/ Log: Improve to decompose into odd factors only. It gives another close- to-2x speed-up. diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -5,6 +5,7 @@ if fl != x: raise ValueError("float arguments must be integral") x = fl + if x <= 100: if x < 0: raise ValueError("x must be >= 0") @@ -12,17 +13,26 @@ for i in range(2, x + 1): res *= i return res - + #Experimentally this gap seems good gap = max(100, x>>7) - def _fac(low, high): + def _fac_odd(low, high): if low+gap >= high: t = 1 - for i in range(low, high): + for i in range(low, high, 2): t *= i return t - mid = (low + high) >> 1 - return _fac(low, mid) * _fac(mid, high) - - return _fac(1, x+1) + mid = ((low + high) >> 1) | 1 + return _fac_odd(low, mid) * _fac_odd(mid, high) + + def _fac1(x): + if x <= 2: + return 1, 1, x - 1 + x2 = x >> 1 + f, g, shift = _fac1(x2) + g *= _fac_odd((x2 + 1) | 1, x + 1) + return (f * g, g, shift + x2) + + res, _, shift = _fac1(x) + return res << shift diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py --- a/pypy/module/math/test/test_factorial.py +++ b/pypy/module/math/test/test_factorial.py @@ -11,12 +11,19 @@ def test_timing(): py.test.skip("for manual running only") - x = 59999 + import time + x = 5000 + repeat = 1000 + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 t1 = time.time() - r1 = app_math.factorial(x) + for i in range(repeat): + app_math.factorial(x) t2 = time.time() - r2 = math.factorial(x) + for i in range(repeat): + math.factorial(x) t3 = time.time() assert r1 == r2 - print t2 - t1 - print t3 - t2 + print (t2 - t1) / repeat + print (t3 - t2) / repeat From noreply at buildbot.pypy.org Sun Dec 8 00:45:42 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 8 Dec 2013 00:45:42 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131207234542.7AAB01C010D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68397:2a5c9b39d31f Date: 2013-12-07 12:00 -0800 http://bitbucket.org/pypy/pypy/changeset/2a5c9b39d31f/ Log: cleanup diff --git a/rpython/memory/gc/inspector.py b/rpython/memory/gc/inspector.py --- a/rpython/memory/gc/inspector.py +++ b/rpython/memory/gc/inspector.py @@ -3,7 +3,7 @@ """ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.objectmodel import free_non_gc_object -from rpython.rtyper.module.ll_os import underscore_on_windows +from rpython.rtyper.module.ll_os import UNDERSCORE_ON_WIN32 from rpython.rlib import rposix, rgc from rpython.memory.support import AddressDict, get_address_stack @@ -94,7 +94,7 @@ # ---------- -raw_os_write = rffi.llexternal(underscore_on_windows+'write', +raw_os_write = rffi.llexternal(UNDERSCORE_ON_WIN32 + 'write', [rffi.INT, llmemory.Address, rffi.SIZE_T], rffi.SIZE_T, sandboxsafe=True, _nowrapper=True) diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -10,6 +10,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform from 
rpython.rlib.rfloat import isfinite, isinf, isnan, INFINITY, NAN +from rpython.rtyper.module.support import UNDERSCORE_ON_WIN32 use_library_isinf_isnan = False if sys.platform == "win32": @@ -50,16 +51,11 @@ compilation_info=math_eci, sandboxsafe=True) -if sys.platform == 'win32': - underscore = '_' -else: - underscore = '' - math_fabs = llexternal('fabs', [rffi.DOUBLE], rffi.DOUBLE) math_log = llexternal('log', [rffi.DOUBLE], rffi.DOUBLE) math_log10 = llexternal('log10', [rffi.DOUBLE], rffi.DOUBLE) math_log1p = math_llexternal('log1p', [rffi.DOUBLE], rffi.DOUBLE) -math_copysign = llexternal(underscore + 'copysign', +math_copysign = llexternal(UNDERSCORE_ON_WIN32 + 'copysign', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) math_atan2 = llexternal('atan2', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) @@ -68,7 +64,7 @@ math_ldexp = llexternal('ldexp', [rffi.DOUBLE, rffi.INT], rffi.DOUBLE) math_pow = llexternal('pow', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_hypot = llexternal(underscore + 'hypot', +math_hypot = llexternal(UNDERSCORE_ON_WIN32 + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_floor = llexternal('floor', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) math_sqrt = llexternal('sqrt', [rffi.DOUBLE], rffi.DOUBLE) diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -15,6 +15,7 @@ from rpython.tool.udir import udir from rpython.rtyper.test.test_llinterp import interpret from rpython.annotator.annrpython import RPythonAnnotator +from rpython.rtyper.module.support import UNDERSCORE_ON_WIN32 from rpython.rtyper.rtyper import RPythonTyper from rpython.rlib.rarithmetic import r_uint, get_long_pattern, is_emulated_long from rpython.rlib.rarithmetic import is_valid_int @@ -744,7 +745,6 @@ def test_get_errno(self): eci = ExternalCompilationInfo(includes=['string.h']) if sys.platform.startswith('win'): - underscore_on_windows = '_' # Note that cpython before 2.7 installs an _invalid_parameter_handler, # which is why the test passes there, but this is no longer # accepted practice. 
@@ -753,11 +753,9 @@ old_err_mode = ctypes.windll.kernel32.GetErrorMode() new_err_mode = old_err_mode | SEM_NOGPFAULTERRORBOX ctypes.windll.kernel32.SetErrorMode(new_err_mode) - else: - underscore_on_windows = '' strlen = rffi.llexternal('strlen', [rffi.CCHARP], rffi.SIZE_T, compilation_info=eci) - os_write = rffi.llexternal(underscore_on_windows+'write', + os_write = rffi.llexternal(UNDERSCORE_ON_WIN32 + 'write', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T) buffer = lltype.malloc(rffi.CCHARP.TO, 5, flavor='raw') diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -8,7 +8,7 @@ import os, sys, errno import py from rpython.rtyper.module.support import ( - _WIN32, StringTraits, UnicodeTraits, underscore_on_windows) + UNDERSCORE_ON_WIN32, _WIN32, StringTraits, UnicodeTraits) from rpython.tool.sourcetools import func_renamer from rpython.rlib.rarithmetic import r_longlong from rpython.rtyper.extfunc import ( @@ -138,9 +138,9 @@ SEEK_CUR = platform.DefinedConstantInteger('SEEK_CUR') SEEK_END = platform.DefinedConstantInteger('SEEK_END') - UTIMBUF = platform.Struct('struct '+underscore_on_windows+'utimbuf', - [('actime', rffi.INT), - ('modtime', rffi.INT)]) + UTIMBUF = platform.Struct('struct %sutimbuf' % UNDERSCORE_ON_WIN32, + [('actime', rffi.INT), + ('modtime', rffi.INT)]) class RegisterOs(BaseLazyRegistering): @@ -347,7 +347,8 @@ @registering(os.dup) def register_os_dup(self): - os_dup = self.llexternal(underscore_on_windows+'dup', [rffi.INT], rffi.INT) + os_dup = self.llexternal(UNDERSCORE_ON_WIN32 + 'dup', + [rffi.INT], rffi.INT) def dup_llimpl(fd): rposix.validate_fd(fd) @@ -360,7 +361,7 @@ @registering(os.dup2) def register_os_dup2(self): - os_dup2 = self.llexternal(underscore_on_windows+'dup2', + os_dup2 = self.llexternal(UNDERSCORE_ON_WIN32 + 'dup2', [rffi.INT, rffi.INT], rffi.INT) def dup2_llimpl(fd, newfd): @@ -1002,7 +1003,7 @@ @registering(os.read) def register_os_read(self): - os_read = self.llexternal(underscore_on_windows+'read', + os_read = self.llexternal(UNDERSCORE_ON_WIN32 + 'read', [rffi.INT, rffi.VOIDP, rffi.SIZE_T], rffi.SIZE_T) @@ -1027,7 +1028,7 @@ @registering(os.write) def register_os_write(self): - os_write = self.llexternal(underscore_on_windows+'write', + os_write = self.llexternal(UNDERSCORE_ON_WIN32 + 'write', [rffi.INT, rffi.VOIDP, rffi.SIZE_T], rffi.SIZE_T) @@ -1050,7 +1051,7 @@ @registering(os.close) def register_os_close(self): - os_close = self.llexternal(underscore_on_windows+'close', [rffi.INT], + os_close = self.llexternal(UNDERSCORE_ON_WIN32 + 'close', [rffi.INT], rffi.INT, releasegil=False) def close_llimpl(fd): @@ -1199,7 +1200,7 @@ @registering(os.getcwd) def register_os_getcwd(self): - os_getcwd = self.llexternal(underscore_on_windows + 'getcwd', + os_getcwd = self.llexternal(UNDERSCORE_ON_WIN32 + 'getcwd', [rffi.CCHARP, rffi.SIZE_T], rffi.CCHARP) @@ -1227,7 +1228,7 @@ @registering(os.getcwdu, condition=sys.platform=='win32') def register_os_getcwdu(self): - os_wgetcwd = self.llexternal(underscore_on_windows + 'wgetcwd', + os_wgetcwd = self.llexternal(UNDERSCORE_ON_WIN32 + 'wgetcwd', [rffi.CWCHARP, rffi.SIZE_T], rffi.CWCHARP) @@ -1479,7 +1480,8 @@ @registering(os.isatty) def register_os_isatty(self): - os_isatty = self.llexternal(underscore_on_windows+'isatty', [rffi.INT], rffi.INT) + os_isatty = self.llexternal(UNDERSCORE_ON_WIN32 + 'isatty', + [rffi.INT], rffi.INT) def isatty_llimpl(fd): if not rposix.is_valid_fd(fd): @@ -1672,7 +1674,8 @@ 
@registering(os.umask) def register_os_umask(self): - os_umask = self.llexternal(underscore_on_windows+'umask', [rffi.MODE_T], rffi.MODE_T) + os_umask = self.llexternal(UNDERSCORE_ON_WIN32 + 'umask', + [rffi.MODE_T], rffi.MODE_T) def umask_llimpl(newmask): res = os_umask(rffi.cast(rffi.MODE_T, newmask)) diff --git a/rpython/rtyper/module/support.py b/rpython/rtyper/module/support.py --- a/rpython/rtyper/module/support.py +++ b/rpython/rtyper/module/support.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi _WIN32 = sys.platform.startswith('win') -underscore_on_windows = '_' if _WIN32 else '' +UNDERSCORE_ON_WIN32 = '_' if _WIN32 else '' # utility conversion functions class LLSupport: @@ -56,7 +56,7 @@ @staticmethod def posix_function_name(name): - return underscore_on_windows + name + return UNDERSCORE_ON_WIN32 + name @staticmethod def ll_os_name(name): @@ -75,7 +75,7 @@ @staticmethod def posix_function_name(name): - return underscore_on_windows + 'w' + name + return UNDERSCORE_ON_WIN32 + 'w' + name @staticmethod def ll_os_name(name): From noreply at buildbot.pypy.org Sun Dec 8 00:45:43 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 8 Dec 2013 00:45:43 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131207234543.DDE841C0315@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68398:f2a17348e70d Date: 2013-12-07 12:01 -0800 http://bitbucket.org/pypy/pypy/changeset/f2a17348e70d/ Log: cleanup diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -1,16 +1,16 @@ +import errno import math -import errno import py import sys -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.tool.sourcetools import func_with_new_name from rpython.conftest import cdir from rpython.rlib import jit, rposix +from rpython.rlib.rfloat import INFINITY, NAN, isfinite, isinf, isnan +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.module.support import UNDERSCORE_ON_WIN32 +from rpython.tool.sourcetools import func_with_new_name from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform -from rpython.rlib.rfloat import isfinite, isinf, isnan, INFINITY, NAN -from rpython.rtyper.module.support import UNDERSCORE_ON_WIN32 use_library_isinf_isnan = False if sys.platform == "win32": @@ -19,8 +19,8 @@ # It's no more possible to take the address of some math functions. # Ensure that the compiler chooses real functions instead. 
eci = ExternalCompilationInfo( - includes = ['math.h', 'float.h'], - post_include_bits = ['#pragma function(floor)'], + includes=['math.h', 'float.h'], + post_include_bits=['#pragma function(floor)'], ) use_library_isinf_isnan = True else: @@ -28,8 +28,8 @@ # Some math functions are C99 and not defined by the Microsoft compiler cdir = py.path.local(cdir) math_eci = ExternalCompilationInfo( - include_dirs = [cdir], - includes = ['src/ll_math.h'], + include_dirs=[cdir], + includes=['src/ll_math.h'], separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', @@ -111,8 +111,8 @@ while VERY_LARGE_FLOAT * 100.0 != INFINITY: VERY_LARGE_FLOAT *= 64.0 -_lib_isnan = llexternal("_isnan", [lltype.Float], lltype.Signed) -_lib_finite = llexternal("_finite", [lltype.Float], lltype.Signed) +_lib_isnan = llexternal('_isnan', [lltype.Float], lltype.Signed) +_lib_finite = llexternal('_finite', [lltype.Float], lltype.Signed) def ll_math_isnan(y): # By not calling into the external function the JIT can inline this. @@ -343,7 +343,7 @@ def ll_math_sqrt(x): if x < 0.0: - raise ValueError, "math domain error" + raise ValueError("math domain error") if isfinite(x): return sqrt_nonneg(x) @@ -431,4 +431,5 @@ for name in unary_math_functions: can_overflow = name in unary_math_functions_can_overflow c99 = name in unary_math_functions_c99 - globals()['ll_math_' + name] = new_unary_math_function(name, can_overflow, c99) + globals()['ll_math_' + name] = new_unary_math_function(name, can_overflow, + c99) From noreply at buildbot.pypy.org Sun Dec 8 10:47:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 8 Dec 2013 10:47:37 +0100 (CET) Subject: [pypy-commit] pypy default: Kill a parameter not used any more, and a corresponding comment. Message-ID: <20131208094737.D57F41C010D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68399:c17d6f59a5c0 Date: 2013-12-08 10:14 +0100 http://bitbucket.org/pypy/pypy/changeset/c17d6f59a5c0/ Log: Kill a parameter not used any more, and a corresponding comment. diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -377,7 +377,7 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() @@ -601,7 +601,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +611,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None From noreply at buildbot.pypy.org Sun Dec 8 10:47:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 8 Dec 2013 10:47:39 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for issue1655. 
Message-ID: <20131208094739.0EAE41C1485@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68400:160192d5cbdc Date: 2013-12-08 10:46 +0100 http://bitbucket.org/pypy/pypy/changeset/160192d5cbdc/ Log: Fix for issue1655. diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -384,15 +386,15 @@ def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -699,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c --- a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c +++ b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c @@ -567,3 +567,13 @@ errno = result + 1; return result; } + +EXPORT(int *) test_issue1655(char const *tag, int *len) +{ + static int data[] = { -1, -2, -3, -4 }; + *len = -42; + if (strcmp(tag, "testing!") != 0) + return NULL; + *len = sizeof(data) / sizeof(data[0]); + return data; +} diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py @@ -559,3 +559,25 @@ assert (res, n) == (42, 43) set_errno(0) assert get_errno() == 0 + + def test_issue1655(self): + def ret_list_p(icount): + def 
sz_array_p(obj, func, args): + assert ('.LP_c_int object' in repr(obj) or + '.LP_c_long object' in repr(obj)) + assert repr(args) in ("('testing!', c_int(4))", + "('testing!', c_long(4))") + assert args[icount].value == 4 + return [ obj[i] for i in range(args[icount].value) ] + return sz_array_p + + get_data_prototype = CFUNCTYPE(POINTER(c_int), + c_char_p, POINTER(c_int)) + get_data_paramflag = ((1,), (2,)) + get_data_signature = ('test_issue1655', dll) + + get_data = get_data_prototype( get_data_signature, get_data_paramflag ) + assert get_data('testing!') == 4 + + get_data.errcheck = ret_list_p(1) + assert get_data('testing!') == [-1, -2, -3, -4] From noreply at buildbot.pypy.org Sun Dec 8 11:40:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 8 Dec 2013 11:40:08 +0100 (CET) Subject: [pypy-commit] cffi default: An extra pair of lines in the documentation Message-ID: <20131208104008.298171C01AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1440:3614850aa890 Date: 2013-12-08 11:39 +0100 http://bitbucket.org/cffi/cffi/changeset/3614850aa890/ Log: An extra pair of lines in the documentation diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1266,6 +1266,8 @@ can pass it around to C functions or store it into C structures. Later, you can use ``ffi.from_handle(p)`` to retrive the original ``python_object`` from a value with the same ``void *`` pointer. The +cdata object returned by ``new_handle()`` must be kept alive (and, in +turn, it keeps alive the ``python_object`` too). In other words, the cdata object returned by ``new_handle()`` has *ownership*, in the same sense as ``ffi.new()`` or ``ffi.gc()``: the association ``void * -> python_object`` is only valid as long as *this* exact cdata returned by From noreply at buildbot.pypy.org Sun Dec 8 11:57:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 8 Dec 2013 11:57:01 +0100 (CET) Subject: [pypy-commit] cffi default: Reword this paragraph Message-ID: <20131208105701.C0F851C03FB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1441:8e0915e9dbdd Date: 2013-12-08 11:56 +0100 http://bitbucket.org/cffi/cffi/changeset/8e0915e9dbdd/ Log: Reword this paragraph diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1265,15 +1265,20 @@ ``void *`` that contains an opaque reference to ``python_object``. You can pass it around to C functions or store it into C structures. Later, you can use ``ffi.from_handle(p)`` to retrive the original -``python_object`` from a value with the same ``void *`` pointer. The -cdata object returned by ``new_handle()`` must be kept alive (and, in -turn, it keeps alive the ``python_object`` too). In other words, the -cdata object returned by ``new_handle()`` has *ownership*, in the same -sense as ``ffi.new()`` or ``ffi.gc()``: the association ``void * -> -python_object`` is only valid as long as *this* exact cdata returned by -``new_handle()`` is alive. *Calling ffi.from_handle(p) is invalid and -will likely crash if the cdata object returned by new_handle() is not -kept alive!* *New in version 0.7.* +``python_object`` from a value with the same ``void *`` pointer. +*New in version 0.7.* + +Note that ``from_handle()`` conceptually works like this: it searches in +the list of cdata objects made by ``new_handle()`` the one which has got +the same ``void *`` value, and then it fetches in that cdata object the +corresponding Python object. 
It will crash if the cdata object returned +by ``new_handle()`` is not alive any more! (Obviously, the real +implementation is more efficient than suggested here.) In other words, +the result of ``new_handle()`` has *ownership* (similarly to +``ffi.new()`` or ``ffi.gc()``) in the sense that the association ``void +* -> python_object`` is only valid as long as *this* exact cdata +returned by ``new_handle()`` is alive. You must keep it alive (but the +Python object itself is kept alive by it automatically). .. "versionadded:: 0.7" --- inlined in the previous paragraph From noreply at buildbot.pypy.org Sun Dec 8 12:11:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 8 Dec 2013 12:11:03 +0100 (CET) Subject: [pypy-commit] cffi default: In-progress Message-ID: <20131208111103.539DE1C010D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1442:e0127af4bf42 Date: 2013-12-08 12:10 +0100 http://bitbucket.org/cffi/cffi/changeset/e0127af4bf42/ Log: In-progress diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1270,15 +1270,15 @@ Note that ``from_handle()`` conceptually works like this: it searches in the list of cdata objects made by ``new_handle()`` the one which has got -the same ``void *`` value, and then it fetches in that cdata object the -corresponding Python object. It will crash if the cdata object returned -by ``new_handle()`` is not alive any more! (Obviously, the real +the same ``void *`` value; and then it fetches in that cdata object the +corresponding Python object. The cdata object keeps the Python object +alive, but if the cdata object *itself* is not alive any more, then it +will crash! (Obviously, the real implementation is more efficient than suggested here.) In other words, the result of ``new_handle()`` has *ownership* (similarly to ``ffi.new()`` or ``ffi.gc()``) in the sense that the association ``void * -> python_object`` is only valid as long as *this* exact cdata -returned by ``new_handle()`` is alive. You must keep it alive (but the -Python object itself is kept alive by it automatically). +returned by ``new_handle()`` is alive. .. "versionadded:: 0.7" --- inlined in the previous paragraph From noreply at buildbot.pypy.org Sun Dec 8 12:28:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 8 Dec 2013 12:28:49 +0100 (CET) Subject: [pypy-commit] cffi default: Simplify a bit this paragraph. Message-ID: <20131208112849.390291C01AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1443:c6b154ad47ea Date: 2013-12-08 12:28 +0100 http://bitbucket.org/cffi/cffi/changeset/c6b154ad47ea/ Log: Simplify a bit this paragraph. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1272,13 +1272,10 @@ the list of cdata objects made by ``new_handle()`` the one which has got the same ``void *`` value; and then it fetches in that cdata object the corresponding Python object. The cdata object keeps the Python object -alive, but if the cdata object *itself* is not alive any more, then it -will crash! (Obviously, the real -implementation is more efficient than suggested here.) In other words, -the result of ``new_handle()`` has *ownership* (similarly to -``ffi.new()`` or ``ffi.gc()``) in the sense that the association ``void -* -> python_object`` is only valid as long as *this* exact cdata -returned by ``new_handle()`` is alive. 
+alive, similar to how ``ffi.new()`` returns a cdata object that keeps a +piece of memory alive. If the cdata object *itself* is not alive any +more, then the association ``void * -> python_object`` is dead and +``from_handle()`` will crash. .. "versionadded:: 0.7" --- inlined in the previous paragraph From noreply at buildbot.pypy.org Sun Dec 8 12:32:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 8 Dec 2013 12:32:10 +0100 (CET) Subject: [pypy-commit] cffi default: Put back the big warning in italic. Message-ID: <20131208113210.6A1961C01AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1444:8d9e0e99b080 Date: 2013-12-08 12:31 +0100 http://bitbucket.org/cffi/cffi/changeset/8d9e0e99b080/ Log: Put back the big warning in italic. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1266,6 +1266,8 @@ can pass it around to C functions or store it into C structures. Later, you can use ``ffi.from_handle(p)`` to retrive the original ``python_object`` from a value with the same ``void *`` pointer. +*Calling ffi.from_handle(p) is invalid and will likely crash if +the cdata object returned by new_handle() is not kept alive!* *New in version 0.7.* Note that ``from_handle()`` conceptually works like this: it searches in From noreply at buildbot.pypy.org Mon Dec 9 11:49:17 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 9 Dec 2013 11:49:17 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: add myself Message-ID: <20131209104917.7C7AC1C01AE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5117:9bb6c3e692ac Date: 2013-12-09 12:48 +0200 http://bitbucket.org/pypy/extradoc/changeset/9bb6c3e692ac/ Log: add myself diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -13,6 +13,7 @@ Armin Rigo private Romain Guillebert 11-19 Ermina Christian Clauss 11-12 & 18-19 I live nearby +Maciej Fijalkowski 11-18 Ermina ==================== ============== ======================= @@ -24,7 +25,6 @@ Romain Guillebert ? ? Antonio Cuni ? ? Michael Foord ? ? -Maciej Fijalkowski ? ? David Schneider ? ? Jacob Hallen ? ? Laura Creighton ? ? From noreply at buildbot.pypy.org Mon Dec 9 18:16:48 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 9 Dec 2013 18:16:48 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: add unaligned raw read functions Message-ID: <20131209171648.C87011C07AC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r68401:e09e70a66110 Date: 2013-12-09 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/e09e70a66110/ Log: add unaligned raw read functions From noreply at buildbot.pypy.org Mon Dec 9 18:16:50 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 9 Dec 2013 18:16:50 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: mark functions that need to check alignment (what about JIT?) Message-ID: <20131209171650.1854E1C1473@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r68402:25d255b374ed Date: 2013-12-09 19:15 +0200 http://bitbucket.org/pypy/pypy/changeset/25d255b374ed/ Log: mark functions that need to check alignment (what about JIT?) 
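A rough plain-Python sketch, not part of the changeset below (the patch itself only adds a TODO for the actual check): on strict-alignment targets a pointer-sized load is safe only when the address is a multiple of the item size, while a byte-wise copy is always safe. The buffer, offset and format code here are invented for the illustration; only the standard struct module is used.

    import struct

    buf = bytearray(16)                        # 16 bytes of zeroed "raw storage"
    struct.pack_into('d', buf, 1, 2.5)         # store a double at offset 1, byte by byte (always safe)
    value, = struct.unpack_from('d', buf, 1)   # byte-wise read back, also safe at any offset
    assert value == 2.5

    itemsize = struct.calcsize('d')            # 8 on common platforms
    print((1 % itemsize) == 0)                 # False: a direct pointer load at offset 1 would need
                                               # the unaligned (copying) path instead

The raw_load emitted by the rtyping helper in the diff is the direct-load case, which is why raw_storage_getitem carries a TODO to check the alignment of the pointer first and the specialize_call comment talks about copying the memory when it is unaligned.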
diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -15,9 +15,15 @@ track_allocation=track_allocation, zero=zero) +def raw_storage_getitem_unaligned(TP, storage, index): + "NOT_RPYTHON" + return rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] + def raw_storage_getitem(TP, storage, index): "NOT_RPYTHON" - return rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] + ptr = rffi.ptradd(storage, index) + # TODO Check that pointer is aligned for TP + return rffi.cast(rffi.CArrayPtr(TP), ptr)[0] def raw_storage_setitem(storage, index, item): "NOT_RPYTHON" @@ -28,6 +34,23 @@ def free_raw_storage(storage, track_allocation=True): lltype.free(storage, flavor='raw', track_allocation=track_allocation) +class RawStorageGetitemEntryUnaligned(ExtRegistryEntry): + _about_ = raw_storage_getitem_unaligned + + def compute_result_annotation(self, s_TP, s_storage, s_index): + assert s_TP.is_constant() + return annmodel.lltype_to_annotation(s_TP.const) + + def specialize_call(self, hop): + assert hop.args_r[1].lowleveltype == RAW_STORAGE_PTR + v_storage = hop.inputarg(hop.args_r[1], arg=1) + v_index = hop.inputarg(lltype.Signed, arg=2) + hop.exception_cannot_occur() + v_addr = hop.genop('cast_ptr_to_adr', [v_storage], + resulttype=llmemory.Address) + return hop.genop('raw_load', [v_addr, v_index], + resulttype=hop.r_result.lowleveltype) + class RawStorageGetitemEntry(ExtRegistryEntry): _about_ = raw_storage_getitem @@ -36,6 +59,7 @@ return annmodel.lltype_to_annotation(s_TP.const) def specialize_call(self, hop): + # emit code that will 'automatically' copy memory if unaligned assert hop.args_r[1].lowleveltype == RAW_STORAGE_PTR v_storage = hop.inputarg(hop.args_r[1], arg=1) v_index = hop.inputarg(lltype.Signed, arg=2) From noreply at buildbot.pypy.org Tue Dec 10 01:02:02 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 10 Dec 2013 01:02:02 +0100 (CET) Subject: [pypy-commit] pypy py3k: a lame workaround for this failing test_ztranslation Message-ID: <20131210000202.E7BB81C35DB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68403:4380ccd33714 Date: 2013-12-09 15:48 -0800 http://bitbucket.org/pypy/pypy/changeset/4380ccd33714/ Log: a lame workaround for this failing test_ztranslation diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,4 +1,13 @@ from pypy.objspace.fake.checkmodule import checkmodule +from pypy.module.cpyext import pyobject def test_cpyext_translates(): - checkmodule('cpyext', '_ffi') + def from_ref(space, ref): + # XXX: avoid 'assert isinstance(w_type, W_TypeObject)' from the + # original from_ref, just return w_some_obj + return space.w_object + old, pyobject.from_ref = pyobject.from_ref, from_ref + try: + checkmodule('cpyext', '_ffi') + finally: + pyobject.from_ref = old From noreply at buildbot.pypy.org Tue Dec 10 01:02:06 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 10 Dec 2013 01:02:06 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131210000206.0AD871C35DC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68404:35cce2fceb04 Date: 2013-12-09 16:00 -0800 http://bitbucket.org/pypy/pypy/changeset/35cce2fceb04/ Log: merge default diff too long, truncating to 2000 out of 6355 lines diff --git 
a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError) as e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). 
It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python3.lib + library = os.path.join(thisdir, '..', 'include', 'python3') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python3') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python3') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:PyInit_' + modulename] diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. 
Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. +Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. 
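For concreteness, ABI-level use of CFFI looks roughly like this (a minimal
sketch assuming a POSIX C library reachable via ``dlopen(None)``; see the CFFI
documentation for the full story)::

    import cffi
    ffi = cffi.FFI()
    ffi.cdef("int printf(const char *format, ...);")   # declare only what we call
    C = ffi.dlopen(None)                  # open the standard C library
    arg = ffi.new("char[]", "world")      # a freshly allocated C string
    C.printf("hi there, %s!\n", arg)      # prints "hi there, world!"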
+Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. - -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. 
+It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. 
_`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. + +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. + + +Cheers, +Armin Rigo & everybody diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -18,3 +18,24 @@ .. branch: voidtype_strformat Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. 
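The ``is_writable()`` hook added to the buffer interface above is what the new
``ndarray(buffer=...)`` constructor from the ndarray-buffer branch checks before
allowing writes. At application level the feature looks roughly like this (a
sketch mirroring the new tests further down; ``numpypy`` is PyPy's built-in
numpy module):

    import array
    import numpypy as np

    buf = array.array('c', ['\x00'] * 6)           # a writable 6-byte raw buffer
    a = np.ndarray((3,), buffer=buf, dtype='i2')   # three int16s sharing buf's memory
    a[0] = ord('b')                                # writes go straight into buf
    assert a.base is buf                           # buf is kept alive as the array's base
    # a read-only buffer (e.g. an mmap opened with ACCESS_READ) instead yields an
    # array that raises ValueError on assignment, which is why is_writable() exists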
diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -59,6 +59,9 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +71,7 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -424,6 +424,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- @@ -3125,6 +3131,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -406,12 +406,12 @@ 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -688,20 +688,22 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, - compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, + init_capsule = 
rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_pycobject(), lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - prefix = 'Py' if translating else 'PyPy' - reinit_tls = rffi.llexternal(prefix + 'Thread_ReInitTLS', [], lltype.Void, + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) global py_fatalerror - py_fatalerror = rffi.llexternal(prefix + '_FatalError', + py_fatalerror = rffi.llexternal('%s_FatalError' % prefix, [CONST_STRING], lltype.Void, compilation_info=eci) add_fork_hook('child', reinit_tls) @@ -745,7 +747,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -811,7 +813,7 @@ INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -823,7 +825,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -858,28 +860,27 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def mangle_name(prefix, name): + if name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - if not rename: - newname = name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1039,7 +1040,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) @@ -1069,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ 
PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void _Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyLong_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing PyPy's functions" +#endif + return PyLong_FromLong(PyThread_get_thread_ident()); """), ]) import threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -9,9 +9,34 @@ x = fl if x > sys.maxsize: raise OverflowError("Too large for a factorial") - if x < 0: - raise ValueError("x must be >= 0") - res = 1 - for i in range(1, x + 1): - res *= i - return res + + if x <= 100: + if x < 0: + raise ValueError("x must be >= 
0") + res = 1 + for i in range(2, x + 1): + res *= i + return res + + #Experimentally this gap seems good + gap = max(100, x>>7) + def _fac_odd(low, high): + if low+gap >= high: + t = 1 + for i in range(low, high, 2): + t *= i + return t + + mid = ((low + high) >> 1) | 1 + return _fac_odd(low, mid) * _fac_odd(mid, high) + + def _fac1(x): + if x <= 2: + return 1, 1, x - 1 + x2 = x >> 1 + f, g, shift = _fac1(x2) + g *= _fac_odd((x2 + 1) | 1, x + 1) + return (f * g, g, shift + x2) + + res, _, shift = _fac1(x) + return res << shift diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py new file mode 100644 --- /dev/null +++ b/pypy/module/math/test/test_factorial.py @@ -0,0 +1,29 @@ +import py +import math +from pypy.module.math import app_math + +def test_factorial_extra(): + for x in range(1000): + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + assert type(r1) == type(r2) + +def test_timing(): + py.test.skip("for manual running only") + import time + x = 5000 + repeat = 1000 + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + t1 = time.time() + for i in range(repeat): + app_math.factorial(x) + t2 = time.time() + for i in range(repeat): + math.factorial(x) + t3 = time.time() + assert r1 == r2 + print (t2 - t1) / repeat + print (t3 - t2) / repeat diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -49,11 +49,24 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, + w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) - if owning: + if w_base is not None: + if owning: + raise OperationError(space.w_ValueError, + space.wrap("Cannot have owning=True when specifying a 
buffer")) + if writable: + impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, + backstrides, storage, w_base) + else: + impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, + strides, backstrides, + storage, w_base) + + elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, backstrides, storage=storage) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,3 +1,5 @@ +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ @@ -20,6 +22,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): @@ -1058,13 +1061,35 @@ offset=0, w_strides=None, order='C'): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_buffer)): - raise OperationError(space.w_NotImplementedError, - space.wrap("unsupported param")) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) + + if not space.is_none(w_buffer): + if (not space.is_none(w_strides)): + raise OperationError(space.w_NotImplementedError, + space.wrap("unsupported param")) + + buf = space.buffer_w(w_buffer) + try: + raw_ptr = buf.get_raw_address() + except ValueError: + raise OperationError(space.w_TypeError, space.wrap( + "Only raw buffers are supported")) + if not shape: + raise OperationError(space.w_TypeError, space.wrap( + "numpy scalars from buffers not supported yet")) + totalsize = support.product(shape) * dtype.get_size() + if totalsize+offset > buf.getlength(): + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) + storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) + storage = rffi.ptradd(storage, offset) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + w_subtype=w_subtype, + w_base=w_buffer, + writable=buf.is_writable()) + if not shape: return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): @@ -1084,8 +1109,6 @@ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
""" - from rpython.rtyper.lltypesystem import rffi - from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -71,7 +71,6 @@ final_strides = arr.get_strides() + strides final_backstrides = arr.get_backstrides() + backstrides final_dtype = subdtype - print self.name,'strides',arr.get_strides(),strides if subdtype.subdtype: final_dtype = subdtype.subdtype return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -238,13 +238,16 @@ while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - w_val = arr_iter.getitem().convert_to(dtype) - if out_iter.first_line: - if identity is not None: - w_val = func(dtype, identity, w_val) + if arr_iter.done(): + w_val = identity else: - cur = temp_iter.getitem() - w_val = func(dtype, cur, w_val) + w_val = arr_iter.getitem().convert_to(dtype) + if out_iter.first_line: + if identity is not None: + w_val = func(dtype, identity, w_val) + else: + cur = temp_iter.getitem() + w_val = func(dtype, cur, w_val) out_iter.setitem(w_val) if cumulative: temp_iter.setitem(w_val) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -219,6 +219,7 @@ class AppTestNumArray(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def w_CustomIndexObject(self, index): class CustomIndexObject(object): def __init__(self, index): @@ -381,6 +382,8 @@ assert a.dtype is dtype(int) a = arange(3, 7, 2) assert (a == [3, 5]).all() + a = arange(3, 8, 2) + assert (a == [3, 5, 7]).all() a = arange(3, dtype=float) assert (a == [0., 1., 2.]).all() assert a.dtype is dtype(float) @@ -1297,6 +1300,9 @@ assert a.sum() == 105 assert a.max() == 14 assert array([]).sum() == 0.0 + assert array([]).reshape(0, 2).sum() == 0. 
+ assert (array([]).reshape(0, 2).sum(0) == [0., 0.]).all() + assert (array([]).reshape(0, 2).prod(0) == [1., 1.]).all() raises(ValueError, 'array([]).max()') assert (a.sum(0) == [30, 35, 40]).all() assert (a.sum(axis=0) == [30, 35, 40]).all() @@ -2078,6 +2084,69 @@ a = np.ndarray([1], dtype=bool) assert a[0] == True + +class AppTestNumArrayFromBuffer(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) + + def setup_class(cls): + from rpython.tool.udir import udir + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + + def test_ndarray_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + a = np.ndarray((3,), buffer=buf, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] + assert a.base is buf + + def test_ndarray_subclass_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + class X(np.ndarray): + pass + a = X((3,), buffer=buf, dtype='i2') + assert type(a) is X + + def test_ndarray_from_buffer_and_offset(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*7) + buf[0] = 'X' + a = np.ndarray((3,), buffer=buf, offset=1, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['X', 'b', '\x00', 'a', '\x00', 'r', '\x00'] + + def test_ndarray_from_buffer_out_of_bounds(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*10) # 20 bytes + info = raises(TypeError, "np.ndarray((11,), buffer=buf, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + info = raises(TypeError, "np.ndarray((5,), buffer=buf, offset=15, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + + def test_ndarray_from_readonly_buffer(self): + import numpypy as np + from mmap import mmap, ACCESS_READ + f = open(self.tmpname, "w+") + f.write("hello") + f.flush() + buf = mmap(f.fileno(), 5, access=ACCESS_READ) + a = np.ndarray((5,), buffer=buf, dtype='c') + raises(ValueError, "a[0] = 'X'") + buf.close() + f.close() + + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -3050,9 +3119,6 @@ exc = raises(IndexError, "a[0][None]") assert exc.value.message == "invalid index" - exc = raises(IndexError, "a[0][None]") - assert exc.value.message == 'invalid index' - a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -332,6 +332,14 @@ self.check_valid_writeable() self.mmap.setslice(start, string) + def is_writable(self): + try: + self.mmap.check_writeable() + except RMMapError: + return False + else: + return True + def get_raw_address(self): self.check_valid() return self.mmap.data diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py --- a/pypy/module/posix/app_startfile.py +++ b/pypy/module/posix/app_startfile.py @@ -7,7 +7,6 @@ ffi.cdef(""" HINSTANCE ShellExecuteA(HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT); HINSTANCE ShellExecuteW(HWND, LPCWSTR, LPCWSTR, LPCWSTR, LPCWSTR, INT); - DWORD GetLastError(void); """) self.NULL = ffi.NULL self.cast = ffi.cast diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -1,20 +1,20 @@ """This is not the JIT :-) 
-This is transformed to become a JIT by code elsewhere: pypy/jit/* +This is transformed to become a JIT by code elsewhere: rpython/jit/* """ -from rpython.tool.pairtype import extendabletype from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside from rpython.rlib import jit from rpython.rlib.jit import current_trace_length, unroll_parameters import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.pycode import PyCode, CO_GENERATOR +from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap + PyFrame._virtualizable_ = ['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', 'cells[*]', diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) - f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call(ConstClass(sin), f1, descr=) + f3 = call(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, f1) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -10,6 +10,7 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change + '0.8.1': '0.8', # did not change } def test_version(): diff --git a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c --- a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c +++ b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c @@ -567,3 +567,13 @@ errno = result + 1; return result; } + +EXPORT(int *) test_issue1655(char const *tag, int *len) +{ + static int data[] = { -1, -2, -3, -4 }; + *len = -42; + if (strcmp(tag, "testing!") != 0) + return NULL; + *len = sizeof(data) / sizeof(data[0]); + return data; +} diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py @@ -559,3 +559,25 @@ assert (res, n) == (42, 43) set_errno(0) assert get_errno() == 0 + + def test_issue1655(self): + def ret_list_p(icount): + def sz_array_p(obj, func, args): + assert ('.LP_c_int object' in repr(obj) or + '.LP_c_long object' in repr(obj)) + assert repr(args) in ("('testing!', c_int(4))", + "('testing!', c_long(4))") + assert args[icount].value == 4 + return [ obj[i] for i in range(args[icount].value) ] + return sz_array_p + + get_data_prototype = CFUNCTYPE(POINTER(c_int), + c_char_p, POINTER(c_int)) + get_data_paramflag = ((1,), (2,)) + get_data_signature = ('test_issue1655', dll) + + get_data = get_data_prototype( get_data_signature, get_data_paramflag ) + assert get_data('testing!') == 4 + + get_data.errcheck = ret_list_p(1) + assert get_data('testing!') == [-1, -2, -3, -4] diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py --- a/pypy/testrunner_cfg.py +++ b/pypy/testrunner_cfg.py @@ -3,7 +3,7 @@ DIRS_SPLIT 
= [ 'translator/c', 'rlib', - 'rpython/memory', 'jit/metainterp', 'rpython/test', + 'memory/test', 'jit/metainterp', 'jit/backend/arm', 'jit/backend/x86', ] diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -48,6 +48,7 @@ def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', copy_to_dir=None, override_pypy_c=None, nostrip=False, withouttk=False): + assert '/' not in rename_pypy_c basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -584,10 +584,6 @@ def consider_op(self, block, opindex): op = block.operations[opindex] argcells = [self.binding(a) for a in op.args] - consider_meth = getattr(self,'consider_op_'+op.opname, - None) - if not consider_meth: - raise Exception,"unknown op: %r" % op # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the @@ -599,7 +595,7 @@ if isinstance(arg, annmodel.SomeImpossibleValue): raise BlockedInference(self, op, opindex) try: - resultcell = consider_meth(*argcells) + resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] e.source = gather_error(self, graph, block, opindex) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -1,69 +1,25 @@ """ Arguments objects. """ -from rpython.annotator.model import SomeTuple, SomeObject +from rpython.annotator.model import SomeTuple +from rpython.flowspace.argument import CallSpec -# for parsing call arguments -class RPythonCallsSpace(object): - """Pseudo Object Space providing almost no real operation. - For the Arguments class: if it really needs other operations, it means - that the call pattern is too complex for R-Python. 
- """ - def newtuple(self, items_s): - if len(items_s) == 1 and items_s[0] is Ellipsis: - res = SomeObject() # hack to get a SomeObject as the *arg - res.from_ellipsis = True - return res - else: - return SomeTuple(items_s) - - def unpackiterable(self, s_obj, expected_length=None): - if isinstance(s_obj, SomeTuple): - return list(s_obj.items) - if (s_obj.__class__ is SomeObject and - getattr(s_obj, 'from_ellipsis', False)): # see newtuple() - return [Ellipsis] - raise CallPatternTooComplex("'*' argument must be SomeTuple") - - def bool(self, s_tup): - assert isinstance(s_tup, SomeTuple) - return bool(s_tup.items) - - -class CallPatternTooComplex(Exception): - pass - - -class ArgumentsForTranslation(object): - w_starstararg = None - def __init__(self, space, args_w, keywords=None, keywords_w=None, - w_stararg=None, w_starstararg=None): - self.w_stararg = w_stararg - assert w_starstararg is None - self.space = space - assert isinstance(args_w, list) - self.arguments_w = args_w - self.keywords = keywords - self.keywords_w = keywords_w - self.keyword_names_w = None - - def __repr__(self): - """ NOT_RPYTHON """ - name = self.__class__.__name__ - if not self.keywords: - return '%s(%s)' % (name, self.arguments_w,) - else: - return '%s(%s, %s, %s)' % (name, self.arguments_w, - self.keywords, self.keywords_w) - +class ArgumentsForTranslation(CallSpec): @property def positional_args(self): if self.w_stararg is not None: - args_w = self.space.unpackiterable(self.w_stararg) + args_w = self.unpackiterable(self.w_stararg) return self.arguments_w + args_w else: return self.arguments_w + def newtuple(self, items_s): + return SomeTuple(items_s) + + def unpackiterable(self, s_obj): + assert isinstance(s_obj, SomeTuple) + return list(s_obj.items) + def fixedunpack(self, argcount): """The simplest argument parsing: get the 'argcount' arguments, or raise a real ValueError if the length is wrong.""" @@ -77,14 +33,12 @@ def prepend(self, w_firstarg): # used often "Return a new Arguments with a new argument inserted first." 
- return ArgumentsForTranslation(self.space, [w_firstarg] + self.arguments_w, - self.keywords, self.keywords_w, self.w_stararg, - self.w_starstararg) + return ArgumentsForTranslation([w_firstarg] + self.arguments_w, + self.keywords, self.w_stararg) def copy(self): - return ArgumentsForTranslation(self.space, self.arguments_w, - self.keywords, self.keywords_w, self.w_stararg, - self.w_starstararg) + return ArgumentsForTranslation(self.arguments_w, self.keywords, + self.w_stararg) def _match_signature(self, scope_w, signature, defaults_w=None): """Parse args and kwargs according to the signature of a code object, @@ -97,7 +51,7 @@ args_w = self.positional_args num_args = len(args_w) - keywords = self.keywords or [] + keywords = self.keywords num_kwds = len(keywords) # put as many positional input arguments into place as available @@ -111,7 +65,7 @@ starargs_w = args_w[co_argcount:] else: starargs_w = [] - scope_w[co_argcount] = self.space.newtuple(starargs_w) + scope_w[co_argcount] = self.newtuple(starargs_w) elif num_args > co_argcount: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) @@ -119,22 +73,17 @@ # handle keyword arguments num_remainingkwds = 0 - keywords_w = self.keywords_w kwds_mapping = None if num_kwds: # kwds_mapping maps target indexes in the scope (minus input_argcount) - # to positions in the keywords_w list - kwds_mapping = [-1] * (co_argcount - input_argcount) + # to keyword names + kwds_mapping = [] # match the keywords given at the call site to the argument names # the called function takes # this function must not take a scope_w, to make the scope not # escape num_remainingkwds = len(keywords) - for i, name in enumerate(keywords): - # If name was not encoded as a string, it could be None. In that - # case, it's definitely not going to be in the signature. - if name is None: - continue + for name in keywords: j = signature.find_argname(name) # if j == -1 nothing happens if j < input_argcount: @@ -142,14 +91,14 @@ if j >= 0: raise ArgErrMultipleValues(name) else: - kwds_mapping[j - input_argcount] = i # map to the right index + kwds_mapping.append(name) num_remainingkwds -= 1 if num_remainingkwds: if co_argcount == 0: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) - raise ArgErrUnknownKwds(self.space, num_remainingkwds, keywords, - kwds_mapping, self.keyword_names_w) + raise ArgErrUnknownKwds(num_remainingkwds, keywords, + kwds_mapping) # check for missing arguments and fill them from the kwds, # or with defaults, if available @@ -157,14 +106,11 @@ if input_argcount < co_argcount: def_first = co_argcount - (0 if defaults_w is None else len(defaults_w)) j = 0 - kwds_index = -1 for i in range(input_argcount, co_argcount): - if kwds_mapping is not None: - kwds_index = kwds_mapping[j] - j += 1 - if kwds_index >= 0: - scope_w[i] = keywords_w[kwds_index] - continue + name = signature.argnames[i] + if name in keywords: + scope_w[i] = keywords[name] + continue defnum = i - def_first if defnum >= 0: scope_w[i] = defaults_w[defnum] @@ -175,8 +121,7 @@ def unpack(self): "Return a ([w1,w2...], {'kw':w3...}) pair." 
- kwds_w = dict(zip(self.keywords, self.keywords_w)) if self.keywords else {} - return self.positional_args, kwds_w + return self.positional_args, self.keywords def match_signature(self, signature, defaults_w): """Parse args and kwargs according to the signature of a code object, @@ -189,41 +134,29 @@ def unmatch_signature(self, signature, data_w): """kind of inverse of match_signature""" - need_cnt = len(self.positional_args) - need_kwds = self.keywords or [] - space = self.space argnames, varargname, kwargname = signature assert kwargname is None cnt = len(argnames) - data_args_w = data_w[:cnt] + need_cnt = len(self.positional_args) if varargname: - data_w_stararg = data_w[cnt] - cnt += 1 - else: - data_w_stararg = space.newtuple([]) + assert len(data_w) == cnt + 1 + stararg_w = self.unpackiterable(data_w[cnt]) + if stararg_w: + args_w = data_w[:cnt] + stararg_w + assert len(args_w) == need_cnt + assert not self.keywords + return ArgumentsForTranslation(args_w, {}) + else: + data_w = data_w[:-1] assert len(data_w) == cnt + assert len(data_w) >= need_cnt + args_w = data_w[:need_cnt] + _kwds_w = dict(zip(argnames[need_cnt:], data_w[need_cnt:])) + keywords_w = [_kwds_w[key] for key in self.keywords] + return ArgumentsForTranslation(args_w, dict(zip(self.keywords, keywords_w))) - unfiltered_kwds_w = {} - if len(data_args_w) >= need_cnt: - args_w = data_args_w[:need_cnt] - for argname, w_arg in zip(argnames[need_cnt:], data_args_w[need_cnt:]): - unfiltered_kwds_w[argname] = w_arg - assert not space.bool(data_w_stararg) - else: - stararg_w = space.unpackiterable(data_w_stararg) - args_w = data_args_w + stararg_w - assert len(args_w) == need_cnt - - keywords = [] - keywords_w = [] - for key in need_kwds: - keywords.append(key) - keywords_w.append(unfiltered_kwds_w[key]) - - return ArgumentsForTranslation(self.space, args_w, keywords, keywords_w) - - @staticmethod - def fromshape(space, (shape_cnt, shape_keys, shape_star, shape_stst), data_w): + @classmethod + def fromshape(cls, (shape_cnt, shape_keys, shape_star), data_w): args_w = data_w[:shape_cnt] p = end_keys = shape_cnt + len(shape_keys) if shape_star: @@ -231,40 +164,12 @@ p += 1 else: w_star = None - if shape_stst: - w_starstar = data_w[p] - p += 1 - else: - w_starstar = None - return ArgumentsForTranslation(space, args_w, list(shape_keys), - data_w[shape_cnt:end_keys], w_star, - w_starstar) + return cls(args_w, dict(zip(shape_keys, data_w[shape_cnt:end_keys])), + w_star) - def flatten(self): - """ Argument <-> list of w_objects together with "shape" information """ - shape_cnt, shape_keys, shape_star, shape_stst = self._rawshape() - data_w = self.arguments_w + [self.keywords_w[self.keywords.index(key)] - for key in shape_keys] - if shape_star: - data_w.append(self.w_stararg) - if shape_stst: - data_w.append(self.w_starstararg) - return (shape_cnt, shape_keys, shape_star, shape_stst), data_w - def _rawshape(self, nextra=0): - shape_cnt = len(self.arguments_w) + nextra # Number of positional args - if self.keywords: - shape_keys = self.keywords[:] # List of keywords (strings) - shape_keys.sort() - else: - shape_keys = [] - shape_star = self.w_stararg is not None # Flag: presence of *arg - shape_stst = self.w_starstararg is not None # Flag: presence of **kwds - return shape_cnt, tuple(shape_keys), shape_star, shape_stst # shape_keys are sorted - - -def rawshape(args, nextra=0): - return args._rawshape(nextra) +def rawshape(args): + return args._rawshape() # @@ -336,31 +241,12 @@ class ArgErrUnknownKwds(ArgErr): - def __init__(self, space, 
num_remainingkwds, keywords, kwds_mapping, - keyword_names_w): + def __init__(self, num_remainingkwds, keywords, kwds_mapping): name = '' self.num_kwds = num_remainingkwds if num_remainingkwds == 1: - for i in range(len(keywords)): - if i not in kwds_mapping: - name = keywords[i] - if name is None: - # We'll assume it's unicode. Encode it. - # Careful, I *think* it should not be possible to - # get an IndexError here but you never know. - try: - if keyword_names_w is None: - raise IndexError - # note: negative-based indexing from the end - w_name = keyword_names_w[i - len(keywords)] - except IndexError: - name = '?' - else: - w_enc = space.wrap(space.sys.defaultencoding) - w_err = space.wrap("replace") - w_name = space.call_method(w_name, "encode", w_enc, - w_err) - name = space.str_w(w_name) + for name in keywords: + if name not in kwds_mapping: break self.kwd_name = name diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -5,19 +5,15 @@ import py import operator from rpython.tool.pairtype import pair, pairtype -from rpython.annotator.model import SomeObject, SomeInteger, SomeBool, s_Bool -from rpython.annotator.model import SomeString, SomeChar, SomeList, SomeDict,\ - SomeOrderedDict -from rpython.annotator.model import SomeUnicodeCodePoint, SomeUnicodeString -from rpython.annotator.model import SomeTuple, SomeImpossibleValue, s_ImpossibleValue -from rpython.annotator.model import SomeInstance, SomeBuiltin, SomeIterator -from rpython.annotator.model import SomePBC, SomeFloat, s_None, SomeByteArray -from rpython.annotator.model import SomeWeakRef -from rpython.annotator.model import SomeAddress, SomeTypedAddressAccess -from rpython.annotator.model import SomeSingleFloat, SomeLongFloat, SomeType -from rpython.annotator.model import unionof, UnionError, missing_operation -from rpython.annotator.model import read_can_only_throw -from rpython.annotator.model import add_knowntypedata, merge_knowntypedata +from rpython.annotator.model import ( + SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, + SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, + SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, + SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, + SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, + missing_operation, read_can_only_throw, add_knowntypedata, + merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.rlib import rarithmetic @@ -197,7 +193,9 @@ getitem_key = getitem_idx_key -class __extend__(pairtype(SomeType, SomeType)): +class __extend__(pairtype(SomeType, SomeType), + pairtype(SomeType, SomeConstantType), + pairtype(SomeConstantType, SomeType),): def union((obj1, obj2)): result = SomeType() diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -12,13 +12,13 @@ SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray) + SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) from rpython.annotator.classdef 
import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import ArgumentsForTranslation, RPythonCallsSpace +from rpython.annotator.argument import ArgumentsForTranslation from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype, llmemory @@ -436,11 +436,7 @@ elif isinstance(x, llmemory.fakeaddress): result = SomeAddress() elif tp is type: - if (x is type(None) or # add cases here if needed - x.__module__ == 'rpython.rtyper.lltypesystem.lltype'): - result = SomeType() - else: - result = SomePBC([self.getdesc(x)]) + result = SomeConstantType(x, self) elif callable(x): if hasattr(x, 'im_self') and hasattr(x, 'im_func'): # on top of PyPy, for cases like 'l.append' where 'l' is a @@ -700,12 +696,11 @@ return op def build_args(self, op, args_s): - space = RPythonCallsSpace() if op == "simple_call": - return ArgumentsForTranslation(space, list(args_s)) + return ArgumentsForTranslation(list(args_s)) elif op == "call_args": return ArgumentsForTranslation.fromshape( - space, args_s[0].const, # shape + args_s[0].const, # shape list(args_s[1:])) def ondegenerated(self, what, s_value, where=None, called_from_graph=None): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -312,7 +312,7 @@ r_func, nimplicitarg = s_repr.const.get_r_implfunc() nbargs = len(args_s) + nimplicitarg - s_sigs = r_func.get_s_signatures((nbargs, (), False, False)) + s_sigs = r_func.get_s_signatures((nbargs, (), False)) if len(s_sigs) != 1: raise TyperError("cannot hlinvoke callable %r with not uniform" "annotations: %r" % (s_repr.const, diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -879,11 +879,12 @@ self.name, flags) + @staticmethod def consider_call_site(bookkeeper, family, descs, args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): # we are computing call families and call tables that always contain @@ -1039,11 +1040,12 @@ args = args.prepend(s_self) return self.funcdesc.pycall(schedule, args, s_previous_result, op) + @staticmethod def consider_call_site(bookkeeper, family, descs, args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): return self.funcdesc diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -502,6 +502,14 @@ else: return kt.__name__ +class SomeConstantType(SomePBC): + can_be_None = False + subset_of = None + def __init__(self, x, bk): + self.descriptions = set([bk.getdesc(x)]) + self.knowntype = type(x) + self.const = x + class 
SomeBuiltin(SomeObject): "Stands for a built-in function or method with special-cased analysis." diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -1,31 +1,19 @@ # base annotation policy for specialization from rpython.annotator.specialize import default_specialize as default -from rpython.annotator.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype, specialize_arg_or_var -from rpython.annotator.specialize import memo, specialize_call_location +from rpython.annotator.specialize import ( + specialize_argvalue, specialize_argtype, specialize_arglistitemtype, + specialize_arg_or_var, memo, specialize_call_location) -class BasicAnnotatorPolicy(object): +class AnnotatorPolicy(object): + """ + Possibly subclass and pass an instance to the annotator to control + special-casing during annotation + """ def event(pol, bookkeeper, what, *args): pass - def get_specializer(pol, tag): - return pol.no_specialization - - def no_specialization(pol, funcdesc, args_s): - return funcdesc.cachedgraph(None) - - def no_more_blocks_to_annotate(pol, annotator): - # hint to all pending specializers that we are done - for callback in annotator.bookkeeper.pending_specializations: - callback() - del annotator.bookkeeper.pending_specializations[:] - -class AnnotatorPolicy(BasicAnnotatorPolicy): - """ - Possibly subclass and pass an instance to the annotator to control special casing during annotation - """ - def get_specializer(pol, directive): if directive is None: return pol.default_specialize @@ -74,3 +62,9 @@ def specialize__ll_and_arg(pol, *args): from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy return LowLevelAnnotatorPolicy.specialize__ll_and_arg(*args) + + def no_more_blocks_to_annotate(pol, annotator): + # hint to all pending specializers that we are done + for callback in annotator.bookkeeper.pending_specializations: + callback() + del annotator.bookkeeper.pending_specializations[:] diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -5,6 +5,7 @@ from rpython.tool.algo.unionfind import UnionFind from rpython.flowspace.model import Block, Link, Variable, SpaceOperation from rpython.flowspace.model import checkgraph From noreply at buildbot.pypy.org Tue Dec 10 01:56:52 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 10 Dec 2013 01:56:52 +0100 (CET) Subject: [pypy-commit] pypy default: minor cleanup Message-ID: <20131210005652.1EF871C01AE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68405:d638fc970ebc Date: 2013-12-09 16:56 -0800 http://bitbucket.org/pypy/pypy/changeset/d638fc970ebc/ Log: minor cleanup diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,5 +1,7 @@ def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. 
Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: @@ -14,15 +16,15 @@ res *= i return res - #Experimentally this gap seems good - gap = max(100, x>>7) + # Experimentally this gap seems good + gap = max(100, x >> 7) def _fac_odd(low, high): - if low+gap >= high: + if low + gap >= high: t = 1 for i in range(low, high, 2): t *= i return t - + mid = ((low + high) >> 1) | 1 return _fac_odd(low, mid) * _fac_odd(mid, high) From noreply at buildbot.pypy.org Tue Dec 10 02:20:50 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 10 Dec 2013 02:20:50 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix breakage from the previous merge commit Message-ID: <20131210012050.9CAF01C03B3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68406:3680c352658b Date: 2013-12-09 17:19 -0800 http://bitbucket.org/pypy/pypy/changeset/3680c352658b/ Log: fix breakage from the previous merge commit diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -693,6 +693,7 @@ else: prefix = 'cpyexttest' init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, + compilation_info=eci, _nowrapper=True) init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ From noreply at buildbot.pypy.org Tue Dec 10 04:16:29 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 10 Dec 2013 04:16:29 +0100 (CET) Subject: [pypy-commit] pypy py3k: adjust per latest cffi: py3 str() now acts like repr() Message-ID: <20131210031629.BF3D91C03B3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68407:5144606eb31e Date: 2013-12-09 19:15 -0800 http://bitbucket.org/pypy/pypy/changeset/5144606eb31e/ Log: adjust per latest cffi: py3 str() now acts like repr() diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -59,9 +59,6 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) - def descr_str(self, space): - return space.wrap(self.buffer.as_str()) - MiniBuffer.typedef = TypeDef( "buffer", @@ -71,7 +68,6 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), - __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False From noreply at buildbot.pypy.org Wed Dec 11 11:11:19 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 11 Dec 2013 11:11:19 +0100 (CET) Subject: [pypy-commit] stmgc contention-counter: add contention counters on objects for experimentation Message-ID: <20131211101119.82E691C010D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: contention-counter Changeset: r555:83115a1bcd67 Date: 2013-12-11 11:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/83115a1bcd67/ Log: add contention counters on objects for experimentation diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -94,6 +94,41 @@ fxcache_clear(&thread_descriptor->recent_reads_cache); } + +/************************************************************/ +/* CONTENTION COUNTER THINGS */ +#define RPY_STM_CONT_RMA_SAMPLES 64 + +void abort_because_of(gcptr L) +{ + gcptr obj = (gcptr)L->h_original; + if (!obj || (L->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + obj = L; + + /* abort-object should 
never be a priv_from_prot + *without* an original */ + assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + } + + //g->h_contention += (g->h_contention + 1) << 2; + revision_t old = (RPY_STM_CONT_RMA_SAMPLES - 1) * obj->h_contention; + old += 1000000; + obj->h_contention = old / RPY_STM_CONT_RMA_SAMPLES + + ((old % RPY_STM_CONT_RMA_SAMPLES) != 0); +} + +void commit_object(gcptr L) +{ + gcptr obj = L; + if (!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL) && L->h_original) + obj = (gcptr)L->h_original; + + revision_t old = obj->h_contention; + revision_t old_rma = (RPY_STM_CONT_RMA_SAMPLES - 1) * old; + old_rma += old >> 2; + obj->h_contention = old_rma / RPY_STM_CONT_RMA_SAMPLES; +} + /************************************************************/ static void ValidateNow(struct tx_descriptor *); @@ -148,8 +183,10 @@ /* check P->h_revision->h_revision: if a pointer, then it means the backup copy has been stolen into a public object and then modified by some other thread. Abort. */ - if (IS_POINTER(((gcptr)P->h_revision)->h_revision)) + if (IS_POINTER(((gcptr)P->h_revision)->h_revision)) { + abort_because_of(P); AbortTransaction(ABRT_STOLEN_MODIFIED); + } goto add_in_recent_reads_cache; } @@ -830,6 +867,7 @@ { dprintf(("validation failed: " "%p has a more recent revision\n", R)); + abort_because_of(R); return 0; } } @@ -865,6 +903,7 @@ */ dprintf(("validation failed: " "%p is locked by another thread\n", R)); + abort_because_of(R); return 0; } } @@ -1157,6 +1196,7 @@ if (IS_POINTER(v)) /* "has a more recent revision" */ { assert(v != 0); + abort_because_of(R); AbortTransaction(ABRT_COMMIT); } if (v >= LOCKED) // already locked by someone else @@ -1343,7 +1383,7 @@ gcptrlist_insert(&stm_prebuilt_gcroots, R); pthread_mutex_unlock(&mutex_prebuilt_gcroots); } - + commit_object(R); } G2L_LOOP_END; g2l_clear(&d->public_to_private); @@ -1383,8 +1423,10 @@ while (1) { revision_t v = ACCESS_ONCE(B->h_revision); - if (IS_POINTER(v)) /* "was modified" */ + if (IS_POINTER(v)) { /* "was modified" */ + abort_because_of(P); AbortTransaction(ABRT_STOLEN_MODIFIED); + } if (bool_cas(&B->h_revision, v, (revision_t)P)) break; diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -758,6 +758,7 @@ /* has a more recent revision. Oups. */ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); + abort_because_of(obj); AbortTransactionAfterCollect(d, ABRT_COLLECT_MAJOR); /* fix_list_of_read_objects should not run */ gcptrlist_clear(&d->list_of_read_objects); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -368,6 +368,7 @@ */ dprintf(("public_to_young: %p was modified! 
abort!\n", P)); item->val = NULL; + abort_because_of(P); AbortTransactionAfterCollect(d, ABRT_COLLECT_MINOR); continue; } diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -12,6 +12,7 @@ revision_t h_tid; revision_t h_revision; revision_t h_original; + revision_t h_contention; } *gcptr; From noreply at buildbot.pypy.org Wed Dec 11 20:21:07 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 11 Dec 2013 20:21:07 +0100 (CET) Subject: [pypy-commit] pypy default: support bool order argument for ndarray.__new__ Message-ID: <20131211192107.61E081C01AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68408:fb30e6aa03bb Date: 2013-12-11 14:16 -0500 http://bitbucket.org/pypy/pypy/changeset/fb30e6aa03bb/ Log: support bool order argument for ndarray.__new__ diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1065,9 +1065,9 @@ return w_obj pass - at unwrap_spec(offset=int, order=str) + at unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, order='C'): + offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides dtype = space.interp_w(interp_dtype.W_Dtype, @@ -1101,6 +1101,11 @@ if not shape: return W_NDimArray.new_scalar(space, dtype) + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_CORDER: + order = 'C' + else: + order = 'F' if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -271,6 +271,17 @@ # test uninitialized value crash? 
         assert len(str(a)) > 0
+        import sys
+        for order in [False, True, 'C', 'F']:
+            a = ndarray.__new__(ndarray, (2, 3), float, order=order)
+            assert a.shape == (2, 3)
+            if order in [True, 'F'] and '__pypy__' not in sys.builtin_module_names:
+                assert a.flags['F']
+                assert not a.flags['C']
+            else:
+                assert a.flags['C']
+                assert not a.flags['F']
+
     def test_ndmin(self):
         from numpypy import array

From noreply at buildbot.pypy.org  Wed Dec 11 22:06:48 2013
From: noreply at buildbot.pypy.org (bdkearns)
Date: Wed, 11 Dec 2013 22:06:48 +0100 (CET)
Subject: [pypy-commit] pypy default: oct/hex for numpy scalars
Message-ID: <20131211210648.779D71C32BC@cobra.cs.uni-duesseldorf.de>

Author: Brian Kearns
Branch: 
Changeset: r68409:fbcabd23ed43
Date: 2013-12-11 16:04 -0500
http://bitbucket.org/pypy/pypy/changeset/fbcabd23ed43/

Log: oct/hex for numpy scalars

diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py
--- a/pypy/module/micronumpy/interp_boxes.py
+++ b/pypy/module/micronumpy/interp_boxes.py
@@ -163,6 +163,12 @@
         assert isinstance(box, W_Float64Box)
         return space.wrap(box.value)
+    def descr_oct(self, space):
+        return space.oct(self.descr_int(space))
+
+    def descr_hex(self, space):
+        return space.hex(self.descr_int(space))
+
     def descr_nonzero(self, space):
         dtype = self.get_dtype(space)
         return space.wrap(dtype.itemtype.bool(self))
@@ -511,6 +517,8 @@
     __long__ = interp2app(W_GenericBox.descr_long),
     __float__ = interp2app(W_GenericBox.descr_float),
     __nonzero__ = interp2app(W_GenericBox.descr_nonzero),
+    __oct__ = interp2app(W_GenericBox.descr_oct),
+    __hex__ = interp2app(W_GenericBox.descr_hex),
     __add__ = interp2app(W_GenericBox.descr_add),
     __sub__ = interp2app(W_GenericBox.descr_sub),
diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py
--- a/pypy/module/micronumpy/test/test_scalar.py
+++ b/pypy/module/micronumpy/test/test_scalar.py
@@ -18,6 +18,15 @@
         #raises(TypeError, np.complex_, '1+2j')
         assert math.isnan(np.complex_(None))
+    def test_builtin(self):
+        import numpy as np
+        assert oct(np.int32(11)) == '013'
+        assert oct(np.float32(11.6)) == '013'
+        assert oct(np.complex64(11-12j)) == '013'
+        assert hex(np.int32(11)) == '0xb'
+        assert hex(np.float32(11.6)) == '0xb'
+        assert hex(np.complex64(11-12j)) == '0xb'
+
     def test_pickle(self):
         from numpypy import dtype, zeros
         try:

From noreply at buildbot.pypy.org  Thu Dec 12 01:25:13 2013
From: noreply at buildbot.pypy.org (bdkearns)
Date: Thu, 12 Dec 2013 01:25:13 +0100 (CET)
Subject: [pypy-commit] pypy default: support ndarray.flags.fnc/forc
Message-ID: <20131212002513.A08FA1C010D@cobra.cs.uni-duesseldorf.de>

Author: Brian Kearns
Branch: 
Changeset: r68410:750ec15cf8e6
Date: 2013-12-11 19:23 -0500
http://bitbucket.org/pypy/pypy/changeset/750ec15cf8e6/

Log: support ndarray.flags.fnc/forc

diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/interp_flagsobj.py
--- a/pypy/module/micronumpy/interp_flagsobj.py
+++ b/pypy/module/micronumpy/interp_flagsobj.py
@@ -18,6 +18,16 @@
     def descr_get_writeable(self, space):
         return space.w_True
+    def descr_get_fnc(self, space):
+        return space.wrap(
+            space.is_true(self.descr_get_fortran(space)) and not
+            space.is_true(self.descr_get_contiguous(space)))
+
+    def descr_get_forc(self, space):
+        return space.wrap(
+            space.is_true(self.descr_get_fortran(space)) or
+            space.is_true(self.descr_get_contiguous(space)))
+
     def descr_getitem(self, space, w_item):
         key = space.str_w(w_item)
         if key == "C" or key == "CONTIGUOUS" or key ==
"C_CONTIGUOUS": @@ -26,6 +36,10 @@ return self.descr_get_fortran(space) if key == "W" or key == "WRITEABLE": return self.descr_get_writeable(space) + if key == "FNC": + return self.descr_get_fnc(space) + if key == "FORC": + return self.descr_get_forc(space) raise OperationError(space.w_KeyError, space.wrap( "Unknown flag")) @@ -56,4 +70,6 @@ f_contiguous = GetSetProperty(W_FlagsObject.descr_get_fortran), fortran = GetSetProperty(W_FlagsObject.descr_get_fortran), writeable = GetSetProperty(W_FlagsObject.descr_get_writeable), + fnc = GetSetProperty(W_FlagsObject.descr_get_fnc), + forc = GetSetProperty(W_FlagsObject.descr_get_forc), ) diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -12,6 +12,10 @@ a = np.array([1,2,3]) assert a.flags.c_contiguous == True assert a.flags['W'] == True + assert a.flags.fnc == False + assert a.flags.forc == True + assert a.flags['FNC'] == False + assert a.flags['FORC'] == True raises(KeyError, "a.flags['blah']") raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") raises((TypeError, AttributeError), "a.flags.c_contiguous = False") From noreply at buildbot.pypy.org Thu Dec 12 09:52:18 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 12 Dec 2013 09:52:18 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: add me to leysin Message-ID: <20131212085218.44BC81C015D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5118:d0ccfcf501f9 Date: 2013-12-12 09:51 +0100 http://bitbucket.org/pypy/extradoc/changeset/d0ccfcf501f9/ Log: add me to leysin diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -14,6 +14,7 @@ Romain Guillebert 11-19 Ermina Christian Clauss 11-12 & 18-19 I live nearby Maciej Fijalkowski 11-18 Ermina +Remi Meier 11-19 Ermina ==================== ============== ======================= From noreply at buildbot.pypy.org Thu Dec 12 23:49:08 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 12 Dec 2013 23:49:08 +0100 (CET) Subject: [pypy-commit] pypy py3k: A slighly better way to monkeypatch pyobject.from_ref() Message-ID: <20131212224908.466C41C015D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r68411:39d9a53d774b Date: 2013-12-12 23:48 +0100 http://bitbucket.org/pypy/pypy/changeset/39d9a53d774b/ Log: A slighly better way to monkeypatch pyobject.from_ref() diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,13 +1,10 @@ from pypy.objspace.fake.checkmodule import checkmodule from pypy.module.cpyext import pyobject -def test_cpyext_translates(): +def test_cpyext_translates(monkeypatch): def from_ref(space, ref): # XXX: avoid 'assert isinstance(w_type, W_TypeObject)' from the # original from_ref, just return w_some_obj return space.w_object - old, pyobject.from_ref = pyobject.from_ref, from_ref - try: - checkmodule('cpyext', '_ffi') - finally: - pyobject.from_ref = old + monkeypatch.setattr(pyobject, 'from_ref', from_ref) + checkmodule('cpyext', '_ffi') From noreply at buildbot.pypy.org Fri Dec 13 04:29:28 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 04:29:28 +0100 (CET) Subject: [pypy-commit] pypy 
default: test/fix ndarray.__new__ passing array as buffer Message-ID: <20131213032928.9FAB41C30DB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68413:caa5f3b6bf46 Date: 2013-12-12 21:38 -0500 http://bitbucket.org/pypy/pypy/changeset/caa5f3b6bf46/ Log: test/fix ndarray.__new__ passing array as buffer diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -502,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -62,6 +62,11 @@ assert isinstance(b, matrix) assert (b == a).all() + def test_subtype_like_matrix(self): + import numpy as np + arr = np.array([1,2,3]) + ret = np.ndarray.__new__(np.ndarray, arr.shape, arr.dtype, buffer=arr) + assert (arr == ret).all() def test_finalize(self): #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray @@ -245,4 +250,3 @@ assert isinstance(b, D) c = array(a, float) assert c.dtype is dtype(float) - From noreply at buildbot.pypy.org Fri Dec 13 04:29:27 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 04:29:27 +0100 (CET) Subject: [pypy-commit] pypy default: fix buffer(ndarray) Message-ID: <20131213032927.72E391C12A3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68412:63b12bac9e29 Date: 2013-12-12 21:28 -0500 http://bitbucket.org/pypy/pypy/changeset/63b12bac9e29/ Log: fix buffer(ndarray) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1177,6 +1177,7 @@ __int__ = interp2app(W_NDimArray.descr_int), __long__ = interp2app(W_NDimArray.descr_long), __float__ = interp2app(W_NDimArray.descr_float), + __buffer__ = interp2app(W_NDimArray.descr_get_data), __pos__ = interp2app(W_NDimArray.descr_pos), __neg__ = interp2app(W_NDimArray.descr_neg), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -320,6 +320,12 @@ e = d.repeat(3, 0) assert e.shape == (9, 4, 0) + def test_buffer(self): + import numpy as np + a = np.array([1,2,3]) + b = buffer(a) + assert type(b) is buffer + def test_type(self): from numpypy import array ar = array(range(5)) From noreply at buildbot.pypy.org Fri Dec 13 04:29:29 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 04:29:29 +0100 (CET) Subject: [pypy-commit] pypy default: intobject try int/trunc methods before str/unicode/buffer Message-ID: <20131213032929.D55F51C30DC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68414:a5313cbfc19e Date: 2013-12-12 22:25 -0500 http://bitbucket.org/pypy/pypy/changeset/a5313cbfc19e/ Log: intobject try int/trunc methods before str/unicode/buffer diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -96,6 +96,27 @@ if type(w_value) is W_IntObject: value = w_value.intval ok = True + elif space.lookup(w_value, '__int__') is 
not None or \ + space.lookup(w_value, '__trunc__') is not None: + # otherwise, use the __int__() or the __trunc__() methods + w_obj = w_value + if space.lookup(w_obj, '__int__') is None: + w_obj = space.trunc(w_obj) + w_obj = space.int(w_obj) + # 'int(x)' should return what x.__int__() returned, which should + # be an int or long or a subclass thereof. + if space.is_w(w_inttype, space.w_int): + return w_obj + # int_w is effectively what we want in this case, + # we cannot construct a subclass of int instance with an + # an overflowing long + try: + value = space.int_w(w_obj) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + ok = True elif space.isinstance_w(w_value, space.w_str): value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) ok = True @@ -117,30 +138,9 @@ ok = True if not ok: - # otherwise, use the __int__() or the __trunc__() methods - w_obj = w_value - if space.lookup(w_obj, '__int__') is None: - if space.lookup(w_obj, '__trunc__') is not None: - w_obj = space.trunc(w_obj) - else: - raise operationerrfmt(space.w_TypeError, - "int() argument must be a string or a number, not '%T'", - w_obj) - w_obj = space.int(w_obj) - # 'int(x)' should return what x.__int__() returned, which should - # be an int or long or a subclass thereof. - if space.is_w(w_inttype, space.w_int): - return w_obj - # int_w is effectively what we want in this case, - # we cannot construct a subclass of int instance with an - # an overflowing long - try: - value = space.int_w(w_obj) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise OperationError(space.w_ValueError, - space.wrap("value can't be converted to int")) - raise e + raise operationerrfmt(space.w_TypeError, + "int() argument must be a string or a number, not '%T'", + w_value) else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -478,6 +478,12 @@ return Integral() assert int(TruncReturnsNonInt()) == 42 + def test_int_before_string(self): + class Integral(str): + def __int__(self): + return 42 + assert int(Integral('abc')) == 42 + def test_getnewargs(self): assert 0 .__getnewargs__() == (0,) @@ -488,7 +494,7 @@ # __eq__ & the others. 
assert 1 .__cmp__ assert int .__cmp__ - + def test_bit_length(self): for val, bits in [ (0, 0), From noreply at buildbot.pypy.org Fri Dec 13 06:18:56 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 06:18:56 +0100 (CET) Subject: [pypy-commit] pypy default: fix __array__ on subclasses of array Message-ID: <20131213051856.7F7CE1C0050@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68415:ff3d519d9557 Date: 2013-12-13 00:17 -0500 http://bitbucket.org/pypy/pypy/changeset/ff3d519d9557/ Log: fix __array__ on subclasses of array diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -493,8 +493,10 @@ if not space.is_none(w_dtype): raise OperationError(space.w_NotImplementedError, space.wrap( "__array__(dtype) not implemented")) - # stub implementation of __array__() - return self + if type(self) is W_NDimArray: + return self + return W_NDimArray.from_shape_and_storage( + space, self.get_shape(), self.implementation.storage, self.get_dtype()) def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -50,6 +50,12 @@ b[0]=100 assert a[0,0] == 100 + assert type(a) is not ndarray + assert a[0,0] == 100 + b = a.__array__() + assert type(b) is ndarray + assert b[0,0] == 100 + def test_subtype_view(self): from numpypy import ndarray, array class matrix(ndarray): From noreply at buildbot.pypy.org Fri Dec 13 06:29:57 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 06:29:57 +0100 (CET) Subject: [pypy-commit] pypy default: fix base after __array__ Message-ID: <20131213052957.27C901C30A4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68416:e028990fb8df Date: 2013-12-13 00:29 -0500 http://bitbucket.org/pypy/pypy/changeset/e028990fb8df/ Log: fix base after __array__ diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -496,7 +496,8 @@ if type(self) is W_NDimArray: return self return W_NDimArray.from_shape_and_storage( - space, self.get_shape(), self.implementation.storage, self.get_dtype()) + space, self.get_shape(), self.implementation.storage, + self.get_dtype(), w_base=self) def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -52,9 +52,11 @@ assert type(a) is not ndarray assert a[0,0] == 100 + assert a.base is not None b = a.__array__() assert type(b) is ndarray assert b[0,0] == 100 + assert b.base is a def test_subtype_view(self): from numpypy import ndarray, array From noreply at buildbot.pypy.org Fri Dec 13 08:51:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 13 Dec 2013 08:51:41 +0100 (CET) Subject: [pypy-commit] pypy default: Support os.remove() in RPython. 
It worked accidentally when the Message-ID: <20131213075141.3C7A51C30A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68417:ca66d486e64d Date: 2013-12-13 08:50 +0100 http://bitbucket.org/pypy/pypy/changeset/ca66d486e64d/ Log: Support os.remove() in RPython. It worked accidentally when the translator was running on CPython, but not on PyPy, due to 'os.remove == os.unlink' being false. diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -54,6 +54,12 @@ from rpython.rlib.rfile import create_temp_rfile return space.appcall(create_temp_rfile) + at register_flow_sc(os.remove) +def sc_os_remove(space, *args_w): + # on top of PyPy only: 'os.remove != os.unlink' + # (on CPython they are '==', but not identical either) + return space.appcall(os.unlink, *args_w) + # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1244,6 +1244,20 @@ graph = self.codetest(g) assert "Undefined closure variable 'b'" in str(excinfo.value) + def call_os_remove(msg): + os.remove(msg) + os.unlink(msg) + + def test_call_os_remove(self): + x = self.codetest(self.call_os_remove) + simplify_graph(x) + self.show(x) + ops = x.startblock.operations + assert ops[0].opname == 'simple_call' + assert ops[0].args[0].value is os.unlink + assert ops[1].opname == 'simple_call' + assert ops[1].args[0].value is os.unlink + DATA = {'x': 5, 'y': 6} From noreply at buildbot.pypy.org Fri Dec 13 13:49:06 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 13 Dec 2013 13:49:06 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: test non-aligned float read, add arm specific raw_load_f Message-ID: <20131213124906.D1C541C04FF@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r68418:6196be9fb0b7 Date: 2013-12-13 14:47 +0200 http://bitbucket.org/pypy/pypy/changeset/6196be9fb0b7/ Log: test non-aligned float read, add arm specific raw_load_f diff --git a/TODO b/TODO new file mode 100644 --- /dev/null +++ b/TODO @@ -0,0 +1,3 @@ + - fix jitted assembler + - add fast path for aligned float + - test non-aligned write, fix diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -5,9 +5,10 @@ from rpython.jit.backend.llsupport import jitframe from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER -from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.jit.backend.arm.detect import detect_hardfloat from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.codewriter import longlong jitframe.STATICSIZE = JITFRAME_FIXED_SIZE @@ -117,6 +118,21 @@ assert self.assembler is not None return Regalloc(self.assembler) + def bh_raw_load_f(self, struct, offset, descr): + ll_p = rffi.cast(rffi.CCHARP, struct) + ll_p_offset = rffi.ptradd(ll_p, offset) + if rffi.cast(lltype.Signed, ll_p_offset) & 3: + with lltype.scoped_alloc(rffi.CArray(longlong.FLOATSTORAGE), 1) as s_array: + rffi.c_memcpy(rffi.cast(rffi.VOIDP, 
s_array), + rffi.cast(rffi.VOIDP, ll_p_offset), + rffi.sizeof(rffi.DOUBLE)) + ll_p = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), + s_array) + return ll_p[0] + ll_p = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), + ll_p_offset) + return ll_p[0] + class CPU_ARM(AbstractARMCPU): """ARM""" diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4024,6 +4024,38 @@ assert result == rffi.cast(lltype.Float, value) rawstorage.free_raw_storage(p) + def test_raw_load_float_unaligned(self): + if not self.cpu.supports_floats: + py.test.skip("requires floats") + from rpython.rlib import rawstorage + for T in [rffi.DOUBLE]: + ops = """ + [i0, i1] + f2 = raw_load(i0, i1, descr=arraydescr) + finish(f2) + """ + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p_aligned = rawstorage.alloc_raw_storage(33) + for i in range(33): + p_aligned[i] = '\xDD' + value = rffi.cast(T, 1.12e20) + p = rffi.ptradd(p_aligned,1) + rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_f(rffi.cast(lltype.Signed, p), 16, + arraydescr) + got = longlong.getrealfloat(got) + assert got == rffi.cast(lltype.Float, value) + # + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken, + rffi.cast(lltype.Signed, p), 16) + result = self.cpu.get_float_value(deadframe, 0) + result = longlong.getrealfloat(result) + assert result == rffi.cast(lltype.Float, value) + rawstorage.free_raw_storage(p_aligned) + def test_raw_load_singlefloat(self): if not self.cpu.supports_singlefloats: py.test.skip("requires singlefloats") From noreply at buildbot.pypy.org Fri Dec 13 17:31:14 2013 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 13 Dec 2013 17:31:14 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: fix modifier keys handling and Shift+Character input for miniimage Message-ID: <20131213163114.E85A01C04FF@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r522:83bbb98f8690 Date: 2013-12-13 17:29 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/83bbb98f8690/ Log: fix modifier keys handling and Shift+Character input for miniimage diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -11,7 +11,7 @@ MOUSE_BTN_LEFT = 4 MOD_SHIFT = 1 MOD_CONTROL = 2 -MOD_ALT = 16 +MOD_ALT_CMD = 16 | 8 class SDLDisplay(object): _attrs_ = ["screen", "width", "height", "depth", "surface", "has_surface", @@ -71,19 +71,30 @@ y = rffi.getintfield(m, "c_y") self.mouse_position = [x, y] elif c_type == RSDL.KEYUP or c_type == RSDL.KEYDOWN: - p = rffi.cast(RSDL.KeyboardEventPtr, event) - char = rffi.getintfield(p.c_keysym, 'c_unicode') - if char != 0: - chars = unicode_encode_utf_8(unichr(char), 1, "ignore") - if len(chars) == 1: - if c_type == RSDL.KEYDOWN: - self.key = ord(chars[0]) - interrupt = self.interrupt_key - if (interrupt & 0xFF == self.key and - interrupt >> 8 == self.get_modifier_mask(0)): - raise KeyboardInterrupt - else: - pass # XXX: Todo? 
+ if c_type == RSDL.KEYDOWN: # TODO: create KeyUp events and KeyRepeat events + self.key = 0 + p = rffi.cast(RSDL.KeyboardEventPtr, event) + sym = rffi.getintfield(p.c_keysym, 'c_sym') + char = rffi.getintfield(p.c_keysym, 'c_unicode') + if sym == RSDL.K_DOWN: + self.key = 31 + elif sym == RSDL.K_LEFT: + self.key = 28 + elif sym == RSDL.K_RIGHT: + self.key = 29 + elif sym == RSDL.K_UP: + self.key = 30 + elif char != 0: + chars = unicode_encode_utf_8(unichr(char), 1, "ignore") + if len(chars) == 1: + asciivalue = ord(chars[0]) + if asciivalue >= 32: + self.key = asciivalue + if self.key == 0 and sym <= 255: + self.key = sym + interrupt = self.interrupt_key + if (interrupt & 0xFF == self.key and interrupt >> 8 == self.get_modifier_mask(0)): + raise KeyboardInterrupt elif c_type == RSDL.QUIT: from spyvm.error import Exit raise Exit("Window closed..") @@ -99,7 +110,7 @@ if mod & RSDL.KMOD_SHIFT != 0: modifier |= MOD_SHIFT if mod & RSDL.KMOD_ALT != 0: - modifier |= MOD_ALT + modifier |= MOD_ALT_CMD return modifier << shift def mouse_point(self): @@ -108,16 +119,18 @@ def mouse_button(self): self.get_next_event() - return self.button | self.get_modifier_mask(3) + mod = self.get_modifier_mask(3) + return self.button | mod def next_keycode(self): key = self.key self.key = 0 - return key | self.get_modifier_mask(8) + return key def peek_keycode(self): self.get_next_event() - return self.key | self.get_modifier_mask(8) + self.key |= self.get_modifier_mask(8) + return self.key def set_interrupt_key(self, space, encoded_key): self.interrupt_key = encoded_key diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -803,7 +803,7 @@ @expose_primitive(KBD_NEXT, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): code = interp.space.get_display().next_keycode() - if code == 0: + if code & 0xFF == 0: return interp.space.w_nil else: return interp.space.wrap_int(code) @@ -811,7 +811,7 @@ @expose_primitive(KBD_PEEK, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): code = interp.space.get_display().peek_keycode() - if code == 0: + if code & 0xFF == 0: return interp.space.w_nil else: return interp.space.wrap_int(code) From noreply at buildbot.pypy.org Fri Dec 13 21:46:53 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 21:46:53 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131213204653.B33421C04FF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68419:83fa1f936f1c Date: 2013-12-13 14:50 -0500 http://bitbucket.org/pypy/pypy/changeset/83fa1f936f1c/ Log: cleanup diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -326,8 +326,7 @@ pass class W_IntegerBox(W_NumberBox): - def int_w(self, space): - return space.int_w(self.descr_int(space)) + pass class W_SignedIntegerBox(W_IntegerBox): pass diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1958,11 +1958,13 @@ assert a.itemsize == 3 a = array(3.1415).astype('S3').dtype assert a.itemsize == 3 - try: + + import sys + if '__pypy__' not in sys.builtin_module_names: a = array(['1', '2','3']).astype(float) assert a[2] == 3.0 - except NotImplementedError: - skip('astype("float") not implemented for str arrays') + else: + raises(NotImplementedError, 
array(['1', '2', '3']).astype, float) def test_base(self): from numpypy import array From noreply at buildbot.pypy.org Fri Dec 13 21:46:55 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 21:46:55 +0100 (CET) Subject: [pypy-commit] pypy default: also fix longtype: try __long__ before str/unicode Message-ID: <20131213204655.03ECF1C1051@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68420:a9f62405eb7d Date: 2013-12-13 15:40 -0500 http://bitbucket.org/pypy/pypy/changeset/a9f62405eb7d/ Log: also fix longtype: try __long__ before str/unicode diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -27,6 +27,18 @@ return w_value elif type(w_value) is W_LongObject: return newbigint(space, w_longtype, w_value.num) + elif (space.lookup(w_value, '__long__') is not None or + space.lookup(w_value, '__int__') is not None): + w_obj = space.long(w_value) + return newbigint(space, w_longtype, space.bigint_w(w_obj)) + elif space.lookup(w_value, '__trunc__') is not None: + w_obj = space.trunc(w_value) + # :-( blame CPython 2.7 + if space.lookup(w_obj, '__long__') is not None: + w_obj = space.long(w_obj) + else: + w_obj = space.int(w_obj) + return newbigint(space, w_longtype, space.bigint_w(w_obj)) elif space.isinstance_w(w_value, space.w_str): return string_to_w_long(space, w_longtype, space.str_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): @@ -34,24 +46,9 @@ return string_to_w_long(space, w_longtype, unicode_to_decimal_w(space, w_value)) else: - # otherwise, use the __long__() or the __trunc__ methods - w_obj = w_value - if (space.lookup(w_obj, '__long__') is not None or - space.lookup(w_obj, '__int__') is not None): - w_obj = space.long(w_obj) - elif space.lookup(w_obj, '__trunc__') is not None: - w_obj = space.trunc(w_obj) - # :-( blame CPython 2.7 - if space.lookup(w_obj, '__long__') is not None: - w_obj = space.long(w_obj) - else: - w_obj = space.int(w_obj) - else: - raise operationerrfmt(space.w_TypeError, - "long() argument must be a string or a number, not '%T'", - w_obj) - bigint = space.bigint_w(w_obj) - return newbigint(space, w_longtype, bigint) + raise operationerrfmt(space.w_TypeError, + "long() argument must be a string or a number, not '%T'", + w_value) else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -291,6 +291,12 @@ return Integral() assert long(TruncReturnsNonLong()) == 42 + def test_long_before_string(self): + class A(str): + def __long__(self): + return 42 + assert long(A('abc')) == 42 + def test_conjugate(self): assert (7L).conjugate() == 7L assert (-7L).conjugate() == -7L From noreply at buildbot.pypy.org Fri Dec 13 21:46:56 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 21:46:56 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup inttype Message-ID: <20131213204656.3CC691C3159@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68421:1320c7d5c78d Date: 2013-12-13 15:44 -0500 http://bitbucket.org/pypy/pypy/changeset/1320c7d5c78d/ Log: cleanup inttype diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -91,11 +91,9 @@ w_value = w_x # 'x' is the keyword argument name in CPython value = 0 if w_base is None: - ok 
= False # check for easy cases if type(w_value) is W_IntObject: value = w_value.intval - ok = True elif space.lookup(w_value, '__int__') is not None or \ space.lookup(w_value, '__trunc__') is not None: # otherwise, use the __int__() or the __trunc__() methods @@ -110,21 +108,13 @@ # int_w is effectively what we want in this case, # we cannot construct a subclass of int instance with an # an overflowing long - try: - value = space.int_w(w_obj) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - ok = True + value = space.int_w(w_obj) elif space.isinstance_w(w_value, space.w_str): value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) - ok = True elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w string = unicode_to_decimal_w(space, w_value) value, w_longval = string_to_int_or_long(space, string) - ok = True else: # If object supports the buffer interface try: @@ -132,15 +122,12 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise + raise operationerrfmt(space.w_TypeError, + "int() argument must be a string or a number, not '%T'", + w_value) else: buf = space.interp_w(Buffer, w_buffer) value, w_longval = string_to_int_or_long(space, buf.as_str()) - ok = True - - if not ok: - raise operationerrfmt(space.w_TypeError, - "int() argument must be a string or a number, not '%T'", - w_value) else: base = space.int_w(w_base) From noreply at buildbot.pypy.org Fri Dec 13 21:54:24 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 21:54:24 +0100 (CET) Subject: [pypy-commit] pypy default: use the float method which has builtin type check Message-ID: <20131213205424.19AA71C3159@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68422:050a624fe426 Date: 2013-12-13 15:53 -0500 http://bitbucket.org/pypy/pypy/changeset/050a624fe426/ Log: use the float method which has builtin type check diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -27,12 +27,8 @@ def descr__new__(space, w_floattype, w_x): from pypy.objspace.std.floatobject import W_FloatObject w_value = w_x # 'x' is the keyword argument name in CPython - w_special = space.lookup(w_value, "__float__") - if w_special is not None: - w_obj = space.get_and_call_function(w_special, w_value) - if not space.isinstance_w(w_obj, space.w_float): - raise OperationError(space.w_TypeError, - space.wrap("__float__ returned non-float")) + if space.lookup(w_value, "__float__") is not None: + w_obj = space.float(w_value) if space.is_w(w_floattype, space.w_float): return w_obj value = space.float_w(w_obj) From noreply at buildbot.pypy.org Fri Dec 13 21:58:08 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 13 Dec 2013 21:58:08 +0100 (CET) Subject: [pypy-commit] pypy default: Make setslice on ffi.buffer(...) faster by using memcpy Message-ID: <20131213205808.AB6A41C3159@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68423:d9b57c97270d Date: 2013-12-13 12:56 -0800 http://bitbucket.org/pypy/pypy/changeset/d9b57c97270d/ Log: Make setslice on ffi.buffer(...) 
faster by using memcpy diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -5,7 +5,9 @@ from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw class LLBuffer(RWBuffer): @@ -34,8 +36,7 @@ def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) - for i in range(len(string)): - raw_cdata[i] = string[i] + copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) class MiniBuffer(W_Root): From noreply at buildbot.pypy.org Fri Dec 13 21:58:10 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 13 Dec 2013 21:58:10 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20131213205810.713351C3159@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68424:64115be033cf Date: 2013-12-13 12:57 -0800 http://bitbucket.org/pypy/pypy/changeset/64115be033cf/ Log: merged upstream diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). 
It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,5 +1,7 @@ def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: @@ -14,15 +16,15 @@ res *= i return res - #Experimentally this gap seems good - gap = max(100, x>>7) + # Experimentally this gap seems good + gap = max(100, x >> 7) def _fac_odd(low, high): - if low+gap >= high: + if low + gap >= high: t = 1 for i in range(low, high, 2): t *= i return t - + mid = ((low + high) >> 1) | 1 return _fac_odd(low, mid) * _fac_odd(mid, high) diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -502,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -163,6 +163,12 @@ assert isinstance(box, W_Float64Box) return space.wrap(box.value) + def descr_oct(self, space): + return space.oct(self.descr_int(space)) + + def descr_hex(self, space): + return space.hex(self.descr_int(space)) + def descr_nonzero(self, space): dtype = self.get_dtype(space) return space.wrap(dtype.itemtype.bool(self)) @@ -320,8 +326,7 @@ pass class W_IntegerBox(W_NumberBox): - def int_w(self, space): - return space.int_w(self.descr_int(space)) + pass class W_SignedIntegerBox(W_IntegerBox): pass @@ -511,6 +516,8 @@ __long__ = interp2app(W_GenericBox.descr_long), __float__ = interp2app(W_GenericBox.descr_float), __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + __oct__ = interp2app(W_GenericBox.descr_oct), + __hex__ = interp2app(W_GenericBox.descr_hex), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/interp_flagsobj.py --- a/pypy/module/micronumpy/interp_flagsobj.py +++ b/pypy/module/micronumpy/interp_flagsobj.py @@ -18,6 +18,16 @@ def descr_get_writeable(self, space): return space.w_True + def descr_get_fnc(self, space): + return space.wrap( + space.is_true(self.descr_get_fortran(space)) and not + space.is_true(self.descr_get_contiguous(space))) + + def descr_get_forc(self, space): + return space.wrap( + space.is_true(self.descr_get_fortran(space)) or + space.is_true(self.descr_get_contiguous(space))) + def descr_getitem(self, space, w_item): key = space.str_w(w_item) if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": @@ -26,6 +36,10 @@ return self.descr_get_fortran(space) if key == "W" or key == "WRITEABLE": return self.descr_get_writeable(space) + if key == "FNC": + return self.descr_get_fnc(space) + if key == "FORC": + return self.descr_get_forc(space) raise 
OperationError(space.w_KeyError, space.wrap( "Unknown flag")) @@ -56,4 +70,6 @@ f_contiguous = GetSetProperty(W_FlagsObject.descr_get_fortran), fortran = GetSetProperty(W_FlagsObject.descr_get_fortran), writeable = GetSetProperty(W_FlagsObject.descr_get_writeable), + fnc = GetSetProperty(W_FlagsObject.descr_get_fnc), + forc = GetSetProperty(W_FlagsObject.descr_get_forc), ) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -493,8 +493,11 @@ if not space.is_none(w_dtype): raise OperationError(space.w_NotImplementedError, space.wrap( "__array__(dtype) not implemented")) - # stub implementation of __array__() - return self + if type(self) is W_NDimArray: + return self + return W_NDimArray.from_shape_and_storage( + space, self.get_shape(), self.implementation.storage, + self.get_dtype(), w_base=self) def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) @@ -1065,9 +1068,9 @@ return w_obj pass - at unwrap_spec(offset=int, order=str) + at unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, order='C'): + offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides dtype = space.interp_w(interp_dtype.W_Dtype, @@ -1101,6 +1104,11 @@ if not shape: return W_NDimArray.new_scalar(space, dtype) + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_CORDER: + order = 'C' + else: + order = 'F' if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) @@ -1172,6 +1180,7 @@ __int__ = interp2app(W_NDimArray.descr_int), __long__ = interp2app(W_NDimArray.descr_long), __float__ = interp2app(W_NDimArray.descr_float), + __buffer__ = interp2app(W_NDimArray.descr_get_data), __pos__ = interp2app(W_NDimArray.descr_pos), __neg__ = interp2app(W_NDimArray.descr_neg), diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -12,6 +12,10 @@ a = np.array([1,2,3]) assert a.flags.c_contiguous == True assert a.flags['W'] == True + assert a.flags.fnc == False + assert a.flags.forc == True + assert a.flags['FNC'] == False + assert a.flags['FORC'] == True raises(KeyError, "a.flags['blah']") raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") raises((TypeError, AttributeError), "a.flags.c_contiguous = False") diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -271,6 +271,17 @@ # test uninitialized value crash? 
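The two new flag aliases follow the usual NumPy meaning: FNC is true only for Fortran-ordered data that is not also C-contiguous, while FORC is true whenever either layout is contiguous. A quick illustration of the invariant, assuming a NumPy-compatible module is available:

import numpy as np

a = np.zeros((2, 3))              # C-contiguous:  FORC=True,  FNC=False
b = np.zeros((2, 3), order='F')   # F-contiguous:  FORC=True,  FNC=True
c = a[:, ::2]                     # neither:       FORC=False, FNC=False

for arr in (a, b, c):
    f = arr.flags
    assert f['FORC'] == (f['F_CONTIGUOUS'] or f['C_CONTIGUOUS'])
    assert f['FNC'] == (f['F_CONTIGUOUS'] and not f['C_CONTIGUOUS'])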
assert len(str(a)) > 0 + import sys + for order in [False, True, 'C', 'F']: + a = ndarray.__new__(ndarray, (2, 3), float, order=order) + assert a.shape == (2, 3) + if order in [True, 'F'] and '__pypy__' not in sys.builtin_module_names: + assert a.flags['F'] + assert not a.flags['C'] + else: + assert a.flags['C'] + assert not a.flags['F'] + def test_ndmin(self): from numpypy import array @@ -309,6 +320,12 @@ e = d.repeat(3, 0) assert e.shape == (9, 4, 0) + def test_buffer(self): + import numpy as np + a = np.array([1,2,3]) + b = buffer(a) + assert type(b) is buffer + def test_type(self): from numpypy import array ar = array(range(5)) @@ -1941,11 +1958,13 @@ assert a.itemsize == 3 a = array(3.1415).astype('S3').dtype assert a.itemsize == 3 - try: + + import sys + if '__pypy__' not in sys.builtin_module_names: a = array(['1', '2','3']).astype(float) assert a[2] == 3.0 - except NotImplementedError: - skip('astype("float") not implemented for str arrays') + else: + raises(NotImplementedError, array(['1', '2', '3']).astype, float) def test_base(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -18,6 +18,15 @@ #raises(TypeError, np.complex_, '1+2j') assert math.isnan(np.complex_(None)) + def test_builtin(self): + import numpy as np + assert oct(np.int32(11)) == '013' + assert oct(np.float32(11.6)) == '013' + assert oct(np.complex64(11-12j)) == '013' + assert hex(np.int32(11)) == '0xb' + assert hex(np.float32(11.6)) == '0xb' + assert hex(np.complex64(11-12j)) == '0xb' + def test_pickle(self): from numpypy import dtype, zeros try: diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -50,6 +50,14 @@ b[0]=100 assert a[0,0] == 100 + assert type(a) is not ndarray + assert a[0,0] == 100 + assert a.base is not None + b = a.__array__() + assert type(b) is ndarray + assert b[0,0] == 100 + assert b.base is a + def test_subtype_view(self): from numpypy import ndarray, array class matrix(ndarray): @@ -62,6 +70,11 @@ assert isinstance(b, matrix) assert (b == a).all() + def test_subtype_like_matrix(self): + import numpy as np + arr = np.array([1,2,3]) + ret = np.ndarray.__new__(np.ndarray, arr.shape, arr.dtype, buffer=arr) + assert (arr == ret).all() def test_finalize(self): #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray @@ -245,4 +258,3 @@ assert isinstance(b, D) c = array(a, float) assert c.dtype is dtype(float) - diff --git a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c --- a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c +++ b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c @@ -567,3 +567,13 @@ errno = result + 1; return result; } + +EXPORT(int *) test_issue1655(char const *tag, int *len) +{ + static int data[] = { -1, -2, -3, -4 }; + *len = -42; + if (strcmp(tag, "testing!") != 0) + return NULL; + *len = sizeof(data) / sizeof(data[0]); + return data; +} diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py @@ -559,3 +559,25 
@@ assert (res, n) == (42, 43) set_errno(0) assert get_errno() == 0 + + def test_issue1655(self): + def ret_list_p(icount): + def sz_array_p(obj, func, args): + assert ('.LP_c_int object' in repr(obj) or + '.LP_c_long object' in repr(obj)) + assert repr(args) in ("('testing!', c_int(4))", + "('testing!', c_long(4))") + assert args[icount].value == 4 + return [ obj[i] for i in range(args[icount].value) ] + return sz_array_p + + get_data_prototype = CFUNCTYPE(POINTER(c_int), + c_char_p, POINTER(c_int)) + get_data_paramflag = ((1,), (2,)) + get_data_signature = ('test_issue1655', dll) + + get_data = get_data_prototype( get_data_signature, get_data_paramflag ) + assert get_data('testing!') == 4 + + get_data.errcheck = ret_list_p(1) + assert get_data('testing!') == [-1, -2, -3, -4] diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -27,12 +27,8 @@ def descr__new__(space, w_floattype, w_x): from pypy.objspace.std.floatobject import W_FloatObject w_value = w_x # 'x' is the keyword argument name in CPython - w_special = space.lookup(w_value, "__float__") - if w_special is not None: - w_obj = space.get_and_call_function(w_special, w_value) - if not space.isinstance_w(w_obj, space.w_float): - raise OperationError(space.w_TypeError, - space.wrap("__float__ returned non-float")) + if space.lookup(w_value, "__float__") is not None: + w_obj = space.float(w_value) if space.is_w(w_floattype, space.w_float): return w_obj value = space.float_w(w_obj) diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -91,41 +91,15 @@ w_value = w_x # 'x' is the keyword argument name in CPython value = 0 if w_base is None: - ok = False # check for easy cases if type(w_value) is W_IntObject: value = w_value.intval - ok = True - elif space.isinstance_w(w_value, space.w_str): - value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) - ok = True - elif space.isinstance_w(w_value, space.w_unicode): - from pypy.objspace.std.unicodeobject import unicode_to_decimal_w - string = unicode_to_decimal_w(space, w_value) - value, w_longval = string_to_int_or_long(space, string) - ok = True - else: - # If object supports the buffer interface - try: - w_buffer = space.buffer(w_value) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - buf = space.interp_w(Buffer, w_buffer) - value, w_longval = string_to_int_or_long(space, buf.as_str()) - ok = True - - if not ok: + elif space.lookup(w_value, '__int__') is not None or \ + space.lookup(w_value, '__trunc__') is not None: # otherwise, use the __int__() or the __trunc__() methods w_obj = w_value if space.lookup(w_obj, '__int__') is None: - if space.lookup(w_obj, '__trunc__') is not None: - w_obj = space.trunc(w_obj) - else: - raise operationerrfmt(space.w_TypeError, - "int() argument must be a string or a number, not '%T'", - w_obj) + w_obj = space.trunc(w_obj) w_obj = space.int(w_obj) # 'int(x)' should return what x.__int__() returned, which should # be an int or long or a subclass thereof. 
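The effect of the reordering above is that a user-defined __int__ (or __trunc__) now takes precedence even when the object happens to be a string subclass, matching CPython 2.7; the new test added further down checks exactly this. In short:

class Integral(str):
    def __int__(self):
        return 42

assert int(Integral('abc')) == 42    # __int__ wins over the string content

class Truncatable(object):
    def __trunc__(self):
        return 7

assert int(Truncatable()) == 7       # __trunc__ is the fallback when __int__ is absent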
@@ -134,13 +108,26 @@ # int_w is effectively what we want in this case, # we cannot construct a subclass of int instance with an # an overflowing long + value = space.int_w(w_obj) + elif space.isinstance_w(w_value, space.w_str): + value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) + elif space.isinstance_w(w_value, space.w_unicode): + from pypy.objspace.std.unicodeobject import unicode_to_decimal_w + string = unicode_to_decimal_w(space, w_value) + value, w_longval = string_to_int_or_long(space, string) + else: + # If object supports the buffer interface try: - value = space.int_w(w_obj) + w_buffer = space.buffer(w_value) except OperationError, e: - if e.match(space, space.w_TypeError): - raise OperationError(space.w_ValueError, - space.wrap("value can't be converted to int")) - raise e + if not e.match(space, space.w_TypeError): + raise + raise operationerrfmt(space.w_TypeError, + "int() argument must be a string or a number, not '%T'", + w_value) + else: + buf = space.interp_w(Buffer, w_buffer) + value, w_longval = string_to_int_or_long(space, buf.as_str()) else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -27,6 +27,18 @@ return w_value elif type(w_value) is W_LongObject: return newbigint(space, w_longtype, w_value.num) + elif (space.lookup(w_value, '__long__') is not None or + space.lookup(w_value, '__int__') is not None): + w_obj = space.long(w_value) + return newbigint(space, w_longtype, space.bigint_w(w_obj)) + elif space.lookup(w_value, '__trunc__') is not None: + w_obj = space.trunc(w_value) + # :-( blame CPython 2.7 + if space.lookup(w_obj, '__long__') is not None: + w_obj = space.long(w_obj) + else: + w_obj = space.int(w_obj) + return newbigint(space, w_longtype, space.bigint_w(w_obj)) elif space.isinstance_w(w_value, space.w_str): return string_to_w_long(space, w_longtype, space.str_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): @@ -34,24 +46,9 @@ return string_to_w_long(space, w_longtype, unicode_to_decimal_w(space, w_value)) else: - # otherwise, use the __long__() or the __trunc__ methods - w_obj = w_value - if (space.lookup(w_obj, '__long__') is not None or - space.lookup(w_obj, '__int__') is not None): - w_obj = space.long(w_obj) - elif space.lookup(w_obj, '__trunc__') is not None: - w_obj = space.trunc(w_obj) - # :-( blame CPython 2.7 - if space.lookup(w_obj, '__long__') is not None: - w_obj = space.long(w_obj) - else: - w_obj = space.int(w_obj) - else: - raise operationerrfmt(space.w_TypeError, - "long() argument must be a string or a number, not '%T'", - w_obj) - bigint = space.bigint_w(w_obj) - return newbigint(space, w_longtype, bigint) + raise operationerrfmt(space.w_TypeError, + "long() argument must be a string or a number, not '%T'", + w_value) else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -478,6 +478,12 @@ return Integral() assert int(TruncReturnsNonInt()) == 42 + def test_int_before_string(self): + class Integral(str): + def __int__(self): + return 42 + assert int(Integral('abc')) == 42 + def test_getnewargs(self): assert 0 .__getnewargs__() == (0,) @@ -488,7 +494,7 @@ # __eq__ & the others. 
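long() is rearranged the same way: __long__ and __int__ (and, as a corner case, __trunc__) are consulted before any attempt to parse the value as a string, so a string subclass with a __long__ hook converts through the hook, as the test added below also verifies:

class A(str):
    def __long__(self):
        return 42L

assert long(A('abc')) == 42L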
assert 1 .__cmp__ assert int .__cmp__ - + def test_bit_length(self): for val, bits in [ (0, 0), diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -291,6 +291,12 @@ return Integral() assert long(TruncReturnsNonLong()) == 42 + def test_long_before_string(self): + class A(str): + def __long__(self): + return 42 + assert long(A('abc')) == 42 + def test_conjugate(self): assert (7L).conjugate() == 7L assert (-7L).conjugate() == -7L diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -54,6 +54,12 @@ from rpython.rlib.rfile import create_temp_rfile return space.appcall(create_temp_rfile) + at register_flow_sc(os.remove) +def sc_os_remove(space, *args_w): + # on top of PyPy only: 'os.remove != os.unlink' + # (on CPython they are '==', but not identical either) + return space.appcall(os.unlink, *args_w) + # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1244,6 +1244,20 @@ graph = self.codetest(g) assert "Undefined closure variable 'b'" in str(excinfo.value) + def call_os_remove(msg): + os.remove(msg) + os.unlink(msg) + + def test_call_os_remove(self): + x = self.codetest(self.call_os_remove) + simplify_graph(x) + self.show(x) + ops = x.startblock.operations + assert ops[0].opname == 'simple_call' + assert ops[0].args[0].value is os.unlink + assert ops[1].opname == 'simple_call' + assert ops[1].args[0].value is os.unlink + DATA = {'x': 5, 'y': 6} From noreply at buildbot.pypy.org Fri Dec 13 23:17:53 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 23:17:53 +0100 (CET) Subject: [pypy-commit] pypy default: oops can't remove this Message-ID: <20131213221753.40A661C327A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68425:8608b7b52941 Date: 2013-12-13 17:16 -0500 http://bitbucket.org/pypy/pypy/changeset/8608b7b52941/ Log: oops can't remove this diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -326,7 +326,8 @@ pass class W_IntegerBox(W_NumberBox): - pass + def int_w(self, space): + return space.int_w(self.descr_int(space)) class W_SignedIntegerBox(W_IntegerBox): pass From noreply at buildbot.pypy.org Fri Dec 13 23:41:28 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 23:41:28 +0100 (CET) Subject: [pypy-commit] pypy default: bin() look for __index__ Message-ID: <20131213224128.9CB731C00EC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68426:3b5c5d8fc95e Date: 2013-12-13 17:39 -0500 http://bitbucket.org/pypy/pypy/changeset/3b5c5d8fc95e/ Log: bin() look for __index__ diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,4 +1,8 @@ def bin(x): - if not isinstance(x, (int, long)): - raise TypeError("must be int or long") - return x.__format__("#b") + if isinstance(x, (int, long)): 
+ value = x + elif hasattr(x, '__index__'): + value = x.__index__() + else: + raise TypeError("object cannot be interpreted as an index") + return value.__format__("#b") diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -46,6 +46,15 @@ assert bin(2L) == "0b10" assert bin(-2L) == "-0b10" raises(TypeError, bin, 0.) + class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert exc.value.message.find("object cannot be interpreted as an index") != -1 def test_unichr(self): import sys From noreply at buildbot.pypy.org Fri Dec 13 23:41:29 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 13 Dec 2013 23:41:29 +0100 (CET) Subject: [pypy-commit] pypy default: test bin() of numpy scalar Message-ID: <20131213224129.E585B1C00EC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68427:0edc9b68247e Date: 2013-12-13 17:40 -0500 http://bitbucket.org/pypy/pypy/changeset/0edc9b68247e/ Log: test bin() of numpy scalar diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -26,6 +26,9 @@ assert hex(np.int32(11)) == '0xb' assert hex(np.float32(11.6)) == '0xb' assert hex(np.complex64(11-12j)) == '0xb' + assert bin(np.int32(11)) == '0b1011' + exc = raises(TypeError, "bin(np.float32(11.6))") + assert exc.value.message.find('object cannot be interpreted as an index') != -1 def test_pickle(self): from numpypy import dtype, zeros From noreply at buildbot.pypy.org Sat Dec 14 12:47:45 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:45 +0100 (CET) Subject: [pypy-commit] pypy annotator: rm unused builtin analyzer for __import__ Message-ID: <20131214114745.B7CE11C1446@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68430:17f036f72c89 Date: 2013-08-20 18:53 +0200 http://bitbucket.org/pypy/pypy/changeset/17f036f72c89/ Log: rm unused builtin analyzer for __import__ diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -347,9 +347,6 @@ def test(*args): return s_Bool -def import_func(*args): - return SomeObject() - # collect all functions import __builtin__ BUILTIN_ANALYZERS = {} @@ -397,9 +394,6 @@ else: BUILTIN_ANALYZERS[object.__init__] = object_init -# import -BUILTIN_ANALYZERS[__import__] = import_func - # annotation of low-level types from rpython.annotator.model import SomePtr from rpython.rtyper.lltypesystem import lltype From noreply at buildbot.pypy.org Sat Dec 14 12:47:43 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:43 +0100 (CET) Subject: [pypy-commit] pypy annotator: Start branch for random annotator refactoring Message-ID: <20131214114743.4491F1C04FF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68428:b1c48379581a Date: 2013-12-13 18:53 +0000 http://bitbucket.org/pypy/pypy/changeset/b1c48379581a/ Log: Start branch for random annotator refactoring From noreply at buildbot.pypy.org Sat Dec 14 12:47:44 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:44 +0100 (CET) Subject: [pypy-commit] pypy annotator: Kill obsolete Stats thing in 
bookkeeper.py Message-ID: <20131214114744.85E561C08A1@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68429:29bcc678802c Date: 2013-08-20 19:48 +0200 http://bitbucket.org/pypy/pypy/changeset/29bcc678802c/ Log: Kill obsolete Stats thing in bookkeeper.py diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -613,7 +613,6 @@ # XXX "contains" clash with SomeObject method def consider_op_contains(self, seq, elem): - self.bookkeeper.count("contains", seq) return seq.op_contains(elem) def consider_op_newtuple(self, *args): diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -78,46 +78,39 @@ if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const < obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def le((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const <= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def eq((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const == obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def ne((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const != obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def gt((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const > obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def ge((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const >= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def cmp((obj1, obj2)): - getbookkeeper().count("cmp", obj1, obj2) if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(cmp(obj1.const, obj2.const)) else: @@ -163,11 +156,9 @@ return r def divmod((obj1, obj2)): - getbookkeeper().count("divmod", obj1, obj2) return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()]) def coerce((obj1, obj2)): - getbookkeeper().count("coerce", obj1, obj2) return pair(obj1, obj2).union() # reasonable enough # approximation of an annotation intersection, the result should be the annotation obj or @@ -472,7 +463,6 @@ SomeUnicodeString))): raise AnnotatorError( "string formatting mixing strings and unicode not supported") - getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul for s_item in s_tuple.items: if isinstance(s_item, SomeFloat): @@ -490,7 +480,6 @@ pairtype(SomeUnicodeString, SomeObject)): def mod((s_string, args)): - getbookkeeper().count('strformat', s_string, args) return s_string.__class__() class __extend__(pairtype(SomeFloat, SomeFloat)): @@ -592,19 +581,16 @@ return [KeyError] def getitem((dic1, obj2)): - getbookkeeper().count("dict_getitem", dic1) dic1.dictdef.generalize_key(obj2) return dic1.dictdef.read_value() getitem.can_only_throw = _can_only_throw def setitem((dic1, obj2), s_value): - getbookkeeper().count("dict_setitem", dic1) dic1.dictdef.generalize_key(obj2) dic1.dictdef.generalize_value(s_value) setitem.can_only_throw = _can_only_throw def delitem((dic1, obj2)): - 
getbookkeeper().count("dict_delitem", dic1) dic1.dictdef.generalize_key(obj2) delitem.can_only_throw = _can_only_throw @@ -618,7 +604,6 @@ except IndexError: return s_ImpossibleValue else: - getbookkeeper().count("tuple_random_getitem", tup1) return unionof(*tup1.items) getitem.can_only_throw = [IndexError] @@ -629,74 +614,63 @@ return lst1.listdef.offspring() def getitem((lst1, int2)): - getbookkeeper().count("list_getitem", int2) return lst1.listdef.read_item() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((lst1, int2)): - getbookkeeper().count("list_getitem", int2) return lst1.listdef.read_item() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def setitem((lst1, int2), s_value): - getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) setitem.can_only_throw = [IndexError] def delitem((lst1, int2)): - getbookkeeper().count("list_delitem", int2) lst1.listdef.resize() delitem.can_only_throw = [IndexError] class __extend__(pairtype(SomeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeString(no_nul=str1.no_nul) class __extend__(pairtype(SomeUnicodeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeUnicodeString() class __extend__(pairtype(SomeInteger, SomeString), pairtype(SomeInteger, SomeUnicodeString)): def mul((int1, str2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str2, int1) return str2.basestringclass() class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeString), diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -25,112 +25,6 @@ from rpython.rtyper import extregistry -class Stats(object): - - def __init__(self, bookkeeper): - self.bookkeeper = bookkeeper - self.classify = {} - - def count(self, category, *args): - for_category = self.classify.setdefault(category, {}) - classifier = getattr(self, 'consider_%s' % category, self.consider_generic) - outcome = classifier(*args) - for_category[self.bookkeeper.position_key] = outcome - - def indexrepr(self, idx): - if idx.is_constant(): - if idx.const is None: - return '' - if isinstance(idx, SomeInteger): - if idx.const >=0: - return 'pos-constant' - else: - return 'Neg-constant' - return idx.const - else: - if isinstance(idx, SomeInteger): - if idx.nonneg: - return "non-neg" - else: - return "MAYBE-NEG" - else: - return self.typerepr(idx) - - def steprepr(self, stp): - if stp.is_constant(): - if stp.const in (1, None): - return 'step=1' - else: - return 'step=%s?' 
% stp.const - else: - return 'non-const-step %s' % self.typerepr(stp) - - def consider_generic(self, *args): - return tuple([self.typerepr(x) for x in args]) - - def consider_list_list_eq(self, obj1, obj2): - return obj1, obj2 - - def consider_contains(self, seq): - return seq - - def consider_non_int_eq(self, obj1, obj2): - if obj1.knowntype == obj2.knowntype == list: - self.count("list_list_eq", obj1, obj2) - return self.typerepr(obj1), self.typerepr(obj2) - - def consider_non_int_comp(self, obj1, obj2): - return self.typerepr(obj1), self.typerepr(obj2) - - def typerepr(self, obj): - if isinstance(obj, SomeInstance): - return obj.classdef.name - else: - return obj.knowntype.__name__ - - def consider_tuple_random_getitem(self, tup): - return tuple([self.typerepr(x) for x in tup.items]) - - def consider_list_index(self): - return '!' - - def consider_list_getitem(self, idx): - return self.indexrepr(idx) - - def consider_list_setitem(self, idx): - return self.indexrepr(idx) - - def consider_list_delitem(self, idx): - return self.indexrepr(idx) - - def consider_str_join(self, s): - if s.is_constant(): - return repr(s.const) - else: - return "NON-CONSTANT" - - def consider_str_getitem(self, idx): - return self.indexrepr(idx) - - def consider_strformat(self, str, args): - if str.is_constant(): - s = repr(str.const) - else: - s = "?!!!!!!" - if isinstance(args, SomeTuple): - return (s, tuple([self.typerepr(x) for x in args.items])) - else: - return (s, self.typerepr(args)) - - def consider_dict_getitem(self, dic): - return dic - - def consider_dict_setitem(self, dic): - return dic - - def consider_dict_delitem(self, dic): - return dic - class Bookkeeper(object): """The log of choices that have been made while analysing the operations. It ensures that the same 'choice objects' will be returned if we ask @@ -165,13 +59,8 @@ self.needs_generic_instantiate = {} - self.stats = Stats(self) - delayed_imports() - def count(self, category, *args): - self.stats.count(category, *args) - def enter(self, position_key): """Start of an operation. 
The operation is uniquely identified by the given key.""" diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -84,23 +84,18 @@ raise AnnotatorError("cannot use hash() in RPython") def str(self): - getbookkeeper().count('str', self) return SomeString() def unicode(self): - getbookkeeper().count('unicode', self) return SomeUnicodeString() def repr(self): - getbookkeeper().count('repr', self) return SomeString() def hex(self): - getbookkeeper().count('hex', self) return SomeString() def oct(self): - getbookkeeper().count('oct', self) return SomeString() def id(self): @@ -237,7 +232,6 @@ return immutablevalue(len(self.items)) def iter(self): - getbookkeeper().count("tuple_iter", self) return SomeIterator(self) iter.can_only_throw = [] @@ -281,7 +275,6 @@ method_pop.can_only_throw = [IndexError] def method_index(self, s_value): - getbookkeeper().count("list_index") self.listdef.generalize(s_value) return SomeInteger(nonneg=True) @@ -472,7 +465,6 @@ def method_join(self, s_list): if s_None.contains(s_list): return SomeImpossibleValue() - getbookkeeper().count("str_join", self) s_item = s_list.listdef.read_item() if s_None.contains(s_item): if isinstance(self, SomeUnicodeString): @@ -489,7 +481,6 @@ return self.basecharclass() def method_split(self, patt, max=-1): - getbookkeeper().count("str_split", self, patt) if max == -1 and patt.is_constant() and patt.const == "\0": no_nul = True else: @@ -498,7 +489,6 @@ return getbookkeeper().newlist(s_item) def method_rsplit(self, patt, max=-1): - getbookkeeper().count("str_rsplit", self, patt) s_item = self.basestringclass(no_nul=self.no_nul) return getbookkeeper().newlist(s_item) @@ -709,8 +699,6 @@ if self.s_self is not None: return self.analyser(self.s_self, *args) else: - if self.methodname: - getbookkeeper().count(self.methodname.replace('.', '_'), *args) return self.analyser(*args) simple_call.can_only_throw = _can_only_throw From noreply at buildbot.pypy.org Sat Dec 14 12:47:51 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:51 +0100 (CET) Subject: [pypy-commit] pypy annotator: effectively define binaryop.BINARY_OPERATIONS in rpython.flowspace.operation Message-ID: <20131214114751.629CB1C31C5@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68435:81ba01f85d71 Date: 2013-12-14 02:51 +0100 http://bitbucket.org/pypy/pypy/changeset/81ba01f85d71/ Log: effectively define binaryop.BINARY_OPERATIONS in rpython.flowspace.operation diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -16,6 +16,7 @@ merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.operation import op from rpython.rlib import rarithmetic from rpython.annotator.model import AnnotatorError @@ -23,26 +24,8 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -# XXX unify this with ObjSpace.MethodTable -BINARY_OPERATIONS = set(['add', 'sub', 'mul', 'div', 'mod', - 'truediv', 'floordiv', 'divmod', - 'and_', 'or_', 'xor', - 'lshift', 'rshift', - 'getitem', 'setitem', 'delitem', - 'getitem_idx', 'getitem_key', 'getitem_idx_key', - 'inplace_add', 'inplace_sub', 'inplace_mul', - 'inplace_truediv', 'inplace_floordiv', 'inplace_div', - 'inplace_mod', - 'inplace_lshift', 'inplace_rshift', - 'inplace_and', 'inplace_or', 'inplace_xor', - 'lt', 
'le', 'eq', 'ne', 'gt', 'ge', 'is_', 'cmp', - 'coerce', - ] - +[opname+'_ovf' for opname in - """add sub mul floordiv div mod lshift - """.split() - ]) - +BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() + if oper.dispatch == 2]) for opname in BINARY_OPERATIONS: missing_operation(pairtype(SomeObject, SomeObject), opname) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -274,7 +274,7 @@ raise ValueError("this is not supported") -add_operator('is_', 2, pure=True) +add_operator('is_', 2, dispatch=2, pure=True) add_operator('id', 1, dispatch=1, pyfunc=id) add_operator('type', 1, dispatch=1, pyfunc=new_style_type, pure=True) add_operator('issubtype', 2, dispatch=1, pyfunc=issubclass, pure=True) # not for old-style classes @@ -285,12 +285,12 @@ add_operator('hash', 1, dispatch=1, pyfunc=hash) add_operator('setattr', 3, dispatch=1, pyfunc=setattr) add_operator('delattr', 2, dispatch=1, pyfunc=delattr) -add_operator('getitem', 2, pure=True) -add_operator('getitem_idx', 2, pure=True) -add_operator('getitem_key', 2, pure=True) -add_operator('getitem_idx_key', 2, pure=True) -add_operator('setitem', 3) -add_operator('delitem', 2) +add_operator('getitem', 2, dispatch=2, pure=True) +add_operator('getitem_idx', 2, dispatch=2, pure=True) +add_operator('getitem_key', 2, dispatch=2, pure=True) +add_operator('getitem_idx_key', 2, dispatch=2, pure=True) +add_operator('setitem', 3, dispatch=2) +add_operator('delitem', 2, dispatch=2) add_operator('getslice', 3, dispatch=1, pyfunc=do_getslice, pure=True) add_operator('setslice', 4, dispatch=1, pyfunc=do_setslice) add_operator('delslice', 3, dispatch=1, pyfunc=do_delslice) @@ -304,44 +304,44 @@ add_operator('oct', 1, dispatch=1, pyfunc=oct, pure=True) add_operator('ord', 1, dispatch=1, pyfunc=ord, pure=True) add_operator('invert', 1, dispatch=1, pure=True) -add_operator('add', 2, pure=True, ovf=True) -add_operator('sub', 2, pure=True, ovf=True) -add_operator('mul', 2, pure=True, ovf=True) -add_operator('truediv', 2, pure=True) -add_operator('floordiv', 2, pure=True, ovf=True) -add_operator('div', 2, pure=True, ovf=True) -add_operator('mod', 2, pure=True, ovf=True) +add_operator('add', 2, dispatch=2, pure=True, ovf=True) +add_operator('sub', 2, dispatch=2, pure=True, ovf=True) +add_operator('mul', 2, dispatch=2, pure=True, ovf=True) +add_operator('truediv', 2, dispatch=2, pure=True) +add_operator('floordiv', 2, dispatch=2, pure=True, ovf=True) +add_operator('div', 2, dispatch=2, pure=True, ovf=True) +add_operator('mod', 2, dispatch=2, pure=True, ovf=True) add_operator('divmod', 2, pyfunc=divmod, pure=True) -add_operator('lshift', 2, pure=True, ovf=True) -add_operator('rshift', 2, pure=True) -add_operator('and_', 2, pure=True) -add_operator('or_', 2, pure=True) -add_operator('xor', 2, pure=True) +add_operator('lshift', 2, dispatch=2, pure=True, ovf=True) +add_operator('rshift', 2, dispatch=2, pure=True) +add_operator('and_', 2, dispatch=2, pure=True) +add_operator('or_', 2, dispatch=2, pure=True) +add_operator('xor', 2, dispatch=2, pure=True) add_operator('int', 1, dispatch=1, pyfunc=do_int, pure=True) add_operator('index', 1, pyfunc=do_index, pure=True) add_operator('float', 1, dispatch=1, pyfunc=do_float, pure=True) add_operator('long', 1, dispatch=1, pyfunc=do_long, pure=True) -add_operator('inplace_add', 2, pyfunc=inplace_add) -add_operator('inplace_sub', 2, pyfunc=inplace_sub) -add_operator('inplace_mul', 2, pyfunc=inplace_mul) 
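The direction of this series is to make the flowspace operation table the single place where operations are declared and to derive the annotator's UNARY_OPERATIONS/BINARY_OPERATIONS sets from the new dispatch attribute, instead of keeping hand-written lists in sync. Schematically, with a toy registry rather than the real one:

class Op(object):
    def __init__(self, opname, dispatch):
        self.opname = opname
        self.dispatch = dispatch    # 1: dispatch on one annotation, 2: on a pair

REGISTRY = [Op('neg', 1), Op('len', 1), Op('add', 2), Op('getitem', 2)]

UNARY_OPERATIONS = set(op.opname for op in REGISTRY if op.dispatch == 1)
BINARY_OPERATIONS = set(op.opname for op in REGISTRY if op.dispatch == 2)

assert UNARY_OPERATIONS == set(['neg', 'len'])
assert BINARY_OPERATIONS == set(['add', 'getitem'])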
-add_operator('inplace_truediv', 2, pyfunc=inplace_truediv) -add_operator('inplace_floordiv', 2, pyfunc=inplace_floordiv) -add_operator('inplace_div', 2, pyfunc=inplace_div) -add_operator('inplace_mod', 2, pyfunc=inplace_mod) +add_operator('inplace_add', 2, dispatch=2, pyfunc=inplace_add) +add_operator('inplace_sub', 2, dispatch=2, pyfunc=inplace_sub) +add_operator('inplace_mul', 2, dispatch=2, pyfunc=inplace_mul) +add_operator('inplace_truediv', 2, dispatch=2, pyfunc=inplace_truediv) +add_operator('inplace_floordiv', 2, dispatch=2, pyfunc=inplace_floordiv) +add_operator('inplace_div', 2, dispatch=2, pyfunc=inplace_div) +add_operator('inplace_mod', 2, dispatch=2, pyfunc=inplace_mod) add_operator('inplace_pow', 2, pyfunc=inplace_pow) -add_operator('inplace_lshift', 2, pyfunc=inplace_lshift) -add_operator('inplace_rshift', 2, pyfunc=inplace_rshift) -add_operator('inplace_and', 2, pyfunc=inplace_and) -add_operator('inplace_or', 2, pyfunc=inplace_or) -add_operator('inplace_xor', 2, pyfunc=inplace_xor) -add_operator('lt', 2, pure=True) -add_operator('le', 2, pure=True) -add_operator('eq', 2, pure=True) -add_operator('ne', 2, pure=True) -add_operator('gt', 2, pure=True) -add_operator('ge', 2, pure=True) -add_operator('cmp', 2, pyfunc=cmp, pure=True) # rich cmps preferred -add_operator('coerce', 2, pyfunc=coerce, pure=True) +add_operator('inplace_lshift', 2, dispatch=2, pyfunc=inplace_lshift) +add_operator('inplace_rshift', 2, dispatch=2, pyfunc=inplace_rshift) +add_operator('inplace_and', 2, dispatch=2, pyfunc=inplace_and) +add_operator('inplace_or', 2, dispatch=2, pyfunc=inplace_or) +add_operator('inplace_xor', 2, dispatch=2, pyfunc=inplace_xor) +add_operator('lt', 2, dispatch=2, pure=True) +add_operator('le', 2, dispatch=2, pure=True) +add_operator('eq', 2, dispatch=2, pure=True) +add_operator('ne', 2, dispatch=2, pure=True) +add_operator('gt', 2, dispatch=2, pure=True) +add_operator('ge', 2, dispatch=2, pure=True) +add_operator('cmp', 2, dispatch=2, pyfunc=cmp, pure=True) # rich cmps preferred +add_operator('coerce', 2, dispatch=2, pyfunc=coerce, pure=True) add_operator('contains', 2, pure=True) add_operator('get', 3, pyfunc=get, pure=True) add_operator('set', 3, pyfunc=set) From noreply at buildbot.pypy.org Sat Dec 14 12:47:50 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:50 +0100 (CET) Subject: [pypy-commit] pypy annotator: Don't use unaryop.UNARY_OPERATIONS in annrpython.py Message-ID: <20131214114750.46CE81C3159@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68434:d35c47e702b3 Date: 2013-12-14 02:37 +0100 http://bitbucket.org/pypy/pypy/changeset/d35c47e702b3/ Log: Don't use unaryop.UNARY_OPERATIONS in annrpython.py diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -9,7 +9,7 @@ from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform -from rpython.annotator import model as annmodel, signature, unaryop, binaryop +from rpython.annotator import model as annmodel, signature, binaryop from rpython.annotator.bookkeeper import Bookkeeper import py @@ -460,7 +460,7 @@ arg2 = self.binding(op.args[1]) binop = getattr(pair(arg1, arg2), op.opname, None) can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2) - elif op.opname in unaryop.UNARY_OPERATIONS: + elif op.dispatch == 1: arg1 = self.binding(op.args[0]) opname = op.opname if 
opname == 'contains': opname = 'op_contains' @@ -625,16 +625,6 @@ return self.bookkeeper.newdict() -def _register_unary(): - d = {} - for opname in unaryop.UNARY_OPERATIONS: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg, *args): - return arg.%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(RPythonAnnotator, fnname, d[fnname]) - def _register_binary(): d = {} for opname in binaryop.BINARY_OPERATIONS: @@ -646,7 +636,6 @@ setattr(RPythonAnnotator, fnname, d[fnname]) # register simple operations handling -_register_unary() _register_binary() diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -132,9 +132,19 @@ ovf.offset = self.offset return ovf +class SingleDispatchMixin(object): + dispatch = 1 + def consider(self, annotator, arg, *other_args): + impl = getattr(arg, self.opname) + return impl(*other_args) + def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) + if dispatch == 1: + bases = [SingleDispatchMixin] + else: + bases = [] if ovf: assert pure base_cls = OverflowingOperation @@ -142,7 +152,8 @@ base_cls = PureOperation else: base_cls = HLOperation - cls = HLOperationMeta(name, (base_cls,), {'opname': name, 'arity': arity, + bases.append(base_cls) + cls = HLOperationMeta(name, tuple(bases), {'opname': name, 'arity': arity, 'canraise': [], 'dispatch': dispatch}) if pyfunc is not None: @@ -358,10 +369,9 @@ self.offset = -1 -class Iter(HLOperation): +class Iter(SingleDispatchMixin, HLOperation): opname = 'iter' arity = 1 - dispatch = 1 can_overflow = False canraise = [] pyfunc = staticmethod(iter) @@ -373,10 +383,9 @@ if isinstance(iterable, unrolling_iterable): return const(iterable.get_unroller()) -class Next(HLOperation): +class Next(SingleDispatchMixin, HLOperation): opname = 'next' arity = 1 - dispatch = 1 can_overflow = False canraise = [] pyfunc = staticmethod(next) @@ -398,10 +407,9 @@ frame.guessexception([StopIteration, RuntimeError], force=True) return w_item -class GetAttr(HLOperation): +class GetAttr(SingleDispatchMixin, HLOperation): opname = 'getattr' arity = 2 - dispatch = 1 can_overflow = False canraise = [] pyfunc = staticmethod(getattr) @@ -442,13 +450,11 @@ # *any* exception for non-builtins return [Exception] -class SimpleCall(CallOp): +class SimpleCall(SingleDispatchMixin, CallOp): opname = 'simple_call' - dispatch = 1 -class CallArgs(CallOp): +class CallArgs(SingleDispatchMixin, CallOp): opname = 'call_args' - dispatch = 1 # Other functions that get directly translated to SpaceOperators func2op[type] = op.type From noreply at buildbot.pypy.org Sat Dec 14 12:47:49 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:49 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill dead test code Message-ID: <20131214114749.22A521C3141@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68433:d3bc5f261805 Date: 2013-12-14 01:44 +0100 http://bitbucket.org/pypy/pypy/changeset/d3bc5f261805/ Log: kill dead test code diff --git a/rpython/rtyper/test/test_rbool.py b/rpython/rtyper/test/test_rbool.py --- a/rpython/rtyper/test/test_rbool.py +++ b/rpython/rtyper/test/test_rbool.py @@ -1,5 +1,4 @@ from rpython.translator.translator import TranslationContext -from rpython.annotator import unaryop, binaryop from rpython.rtyper.test import snippet from rpython.rtyper.test.tool import BaseRtypingTest 
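For reference, the SingleDispatchMixin.consider() introduced in the commit above amounts to a plain getattr-based dispatch on the annotation of the operation's first argument. Stripped of the real annotator types, the shape of it is:

class FakeUnaryOp(object):
    opname = 'len'
    dispatch = 1
    def consider(self, annotator, arg, *other_args):
        # look the operation up on the annotation of the first argument
        return getattr(arg, self.opname)(*other_args)

class SomeFakeList(object):
    def len(self):
        return 'SomeInteger(nonneg=True)'

assert FakeUnaryOp().consider(None, SomeFakeList()) == 'SomeInteger(nonneg=True)'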
@@ -23,15 +22,6 @@ def test_bool_cast1(self): self._test(snippet.bool_cast1, [bool]) - def DONTtest_unary_operations(self): - # XXX TODO test if all unary operations are implemented - for opname in unaryop.UNARY_OPERATIONS: - print 'UNARY_OPERATIONS:', opname - - def DONTtest_binary_operations(self): - # XXX TODO test if all binary operations are implemented - for opname in binaryop.BINARY_OPERATIONS: - print 'BINARY_OPERATIONS:', opname class TestRbool(BaseRtypingTest): diff --git a/rpython/rtyper/test/test_rfloat.py b/rpython/rtyper/test/test_rfloat.py --- a/rpython/rtyper/test/test_rfloat.py +++ b/rpython/rtyper/test/test_rfloat.py @@ -1,10 +1,9 @@ -import sys, py +import sys from rpython.translator.translator import TranslationContext -from rpython.annotator import unaryop, binaryop from rpython.rtyper.test import snippet from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.rarithmetic import ( - r_int, r_uint, r_longlong, r_ulonglong, r_singlefloat) + r_uint, r_longlong, r_ulonglong, r_singlefloat) from rpython.rlib.objectmodel import compute_hash class TestSnippet(object): @@ -27,15 +26,6 @@ def test_float_cast1(self): self._test(snippet.float_cast1, [float]) - def DONTtest_unary_operations(self): - # XXX TODO test if all unary operations are implemented - for opname in unaryop.UNARY_OPERATIONS: - print 'UNARY_OPERATIONS:', opname - - def DONTtest_binary_operations(self): - # XXX TODO test if all binary operations are implemented - for opname in binaryop.BINARY_OPERATIONS: - print 'BINARY_OPERATIONS:', opname class TestRfloat(BaseRtypingTest): @@ -93,7 +83,6 @@ big = float(0x7fffffffffffffff) x = big - 1.e10 assert x != big - y = fn(x) assert fn(x) == 9223372026854775808 def test_to_r_uint(self): diff --git a/rpython/rtyper/test/test_rint.py b/rpython/rtyper/test/test_rint.py --- a/rpython/rtyper/test/test_rint.py +++ b/rpython/rtyper/test/test_rint.py @@ -1,7 +1,6 @@ import py import sys, operator from rpython.translator.translator import TranslationContext -from rpython.annotator import unaryop, binaryop from rpython.rtyper.test import snippet from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between @@ -29,16 +28,6 @@ def test_int_cast1(self): self._test(snippet.int_cast1, [int]) - def DONTtest_unary_operations(self): - # XXX TODO test if all unary operations are implemented - for opname in unaryop.UNARY_OPERATIONS: - print 'UNARY_OPERATIONS:', opname - - def DONTtest_binary_operations(self): - # XXX TODO test if all binary operations are implemented - for opname in binaryop.BINARY_OPERATIONS: - print 'BINARY_OPERATIONS:', opname - class TestRint(BaseRtypingTest): From noreply at buildbot.pypy.org Sat Dec 14 12:47:52 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:52 +0100 (CET) Subject: [pypy-commit] pypy annotator: Don't use binaryop.BINARY_OPERATIONS in annrpython.py Message-ID: <20131214114752.782311C04FF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68436:b0d606986842 Date: 2013-12-14 03:08 +0100 http://bitbucket.org/pypy/pypy/changeset/b0d606986842/ Log: Don't use binaryop.BINARY_OPERATIONS in annrpython.py diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -9,7 +9,7 @@ from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator 
import simplify, transform -from rpython.annotator import model as annmodel, signature, binaryop +from rpython.annotator import model as annmodel, signature from rpython.annotator.bookkeeper import Bookkeeper import py @@ -455,7 +455,7 @@ # occour for this specific, typed operation. if block.exitswitch == c_last_exception: op = block.operations[-1] - if op.opname in binaryop.BINARY_OPERATIONS: + if op.dispatch == 2: arg1 = self.binding(op.args[0]) arg2 = self.binding(op.args[1]) binop = getattr(pair(arg1, arg2), op.opname, None) @@ -625,20 +625,6 @@ return self.bookkeeper.newdict() -def _register_binary(): - d = {} - for opname in binaryop.BINARY_OPERATIONS: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg1, arg2, *args): - return pair(arg1,arg2).%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(RPythonAnnotator, fnname, d[fnname]) - -# register simple operations handling -_register_binary() - - class BlockedInference(Exception): """This exception signals the type inference engine that the situation is currently blocked, and that it should try to progress elsewhere.""" diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -8,6 +8,7 @@ import operator import sys import types +from rpython.tool.pairtype import pair from rpython.rlib.unroll import unrolling_iterable, _unroller from rpython.tool.sourcetools import compile2 from rpython.flowspace.model import (Constant, WrapException, const, Variable, @@ -138,11 +139,19 @@ impl = getattr(arg, self.opname) return impl(*other_args) +class DoubleDispatchMixin(object): + dispatch = 2 + def consider(self, annotator, arg1, arg2, *other_args): + impl = getattr(pair(arg1, arg2), self.opname) + return impl(*other_args) + def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) if dispatch == 1: bases = [SingleDispatchMixin] + elif dispatch == 2: + bases = [DoubleDispatchMixin] else: bases = [] if ovf: From noreply at buildbot.pypy.org Sat Dec 14 12:47:46 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:46 +0100 (CET) Subject: [pypy-commit] pypy annotator: cleanup Message-ID: <20131214114746.D346F1C14FF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68431:82a2e33bc37d Date: 2013-12-13 20:14 +0100 http://bitbucket.org/pypy/pypy/changeset/82a2e33bc37d/ Log: cleanup diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -625,28 +625,29 @@ return self.bookkeeper.newdict() - def _registeroperations(cls, unary_ops, binary_ops): - # All unary operations - d = {} - for opname in unary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" +def _register_unary(): + d = {} + for opname in unaryop.UNARY_OPERATIONS: + fnname = 'consider_op_' + opname + exec py.code.Source(""" def consider_op_%s(self, arg, *args): return arg.%s(*args) """ % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - # All binary operations - for opname in binary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" + setattr(RPythonAnnotator, fnname, d[fnname]) + +def _register_binary(): + d = {} + for opname in binaryop.BINARY_OPERATIONS: + fnname = 'consider_op_' + opname + exec py.code.Source(""" def consider_op_%s(self, arg1, arg2, *args): return 
pair(arg1,arg2).%s(*args) """ % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - _registeroperations = classmethod(_registeroperations) + setattr(RPythonAnnotator, fnname, d[fnname]) # register simple operations handling -RPythonAnnotator._registeroperations(unaryop.UNARY_OPERATIONS, binaryop.BINARY_OPERATIONS) +_register_unary() +_register_binary() class BlockedInference(Exception): From noreply at buildbot.pypy.org Sat Dec 14 12:47:53 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:53 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill dead code Message-ID: <20131214114753.95DED1C04FF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68437:6dedfb7fe4c9 Date: 2013-12-14 03:14 +0100 http://bitbucket.org/pypy/pypy/changeset/6dedfb7fe4c9/ Log: kill dead code diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -798,11 +798,7 @@ def missing_operation(cls, name): def default_op(*args): - if args and isinstance(args[0], tuple): - flattened = tuple(args[0]) + args[1:] - else: - flattened = args - for arg in flattened: + for arg in args: if arg.__class__ is SomeObject and arg.knowntype is not type: return SomeObject() bookkeeper = rpython.annotator.bookkeeper.getbookkeeper() From noreply at buildbot.pypy.org Sat Dec 14 12:47:54 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:54 +0100 (CET) Subject: [pypy-commit] pypy annotator: Kill useless 'no precise annotation' warning Message-ID: <20131214114754.B2ADB1C04FF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68438:4fdc3b6d77d5 Date: 2013-12-14 12:38 +0100 http://bitbucket.org/pypy/pypy/changeset/4fdc3b6d77d5/ Log: Kill useless 'no precise annotation' warning diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -798,11 +798,6 @@ def missing_operation(cls, name): def default_op(*args): - for arg in args: - if arg.__class__ is SomeObject and arg.knowntype is not type: - return SomeObject() - bookkeeper = rpython.annotator.bookkeeper.getbookkeeper() - bookkeeper.warning("no precise annotation supplied for %s%r" % (name, args)) return s_ImpossibleValue setattr(cls, name, default_op) From noreply at buildbot.pypy.org Sat Dec 14 12:47:48 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 14 Dec 2013 12:47:48 +0100 (CET) Subject: [pypy-commit] pypy annotator: effectively define unaryop.UNARY_OPERATIONS in rpython.flowspace.operation Message-ID: <20131214114748.096421C30A4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68432:42e96df5f648 Date: 2013-12-14 01:37 +0100 http://bitbucket.org/pypy/pypy/changeset/42e96df5f648/ Log: effectively define unaryop.UNARY_OPERATIONS in rpython.flowspace.operation diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -5,6 +5,7 @@ from __future__ import absolute_import from types import MethodType +from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, @@ -20,15 +21,8 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -UNARY_OPERATIONS = set(['len', 'bool', 
'getattr', 'setattr', 'delattr', - 'simple_call', 'call_args', 'str', 'repr', - 'iter', 'next', 'invert', 'type', 'issubtype', - 'pos', 'neg', 'abs', 'hex', 'oct', - 'ord', 'int', 'float', 'long', - 'hash', 'id', # <== not supported any more - 'getslice', 'setslice', 'delslice', - 'neg_ovf', 'abs_ovf', 'hint', 'unicode', 'unichr']) - +UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() + if oper.dispatch == 1]) for opname in UNARY_OPERATIONS: missing_operation(SomeObject, opname) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -54,6 +54,8 @@ __metaclass__ = HLOperationMeta pure = False can_overflow = False + dispatch = None # number of arguments to dispatch on + # (None means special handling) def __init__(self, *args): self.args = list(args) @@ -131,7 +133,7 @@ return ovf -def add_operator(name, arity, pyfunc=None, pure=False, ovf=False): +def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) if ovf: assert pure @@ -141,7 +143,8 @@ else: base_cls = HLOperation cls = HLOperationMeta(name, (base_cls,), {'opname': name, 'arity': arity, - 'canraise': []}) + 'canraise': [], + 'dispatch': dispatch}) if pyfunc is not None: func2op[pyfunc] = cls if operator_func: @@ -153,7 +156,7 @@ if ovf: from rpython.rlib.rarithmetic import ovfcheck ovf_func = lambda *args: ovfcheck(cls.pyfunc(*args)) - add_operator(name + '_ovf', arity, pyfunc=ovf_func) + add_operator(name + '_ovf', arity, dispatch, pyfunc=ovf_func) cls.ovf_variant = getattr(op, name + '_ovf') # ____________________________________________________________ @@ -261,35 +264,35 @@ add_operator('is_', 2, pure=True) -add_operator('id', 1, pyfunc=id) -add_operator('type', 1, pyfunc=new_style_type, pure=True) -add_operator('issubtype', 2, pyfunc=issubclass, pure=True) # not for old-style classes -add_operator('repr', 1, pyfunc=repr, pure=True) -add_operator('str', 1, pyfunc=str, pure=True) +add_operator('id', 1, dispatch=1, pyfunc=id) +add_operator('type', 1, dispatch=1, pyfunc=new_style_type, pure=True) +add_operator('issubtype', 2, dispatch=1, pyfunc=issubclass, pure=True) # not for old-style classes +add_operator('repr', 1, dispatch=1, pyfunc=repr, pure=True) +add_operator('str', 1, dispatch=1, pyfunc=str, pure=True) add_operator('format', 2, pyfunc=unsupported) -add_operator('len', 1, pyfunc=len, pure=True) -add_operator('hash', 1, pyfunc=hash) -add_operator('setattr', 3, pyfunc=setattr) -add_operator('delattr', 2, pyfunc=delattr) +add_operator('len', 1, dispatch=1, pyfunc=len, pure=True) +add_operator('hash', 1, dispatch=1, pyfunc=hash) +add_operator('setattr', 3, dispatch=1, pyfunc=setattr) +add_operator('delattr', 2, dispatch=1, pyfunc=delattr) add_operator('getitem', 2, pure=True) add_operator('getitem_idx', 2, pure=True) add_operator('getitem_key', 2, pure=True) add_operator('getitem_idx_key', 2, pure=True) add_operator('setitem', 3) add_operator('delitem', 2) -add_operator('getslice', 3, pyfunc=do_getslice, pure=True) -add_operator('setslice', 4, pyfunc=do_setslice) -add_operator('delslice', 3, pyfunc=do_delslice) +add_operator('getslice', 3, dispatch=1, pyfunc=do_getslice, pure=True) +add_operator('setslice', 4, dispatch=1, pyfunc=do_setslice) +add_operator('delslice', 3, dispatch=1, pyfunc=do_delslice) add_operator('trunc', 1, pyfunc=unsupported) -add_operator('pos', 1, pure=True) -add_operator('neg', 1, pure=True, ovf=True) 
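The ovf=True declarations are what generate the *_ovf variants: their Python-level implementation is simply the plain operator wrapped in ovfcheck(), the usual RPython idiom for requesting an OverflowError instead of silent wraparound. A small sketch of that idiom, with an arbitrary fallback value:

from rpython.rlib.rarithmetic import ovfcheck

def add_checked(x, y):
    # ovfcheck() must wrap the arithmetic operation directly
    try:
        return ovfcheck(x + y)
    except OverflowError:
        return -1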
-add_operator('bool', 1, pyfunc=bool, pure=True) +add_operator('pos', 1, dispatch=1, pure=True) +add_operator('neg', 1, dispatch=1, pure=True, ovf=True) +add_operator('bool', 1, dispatch=1, pyfunc=bool, pure=True) op.is_true = op.nonzero = op.bool # for llinterp -add_operator('abs', 1, pyfunc=abs, pure=True, ovf=True) -add_operator('hex', 1, pyfunc=hex, pure=True) -add_operator('oct', 1, pyfunc=oct, pure=True) -add_operator('ord', 1, pyfunc=ord, pure=True) -add_operator('invert', 1, pure=True) +add_operator('abs', 1, dispatch=1, pyfunc=abs, pure=True, ovf=True) +add_operator('hex', 1, dispatch=1, pyfunc=hex, pure=True) +add_operator('oct', 1, dispatch=1, pyfunc=oct, pure=True) +add_operator('ord', 1, dispatch=1, pyfunc=ord, pure=True) +add_operator('invert', 1, dispatch=1, pure=True) add_operator('add', 2, pure=True, ovf=True) add_operator('sub', 2, pure=True, ovf=True) add_operator('mul', 2, pure=True, ovf=True) @@ -303,10 +306,10 @@ add_operator('and_', 2, pure=True) add_operator('or_', 2, pure=True) add_operator('xor', 2, pure=True) -add_operator('int', 1, pyfunc=do_int, pure=True) +add_operator('int', 1, dispatch=1, pyfunc=do_int, pure=True) add_operator('index', 1, pyfunc=do_index, pure=True) -add_operator('float', 1, pyfunc=do_float, pure=True) -add_operator('long', 1, pyfunc=do_long, pure=True) +add_operator('float', 1, dispatch=1, pyfunc=do_float, pure=True) +add_operator('long', 1, dispatch=1, pyfunc=do_long, pure=True) add_operator('inplace_add', 2, pyfunc=inplace_add) add_operator('inplace_sub', 2, pyfunc=inplace_sub) add_operator('inplace_mul', 2, pyfunc=inplace_mul) @@ -339,7 +342,7 @@ add_operator('newtuple', None, pure=True, pyfunc=lambda *args:args) add_operator('newlist', None) add_operator('newslice', 3) -add_operator('hint', None) +add_operator('hint', None, dispatch=1) class Pow(PureOperation): @@ -358,6 +361,7 @@ class Iter(HLOperation): opname = 'iter' arity = 1 + dispatch = 1 can_overflow = False canraise = [] pyfunc = staticmethod(iter) @@ -372,6 +376,7 @@ class Next(HLOperation): opname = 'next' arity = 1 + dispatch = 1 can_overflow = False canraise = [] pyfunc = staticmethod(next) @@ -396,6 +401,7 @@ class GetAttr(HLOperation): opname = 'getattr' arity = 2 + dispatch = 1 can_overflow = False canraise = [] pyfunc = staticmethod(getattr) @@ -438,9 +444,11 @@ class SimpleCall(CallOp): opname = 'simple_call' + dispatch = 1 class CallArgs(CallOp): opname = 'call_args' + dispatch = 1 # Other functions that get directly translated to SpaceOperators func2op[type] = op.type From noreply at buildbot.pypy.org Sat Dec 14 18:54:18 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 14 Dec 2013 18:54:18 +0100 (CET) Subject: [pypy-commit] pypy default: fix array(scalar)[newaxis] Message-ID: <20131214175418.CDA921C02FB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68439:a8a77877670d Date: 2013-12-14 12:42 -0500 http://bitbucket.org/pypy/pypy/changeset/a8a77877670d/ Log: fix array(scalar)[newaxis] diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -132,6 +132,12 @@ if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() + if space.is_none(w_idx): + new_shape = [1] + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) + arr_iter = arr.create_iter(new_shape) + arr_iter.setitem(self.value) + return arr raise OperationError(space.w_IndexError, 
space.wrap("0-d arrays can't be indexed")) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -640,6 +640,9 @@ for y in range(2): expected[x, y] = math.cos(a[x]) * math.cos(b[y]) assert ((cos(a)[:,newaxis] * cos(b).T) == expected).all() + a = array(1)[newaxis] + assert a == array([1]) + assert a.shape == (1,) def test_newaxis_slice(self): from numpypy import array, newaxis From noreply at buildbot.pypy.org Sat Dec 14 18:54:20 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 14 Dec 2013 18:54:20 +0100 (CET) Subject: [pypy-commit] pypy default: fix array setitem_array_int with scalar value Message-ID: <20131214175420.2DBB51C02FB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68440:55f7a6da11c7 Date: 2013-12-14 12:51 -0500 http://bitbucket.org/pypy/pypy/changeset/55f7a6da11c7/ Log: fix array setitem_array_int with scalar value diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -564,8 +564,11 @@ index_w[i] = indexes_w[i] w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space, shapelen)) - arr.descr_setitem(space, space.newtuple(index_w), - val_arr.descr_getitem(space, w_idx)) + if val_arr.is_scalar(): + w_value = val_arr.get_scalar_value() + else: + w_value = val_arr.descr_getitem(space, w_idx) + arr.descr_setitem(space, space.newtuple(index_w), w_value) iter.next() byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1888,6 +1888,10 @@ assert (a == [0, 1, 1, 0, 4, 0, 6, 7, 8, 9]).all() raises(IndexError, "arange(10)[array([10])] = 3") raises(IndexError, "arange(10)[[-11]] = 3") + a = zeros(10) + b = array([3,4,5]) + a[b] = 1 + assert (a == [0, 0, 0, 1, 1, 1, 0, 0, 0, 0]).all() def test_array_scalar_index(self): import numpypy as np From noreply at buildbot.pypy.org Sun Dec 15 06:46:36 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 15 Dec 2013 06:46:36 +0100 (CET) Subject: [pypy-commit] pypy default: test, fix for matrix subtype which modfies shape in __getitem__ Message-ID: <20131215054636.9B6851C1175@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68441:bacd9679adbf Date: 2013-12-15 07:43 +0200 http://bitbucket.org/pypy/pypy/changeset/bacd9679adbf/ Log: test, fix for matrix subtype which modfies shape in __getitem__ causing an infinite loop in find_shape_and_elems diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -51,8 +51,8 @@ rstrides.append(strides[i]) rbackstrides.append(backstrides[i]) if backwards: - rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) - rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) + rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) + rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) else: rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides @@ -62,7 +62,7 @@ if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if 
(space.isinstance_w(w_elem, space.w_tuple) or - isinstance(w_elem, W_NDimArray) or + isinstance(w_elem, W_NDimArray) or space.isinstance_w(w_elem, space.w_list)): return False return True @@ -87,6 +87,12 @@ space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) new_batch += space.listview(w_elem) shape.append(size) batch = new_batch diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -258,3 +258,46 @@ assert isinstance(b, D) c = array(a, float) assert c.dtype is dtype(float) + + def test__getitem_modifies_shape(self): + import numpypy as N + # numpy's matrix class caused an infinite loop + class matrix(N.ndarray): + getcnt = 0 + def __new__(subtype, data, dtype=None, copy=True): + arr = N.array(data, dtype=dtype, copy=copy) + shape = arr.shape + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=True) + return ret + + def __getitem__(self, index): + matrix.getcnt += 1 + if matrix.getcnt > 10: + # XXX strides.find_shape_and_elems is sensitive + # to shape modification + xxx + out = N.ndarray.__getitem__(self, index) + + if not isinstance(out, N.ndarray): + return out + # Determine when we should have a column array + old_shape = out.shape + if out.ndim < 2: + sh = out.shape[0] + try: + n = len(index) + except: + n = 0 + if n > 1: + out.shape = (sh, 1) + else: + out.shape = (1, sh) + print 'out, shape was',old_shape,'now',out.shape + return out + a = matrix([[1., 2.]]) + b = N.array([a]) + + From noreply at buildbot.pypy.org Sun Dec 15 22:46:49 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 15 Dec 2013 22:46:49 +0100 (CET) Subject: [pypy-commit] pypy default: Replace a loop with a memcpy Message-ID: <20131215214649.AC9021C087E@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68442:92ff43b19372 Date: 2013-12-15 13:45 -0800 http://bitbucket.org/pypy/pypy/changeset/92ff43b19372/ Log: Replace a loop with a memcpy diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -206,8 +206,7 @@ w_value.get_array_length() == length): # fast path: copying from exactly the correct type s = w_value._cdata - for i in range(ctitemsize * length): - cdata[i] = s[i] + rffi.c_memcpy(cdata, s, ctitemsize * length) keepalive_until_here(w_value) return # @@ -259,7 +258,6 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray - from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr From noreply at buildbot.pypy.org Mon Dec 16 15:07:42 2013 From: noreply at buildbot.pypy.org (timfel) Date: Mon, 16 Dec 2013 15:07:42 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: return modifiers also when we did not peek before -- now works on Squeak4.5 Message-ID: <20131216140742.086431C3223@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: 
r524:1ffee021de59 Date: 2013-12-16 10:48 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1ffee021de59/ Log: return modifiers also when we did not peek before -- now works on Squeak4.5 diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -132,7 +132,7 @@ def next_keycode(self): key = self.key self.key = 0 - return key + return key | self.get_modifier_mask(8) def peek_keycode(self): self.get_next_event() From noreply at buildbot.pypy.org Mon Dec 16 15:07:43 2013 From: noreply at buildbot.pypy.org (timfel) Date: Mon, 16 Dec 2013 15:07:43 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: make interrupt check counter configurable via envvar Message-ID: <20131216140743.04E0E1C327A@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r525:131776c331c9 Date: 2013-12-16 15:07 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/131776c331c9/ Log: make interrupt check counter configurable via envvar diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,4 +1,5 @@ import py +import os from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound from spyvm import model, constants, primitives, conftest, wrapper from spyvm.tool.bitmanipulation import splitter @@ -42,7 +43,11 @@ self.remaining_stack_depth = max_stack_depth self._loop = False self.next_wakeup_tick = 0 - self.interrupt_check_counter = constants.INTERRUPT_COUNTER_SIZE + try: + self.interrupt_counter_size = int(os.environ.get("SPY_ICS")) + except KeyError: + self.interrupt_counter_size = constants.INTERRUPT_COUNTER_SIZE + self.interrupt_check_counter = self.interrupt_counter_size # ###################################################################### self.trace = trace self.trace_proxy = False @@ -157,7 +162,7 @@ s_frame.push(w_receiver) s_frame.push_all(list(arguments_w)) - self.interrupt_check_counter = constants.INTERRUPT_COUNTER_SIZE + self.interrupt_check_counter = self.interrupt_counter_size try: self.loop(s_frame.w_self()) except ReturnFromTopLevel, e: @@ -166,7 +171,7 @@ def quick_check_for_interrupt(self, s_frame, dec=1): self.interrupt_check_counter -= dec if self.interrupt_check_counter <= 0: - self.interrupt_check_counter = constants.INTERRUPT_COUNTER_SIZE + self.interrupt_check_counter = self.interrupt_counter_size self.check_for_interrupts(s_frame) def check_for_interrupts(self, s_frame): From noreply at buildbot.pypy.org Mon Dec 16 15:07:40 2013 From: noreply at buildbot.pypy.org (timfel) Date: Mon, 16 Dec 2013 15:07:40 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: just refactor the event polling code Message-ID: <20131216140741.00F851C31CA@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r523:14e1ccb756c8 Date: 2013-12-16 10:36 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/14e1ccb756c8/ Log: just refactor the event polling code diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -43,61 +43,68 @@ def flip(self): RSDL.Flip(self.screen) + def handle_mouse_button(self, c_type, event): + b = rffi.cast(RSDL.MouseButtonEventPtr, event) + btn = rffi.getintfield(b, 'c_button') + if btn == RSDL.BUTTON_RIGHT: + btn = MOUSE_BTN_RIGHT + elif btn == RSDL.BUTTON_MIDDLE: + btn = MOUSE_BTN_MIDDLE + elif btn == RSDL.BUTTON_LEFT: + btn = MOUSE_BTN_LEFT + + if c_type == RSDL.MOUSEBUTTONDOWN: + self.button |= btn + else: + self.button &= ~btn + + def 
handle_mouse_move(self, c_type, event): + m = rffi.cast(RSDL.MouseMotionEventPtr, event) + x = rffi.getintfield(m, "c_x") + y = rffi.getintfield(m, "c_y") + self.mouse_position = [x, y] + + def handle_keypress(self, c_type, event): + self.key = 0 + p = rffi.cast(RSDL.KeyboardEventPtr, event) + sym = rffi.getintfield(p.c_keysym, 'c_sym') + char = rffi.getintfield(p.c_keysym, 'c_unicode') + if sym == RSDL.K_DOWN: + self.key = 31 + elif sym == RSDL.K_LEFT: + self.key = 28 + elif sym == RSDL.K_RIGHT: + self.key = 29 + elif sym == RSDL.K_UP: + self.key = 30 + elif char != 0: + chars = unicode_encode_utf_8(unichr(char), 1, "ignore") + if len(chars) == 1: + asciivalue = ord(chars[0]) + if asciivalue >= 32: + self.key = asciivalue + if self.key == 0 and sym <= 255: + self.key = sym + interrupt = self.interrupt_key + if (interrupt & 0xFF == self.key and interrupt >> 8 == self.get_modifier_mask(0)): + raise KeyboardInterrupt + def get_next_event(self): event = lltype.malloc(RSDL.Event, flavor="raw") - ok = 1 + ok = rffi.cast(lltype.Signed, RSDL.PollEvent(event)) try: while ok == 1: + c_type = rffi.getintfield(event, 'c_type') + if c_type == RSDL.MOUSEBUTTONDOWN or c_type == RSDL.MOUSEBUTTONUP: + self.handle_mouse_button(c_type, event) + elif c_type == RSDL.MOUSEMOTION: + self.handle_mouse_move(c_type, event) + elif c_type == RSDL.KEYDOWN: + self.handle_keypress(c_type, event) + elif c_type == RSDL.QUIT: + from spyvm.error import Exit + raise Exit("Window closed..") ok = rffi.cast(lltype.Signed, RSDL.PollEvent(event)) - if ok == 1: - c_type = rffi.getintfield(event, 'c_type') - if c_type == RSDL.MOUSEBUTTONDOWN or c_type == RSDL.MOUSEBUTTONUP: - b = rffi.cast(RSDL.MouseButtonEventPtr, event) - btn = rffi.getintfield(b, 'c_button') - if btn == RSDL.BUTTON_RIGHT: - btn = MOUSE_BTN_RIGHT - elif btn == RSDL.BUTTON_MIDDLE: - btn = MOUSE_BTN_MIDDLE - elif btn == RSDL.BUTTON_LEFT: - btn = MOUSE_BTN_LEFT - - if c_type == RSDL.MOUSEBUTTONDOWN: - self.button |= btn - else: - self.button &= ~btn - elif c_type == RSDL.MOUSEMOTION: - m = rffi.cast(RSDL.MouseMotionEventPtr, event) - x = rffi.getintfield(m, "c_x") - y = rffi.getintfield(m, "c_y") - self.mouse_position = [x, y] - elif c_type == RSDL.KEYUP or c_type == RSDL.KEYDOWN: - if c_type == RSDL.KEYDOWN: # TODO: create KeyUp events and KeyRepeat events - self.key = 0 - p = rffi.cast(RSDL.KeyboardEventPtr, event) - sym = rffi.getintfield(p.c_keysym, 'c_sym') - char = rffi.getintfield(p.c_keysym, 'c_unicode') - if sym == RSDL.K_DOWN: - self.key = 31 - elif sym == RSDL.K_LEFT: - self.key = 28 - elif sym == RSDL.K_RIGHT: - self.key = 29 - elif sym == RSDL.K_UP: - self.key = 30 - elif char != 0: - chars = unicode_encode_utf_8(unichr(char), 1, "ignore") - if len(chars) == 1: - asciivalue = ord(chars[0]) - if asciivalue >= 32: - self.key = asciivalue - if self.key == 0 and sym <= 255: - self.key = sym - interrupt = self.interrupt_key - if (interrupt & 0xFF == self.key and interrupt >> 8 == self.get_modifier_mask(0)): - raise KeyboardInterrupt - elif c_type == RSDL.QUIT: - from spyvm.error import Exit - raise Exit("Window closed..") finally: lltype.free(event, flavor='raw') From noreply at buildbot.pypy.org Mon Dec 16 18:30:35 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 16 Dec 2013 18:30:35 +0100 (CET) Subject: [pypy-commit] pypy default: test, fix for subtype pickle numpy compatability, including quirks Message-ID: <20131216173035.15E6C1C087E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68443:d5e489e07679 Date: 2013-12-16 19:14 
+0200 http://bitbucket.org/pypy/pypy/changeset/d5e489e07679/ Log: test, fix for subtype pickle numpy compatability, including quirks diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1021,8 +1021,8 @@ multiarray = numpypy.get("multiarray") assert isinstance(multiarray, MixedModule) reconstruct = multiarray.get("_reconstruct") - - parameters = space.newtuple([space.gettypefor(W_NDimArray), space.newtuple([space.wrap(0)]), space.wrap("b")]) + parameters = space.newtuple([self.getclass(space), + space.newtuple([space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() if isinstance(self.implementation, SliceArray): @@ -1045,14 +1045,22 @@ return space.newtuple([reconstruct, parameters, state]) def descr_setstate(self, space, w_state): - from rpython.rtyper.lltypesystem import rffi - - shape = space.getitem(w_state, space.wrap(1)) - dtype = space.getitem(w_state, space.wrap(2)) - assert isinstance(dtype, interp_dtype.W_Dtype) - isfortran = space.getitem(w_state, space.wrap(3)) - storage = space.getitem(w_state, space.wrap(4)) - + lens = space.len_w(w_state) + # numpy compatability, see multiarray/methods.c + if lens == 5: + base_index = 1 + elif lens == 4: + base_index = 0 + else: + raise OperationError(space.w_ValueError, space.wrap( + "__setstate__ called with len(args[1])==%d, not 5 or 4" % lens)) + shape = space.getitem(w_state, space.wrap(base_index)) + dtype = space.getitem(w_state, space.wrap(base_index+1)) + isfortran = space.getitem(w_state, space.wrap(base_index+2)) + storage = space.getitem(w_state, space.wrap(base_index+3)) + if not isinstance(dtype, interp_dtype.W_Dtype): + raise OperationError(space.w_ValueError, space.wrap( + "__setstate__(self, (shape, dtype, .. 
called with improper dtype '%r'" % dtype)) self.implementation = W_NDimArray.from_shape_and_storage(space, [space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -3,6 +3,7 @@ class AppTestSupport(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_NoNew = cls.space.appexec([], '''(): @@ -300,4 +301,75 @@ a = matrix([[1., 2.]]) b = N.array([a]) + def test_setstate_no_version(self): + # Some subclasses of ndarray, like MaskedArray, do not use + # version in __setstare__ + from numpy import ndarray, array + from pickle import loads, dumps + import sys, new + class D(ndarray): + ''' A subtype with a constructor that accepts a list of + data values, where ndarray accepts a shape + ''' + def __new__(subtype, data, dtype=None, copy=True): + arr = array(data, dtype=dtype, copy=copy) + shape = arr.shape + ret = ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=True) + return ret + def __setstate__(self, state): + (version, shp, typ, isf, raw) = state + ndarray.__setstate__(self, (shp, typ, isf, raw)) + D.__module__ = 'mod' + mod = new.module('mod') + mod.D = D + sys.modules['mod'] = mod + a = D([1., 2.]) + s = dumps(a) + #Taken from numpy version 1.8 + s_from_numpy = '''ignore this line + _reconstruct + p0 + (cmod + D + p1 + (I0 + tp2 + S'b' + p3 + tp4 + Rp5 + (I1 + (I2 + tp6 + cnumpy + dtype + p7 + (S'f8' + p8 + I0 + I1 + tp9 + Rp10 + (I3 + S'<' + p11 + NNNI-1 + I-1 + I0 + tp12 + bI00 + S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@' + p13 + tp14 + b.'''.replace(' ','') + for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]): + if len(ss)>10: + # ignore binary data, it will be checked later + continue + assert ss == sn + b = loads(s) + assert (a == b).all() + assert isinstance(b, D) From noreply at buildbot.pypy.org Tue Dec 17 01:47:54 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 17 Dec 2013 01:47:54 +0100 (CET) Subject: [pypy-commit] pypy default: support creating long from buffer Message-ID: <20131217004754.BC6261C010E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68444:21bfa9ad6eaf Date: 2013-12-16 19:46 -0500 http://bitbucket.org/pypy/pypy/changeset/21bfa9ad6eaf/ Log: support creating long from buffer diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -2,6 +2,7 @@ from pypy.interpreter import typedef from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault,\ interpindirect2app +from pypy.interpreter.buffer import Buffer from pypy.objspace.std.model import W_Object from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib.rstring import ParseStringError @@ -46,9 +47,17 @@ return string_to_w_long(space, w_longtype, unicode_to_decimal_w(space, w_value)) else: - raise operationerrfmt(space.w_TypeError, - "long() argument must be a string or a number, not '%T'", - w_value) + try: + w_buffer = space.buffer(w_value) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise operationerrfmt(space.w_TypeError, + "long() argument must be a string or a number, not '%T'", + w_value) + else: + buf = 
space.interp_w(Buffer, w_buffer) + return string_to_w_long(space, w_longtype, buf.as_str()) else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -249,6 +249,8 @@ n = -sys.maxint-1 assert long(n) == n assert str(long(n)) == str(n) + a = buffer('123') + assert long(a) == 123L def test_huge_longs(self): import operator From noreply at buildbot.pypy.org Tue Dec 17 12:29:15 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 12:29:15 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: don't segfault in get_instances_array if translating with STM - just return an empty array Message-ID: <20131217112915.7D3001C3623@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r526:73289e897e57 Date: 2013-12-17 12:28 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/73289e897e57/ Log: don't segfault in get_instances_array if translating with STM - just return an empty array diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -519,30 +519,30 @@ # This primitive returns some instance of the class on the stack. # Not sure quite how to do this; maintain a weak list of all # existing instances or something? - match_w = s_frame.instances_array(w_class) if match_w is None: + match_w = [] from rpython.rlib import rgc - match_w = [] - roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] - pending = roots[:] - while pending: - gcref = pending.pop() - if not rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - w_obj = rgc.try_cast_gcref_to_instance(model.W_Object, gcref) - if (w_obj is not None and w_obj.has_class() - and w_obj.getclass(space) is w_class): - match_w.append(w_obj) - pending.extend(rgc.get_rpy_referents(gcref)) + if rgc.stm_is_enabled is None or not rgc.stm_is_enabled(): + roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] + pending = roots[:] + while pending: + gcref = pending.pop() + if not rgc.get_gcflag_extra(gcref): + rgc.toggle_gcflag_extra(gcref) + w_obj = rgc.try_cast_gcref_to_instance(model.W_Object, gcref) + if (w_obj is not None and w_obj.has_class() + and w_obj.getclass(space) is w_class): + match_w.append(w_obj) + pending.extend(rgc.get_rpy_referents(gcref)) - while roots: - gcref = roots.pop() - if rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - roots.extend(rgc.get_rpy_referents(gcref)) - s_frame.store_instances_array(w_class, match_w) + while roots: + gcref = roots.pop() + if rgc.get_gcflag_extra(gcref): + rgc.toggle_gcflag_extra(gcref) + roots.extend(rgc.get_rpy_referents(gcref)) + s_frame.store_instances_array(w_class, match_w) return match_w @expose_primitive(SOME_INSTANCE, unwrap_spec=[object]) From noreply at buildbot.pypy.org Tue Dec 17 16:18:59 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 16:18:59 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: determine if we're using stm at compile time Message-ID: <20131217151859.C9FD31C300C@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r528:82eefe4aa56b Date: 2013-12-17 15:49 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/82eefe4aa56b/ Log: determine if we're using stm at compile time diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -515,6 +515,13 @@ w_frame.store(interp.space, 
constants.CTXPART_STACKP_INDEX, interp.space.wrap_int(stackp)) return w_frame + +def stm_enabled(): + """NOT RPYTHON""" + from rpython.rlib import rgc + return hasattr(rgc, "stm_is_enabled") and rgc.stm_is_enabled() +USES_STM = stm_enabled() + def get_instances_array(space, s_frame, w_class): # This primitive returns some instance of the class on the stack. # Not sure quite how to do this; maintain a weak list of all @@ -524,7 +531,7 @@ match_w = [] from rpython.rlib import rgc - if rgc.stm_is_enabled is None or not rgc.stm_is_enabled(): + if USES_STM: roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] pending = roots[:] while pending: From noreply at buildbot.pypy.org Tue Dec 17 16:18:58 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 16:18:58 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: fix enviroment get for interrupt counter size Message-ID: <20131217151858.3A2221C0531@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r527:a9fdae413f52 Date: 2013-12-17 15:49 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a9fdae413f52/ Log: fix enviroment get for interrupt counter size diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -44,7 +44,7 @@ self._loop = False self.next_wakeup_tick = 0 try: - self.interrupt_counter_size = int(os.environ.get("SPY_ICS")) + self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: self.interrupt_counter_size = constants.INTERRUPT_COUNTER_SIZE self.interrupt_check_counter = self.interrupt_counter_size From noreply at buildbot.pypy.org Tue Dec 17 16:19:02 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 16:19:02 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: enable stm if on stm branch Message-ID: <20131217151902.E5AAB1C3298@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r530:e3f00715c4bd Date: 2013-12-17 15:54 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/e3f00715c4bd/ Log: enable stm if on stm branch diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -161,7 +161,13 @@ # _____ Define and setup target ___ -def target(*args): +def target(driver, *args): + # driver.config.translation.gc = "stmgc" + # driver.config.translation.gcrootfinder = "stm" + from rpython.rlib import rgc + if hasattr(rgc, "stm_is_enabled"): + driver.config.translation.stm = True + driver.config.translation.thread = True return entry_point, None From noreply at buildbot.pypy.org Tue Dec 17 16:19:01 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 16:19:01 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: etoys uses display scaling and calls bitblt stuff before BE_DISPLAY - we need to convert the Form to an SDL Bitmap from there, too Message-ID: <20131217151901.5F7B11C31CA@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r529:5e5665fee98c Date: 2013-12-17 15:50 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/5e5665fee98c/ Log: etoys uses display scaling and calls bitblt stuff before BE_DISPLAY - we need to convert the Form to an SDL Bitmap from there, too diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -567,9 +567,17 @@ space = IProxy.space if w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = 
w_dest_form.fetch(space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) - w_bitmap.update_from_buffer() - w_bitmap.flush_to_screen() + if not isinstance(w_bitmap, model.W_DisplayBitmap): + assert isinstance(w_bitmap, model.W_WordsObject) + w_display_bitmap = w_bitmap.as_display_bitmap( + w_dest_form, + IProxy.interp, + sdldisplay=None + ) + else: + w_display_bitmap = w_bitmap + w_display_bitmap.update_from_buffer() + w_display_bitmap.flush_to_screen() return 0 @expose_on_virtual_machine_proxy([int], int) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -951,6 +951,26 @@ self.words = None return c_words + def as_display_bitmap(self, w_form, interp, sdldisplay=None): + width = space.unwrap_int(w_form.fetch(space, 1)) + height = space.unwrap_int(w_form.fetch(space, 2)) + depth = space.unwrap_int(w_form.fetch(space, 3)) + if not sdldisplay: + from spyvm import display + sdldisplay = display.SDLDisplay(interp.image_name) + sdldisplay.set_video_mode(width, height, depth) + w_display_bitmap = W_DisplayBitmap.create( + interp.space, + self.getclass(interp.space), + self.size(), + depth, + sdldisplay + ) + for idx in range(self.size()): + w_display_bitmap.setword(idx, self.getword(idx)) + w_form.store(space, 0, w_display_bitmap) + return w_display_bitmap + def __del__(self): if self.words is None: lltype.free(self.c_words, flavor='raw') @@ -970,6 +990,8 @@ # return W_DisplayBitmap32Bit(space, w_class, size, depth, display) elif depth == 32: return W_DisplayBitmap32Bit(space, w_class, size, depth, display) + elif depth == 16: + return W_DisplayBitmap16Bit(space, w_class, size, depth, display) else: raise NotImplementedError("non B/W squeak") diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -743,19 +743,11 @@ w_display_bitmap = w_bitmap else: assert isinstance(w_bitmap, model.W_WordsObject) - if not sdldisplay: - sdldisplay = display.SDLDisplay(interp.image_name) - sdldisplay.set_video_mode(width, height, depth) - w_display_bitmap = model.W_DisplayBitmap.create( - interp.space, - w_bitmap.getclass(interp.space), - w_bitmap.size(), - depth, - sdldisplay + w_display_bitmap = w_bitmap.as_display_bitmap( + w_rcvr, + interp, + sdldisplay=sdldisplay ) - for idx, word in enumerate(w_bitmap.words): - w_display_bitmap.setword(idx, word) - w_rcvr.store(interp.space, 0, w_display_bitmap) w_display_bitmap.flush_to_screen() if interp.image: From noreply at buildbot.pypy.org Tue Dec 17 16:19:04 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 16:19:04 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: translate again Message-ID: <20131217151904.7D2B81C0531@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r531:039a087e7c10 Date: 2013-12-17 16:13 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/039a087e7c10/ Log: translate again diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -952,9 +952,9 @@ return c_words def as_display_bitmap(self, w_form, interp, sdldisplay=None): - width = space.unwrap_int(w_form.fetch(space, 1)) - height = space.unwrap_int(w_form.fetch(space, 2)) - depth = space.unwrap_int(w_form.fetch(space, 3)) + width = interp.space.unwrap_int(w_form.fetch(interp.space, 1)) + height = interp.space.unwrap_int(w_form.fetch(interp.space, 2)) + depth = interp.space.unwrap_int(w_form.fetch(interp.space, 3)) if not sdldisplay: from spyvm import display sdldisplay = display.SDLDisplay(interp.image_name) 
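# A rough standalone illustration of what as_display_bitmap() above does,
# with plain Python stand-ins (hypothetical classes, not the spyvm ones):
# the form's plain word array is copied word for word into a display-backed
# bitmap of the same size, and the form is then repointed at the result.
class _PlainWords(object):
    def __init__(self, words):
        self.words = list(words)
    def size(self):
        return len(self.words)
    def getword(self, i):
        return self.words[i]

class _DisplayWords(object):
    def __init__(self, size):
        self.words = [0] * size
    def setword(self, i, word):
        self.words[i] = word

def _convert_form(form):
    source = form['bits']
    target = _DisplayWords(source.size())
    for idx in range(source.size()):
        target.setword(idx, source.getword(idx))
    form['bits'] = target       # the form now holds the display bitmap
    return target

_form = {'bits': _PlainWords([0xDEADBEEF, 0x0])}
assert _convert_form(_form).words == [0xDEADBEEF, 0x0]
assert isinstance(_form['bits'], _DisplayWords)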
@@ -968,7 +968,7 @@ ) for idx in range(self.size()): w_display_bitmap.setword(idx, self.getword(idx)) - w_form.store(space, 0, w_display_bitmap) + w_form.store(interp.space, 0, w_display_bitmap) return w_display_bitmap def __del__(self): @@ -990,8 +990,6 @@ # return W_DisplayBitmap32Bit(space, w_class, size, depth, display) elif depth == 32: return W_DisplayBitmap32Bit(space, w_class, size, depth, display) - elif depth == 16: - return W_DisplayBitmap16Bit(space, w_class, size, depth, display) else: raise NotImplementedError("non B/W squeak") @@ -1088,7 +1086,7 @@ def setword(self, n, word): self._real_depth_buffer[n] = word pos, line_end = self.compute_pos_and_line_end(n, 16) - mask = 0xf + mask = r_uint(0xf) for i in range(2): pixel = 0 for j in range(4): @@ -1117,7 +1115,7 @@ """ _immutable_fields_ = ["_shadow?"] - _attrs_ = ["bytes", "_likely_methodname", "header", "argsize", "primitive", + _attrs_ = ["bytes", "_likely_methodname", "header", "argsize", "primitive", "literals", "tempsize", "literalsize", "islarge", "_shadow"] ### Extension from Squeak 3.9 doc, which we do not implement: ### trailer (variable) From noreply at buildbot.pypy.org Tue Dec 17 16:19:06 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 16:19:06 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: try to report all events, not skip stuff i we're too slow Message-ID: <20131217151906.0A02B1C0531@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r532:f1a609bb3086 Date: 2013-12-17 16:16 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/f1a609bb3086/ Log: try to report all events, not skip stuff i we're too slow diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -91,20 +91,20 @@ def get_next_event(self): event = lltype.malloc(RSDL.Event, flavor="raw") - ok = rffi.cast(lltype.Signed, RSDL.PollEvent(event)) try: - while ok == 1: + if rffi.cast(lltype.Signed, RSDL.PollEvent(event)) == 1: c_type = rffi.getintfield(event, 'c_type') if c_type == RSDL.MOUSEBUTTONDOWN or c_type == RSDL.MOUSEBUTTONUP: self.handle_mouse_button(c_type, event) + return elif c_type == RSDL.MOUSEMOTION: self.handle_mouse_move(c_type, event) elif c_type == RSDL.KEYDOWN: self.handle_keypress(c_type, event) + return elif c_type == RSDL.QUIT: from spyvm.error import Exit raise Exit("Window closed..") - ok = rffi.cast(lltype.Signed, RSDL.PollEvent(event)) finally: lltype.free(event, flavor='raw') From noreply at buildbot.pypy.org Tue Dec 17 18:21:24 2013 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 17 Dec 2013 18:21:24 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: buggy impl of 16bit depth Message-ID: <20131217172124.08F381C08AF@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r533:9a5409326f66 Date: 2013-12-17 18:18 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9a5409326f66/ Log: buggy impl of 16bit depth diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -986,8 +986,8 @@ def create(space, w_class, size, depth, display): if depth == 1: return W_DisplayBitmap1Bit(space, w_class, size, depth, display) - # elif depth == 16: - # return W_DisplayBitmap32Bit(space, w_class, size, depth, display) + elif depth == 16: + return W_DisplayBitmap16Bit(space, w_class, size, depth, display) elif depth == 32: return W_DisplayBitmap32Bit(space, w_class, size, depth, display) else: @@ -1079,22 +1079,22 @@ mask >>= 1 pos += 1 -# XXX: We stop supporting 16 bit displays, 
because the 16bit are with 5bit per -# color channel class W_DisplayBitmap16Bit(W_DisplayBitmap): @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word pos, line_end = self.compute_pos_and_line_end(n, 16) - mask = r_uint(0xf) - for i in range(2): - pixel = 0 - for j in range(4): - pixel |= r_uint(word & mask << (8 * j + 4)) - mask <<= 4 - self.pixelbuffer[pos + i] = pixel - if pos + 1 == line_end: + for i in xrange(2): + if pos >= line_end: return + pixel = r_uint(0x0 | + ((word & 0b111110000000000) << 9) | + ((word & 0b000001111100000) << 6) | + ((word & 0b000000000011111) << 3) + ) + self.pixelbuffer[pos] = pixel + word = (word >> 16) & 0xffff + pos += 1 class W_DisplayBitmap32Bit(W_DisplayBitmap): @jit.unroll_safe From noreply at buildbot.pypy.org Tue Dec 17 20:42:35 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 17 Dec 2013 20:42:35 +0100 (CET) Subject: [pypy-commit] pypy default: simplify Message-ID: <20131217194236.000D81C0531@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68445:61a7394c57ff Date: 2013-12-17 11:41 -0800 http://bitbucket.org/pypy/pypy/changeset/61a7394c57ff/ Log: simplify diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,8 +1,5 @@ +import operator + def bin(x): - if isinstance(x, (int, long)): - value = x - elif hasattr(x, '__index__'): - value = x.__index__() - else: - raise TypeError("object cannot be interpreted as an index") + value = operator.index(x) return value.__format__("#b") diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -54,7 +54,7 @@ def __int__(self): return 42 exc = raises(TypeError, bin, D()) - assert exc.value.message.find("object cannot be interpreted as an index") != -1 + assert "index" in exc.value.message def test_unichr(self): import sys From noreply at buildbot.pypy.org Tue Dec 17 20:56:57 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 17 Dec 2013 20:56:57 +0100 (CET) Subject: [pypy-commit] pypy default: loosen this test also Message-ID: <20131217195657.7ECF81C0531@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68446:55b7a58f38d0 Date: 2013-12-17 14:56 -0500 http://bitbucket.org/pypy/pypy/changeset/55b7a58f38d0/ Log: loosen this test also diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -28,7 +28,7 @@ assert hex(np.complex64(11-12j)) == '0xb' assert bin(np.int32(11)) == '0b1011' exc = raises(TypeError, "bin(np.float32(11.6))") - assert exc.value.message.find('object cannot be interpreted as an index') != -1 + assert "index" in exc.value.message def test_pickle(self): from numpypy import dtype, zeros From noreply at buildbot.pypy.org Tue Dec 17 23:24:21 2013 From: noreply at buildbot.pypy.org (Laurens Van Houtven) Date: Tue, 17 Dec 2013 23:24:21 +0100 (CET) Subject: [pypy-commit] pypy default: Whitespace fixes, MD5 -> SHA Message-ID: <20131217222421.27EBE1C0531@cobra.cs.uni-duesseldorf.de> Author: Laurens Van Houtven <_ at lvh.io> Branch: Changeset: r68447:555be03b98bf Date: 2013-09-28 12:02 +0200 http://bitbucket.org/pypy/pypy/changeset/555be03b98bf/ Log: Whitespace fixes, MD5 -> SHA diff 
--git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 block_size = 1 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest From noreply at buildbot.pypy.org Tue Dec 17 23:24:22 2013 From: noreply at buildbot.pypy.org (Laurens Van Houtven) Date: Tue, 17 Dec 2013 23:24:22 +0100 (CET) Subject: [pypy-commit] pypy default: SHA's block size is 512 bits, not 8 Message-ID: <20131217222422.599E61C0531@cobra.cs.uni-duesseldorf.de> Author: Laurens Van Houtven <_ at lvh.io> Branch: Changeset: r68448:01a580ab4dde Date: 2013-09-28 12:03 +0200 http://bitbucket.org/pypy/pypy/changeset/01a580ab4dde/ Log: SHA's block size is 512 bits, not 8 diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -118,7 +118,7 @@ "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." From noreply at buildbot.pypy.org Tue Dec 17 23:24:23 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 17 Dec 2013 23:24:23 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in lvh/pypy (pull request #191) Message-ID: <20131217222423.8B5461C0531@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68449:95d3af2f316f Date: 2013-12-17 14:23 -0800 http://bitbucket.org/pypy/pypy/changeset/95d3af2f316f/ Log: Merged in lvh/pypy (pull request #191) Pure-Python SHA implementation is confused diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. 
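# A quick standalone cross-check of the constants corrected in this
# changeset, using the stdlib hashlib implementation as the reference:
# SHA-1 processes 512-bit (64-byte) blocks and yields a 20-byte digest,
# so block_size should report 64 rather than 1.
import hashlib

_h = hashlib.sha1()
assert _h.block_size == 64          # 512 bits // 8
assert _h.digest_size == 20
assert hashlib.sha1(b"abc").hexdigest() == \
    "a9993e364706816aba3e25717850c26c9cd0d89d"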
@@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest From noreply at buildbot.pypy.org Tue Dec 17 23:25:33 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 17 Dec 2013 23:25:33 +0100 (CET) Subject: [pypy-commit] pypy default: test _sha.py block size Message-ID: <20131217222533.792951C0531@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68450:737e36faea12 Date: 2013-12-17 17:24 -0500 http://bitbucket.org/pypy/pypy/changeset/737e36faea12/ Log: test _sha.py block size diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py --- a/pypy/module/test_lib_pypy/test_sha_extra.py +++ b/pypy/module/test_lib_pypy/test_sha_extra.py @@ -8,7 +8,6 @@ class AppTestSHA: - spaceconfig = dict(usemodules=('struct',)) def setup_class(cls): @@ -37,3 +36,4 @@ assert _sha.blocksize == 1 assert _sha.new().digest_size == 20 assert _sha.new().digestsize == 20 + assert _sha.new().block_size == 64 From noreply at buildbot.pypy.org Wed Dec 18 01:42:59 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 01:42:59 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix conversion of str arrays Message-ID: <20131218004259.DC4E61C08AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68452:ccbbf6d54188 Date: 2013-12-17 19:36 -0500 http://bitbucket.org/pypy/pypy/changeset/ccbbf6d54188/ Log: test/fix conversion of str arrays diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -988,28 +988,43 @@ shape = self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.int(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.int(self.descr_getitem(space, space.wrap(0))) - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to int")) + return space.int(value) def descr_long(self, space): shape = self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.long(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.int(self.descr_getitem(space, space.wrap(0))) - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to long")) + return space.long(value) def descr_float(self, space): shape = 
self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.float(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.float(self.descr_getitem(space, space.wrap(0))) - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to float")) + return space.float(value) def descr_reduce(self, space): from rpython.rlib.rstring import StringBuilder diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2083,6 +2083,11 @@ assert int(array([1])) == 1 assert raises(TypeError, "int(array([1, 2]))") assert int(array([1.5])) == 1 + for op in ["int", "float", "long"]: + for a in [array('123'), array(['123'])]: + exc = raises(TypeError, "%s(a)" % op) + assert exc.value.message == "don't know how to convert " \ + "scalar number to %s" % op def test__reduce__(self): from numpypy import array, dtype From noreply at buildbot.pypy.org Wed Dec 18 01:42:58 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 01:42:58 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix conversion of np.str to numeric types Message-ID: <20131218004258.8DA591C0531@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68451:6b8b4188b7ee Date: 2013-12-17 19:28 -0500 http://bitbucket.org/pypy/pypy/changeset/6b8b4188b7ee/ Log: test/fix conversion of np.str to numeric types diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -47,7 +47,7 @@ def setslice(self, space, arr): impl = arr.implementation if impl.is_scalar(): - self.fill(impl.get_scalar_value()) + self.fill(space, impl.get_scalar_value()) return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: @@ -100,7 +100,7 @@ tmp = self.get_real(orig_array) tmp.setslice(space, convert_to_array(space, w_value)) - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): @@ -110,11 +110,11 @@ impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) if not self.dtype.is_flexible_type(): - impl.fill(self.dtype.box(0)) + impl.fill(space, self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(orig_array) + tmp = self.get_imag(space, orig_array) tmp.setslice(space, convert_to_array(space, w_value)) # -------------------- applevel get/setitem ----------------------- @@ -357,7 +357,7 @@ self.get_backstrides(), self.get_shape()) - def fill(self, box): + def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), box, 0, self.size, 0) @@ -435,8 +435,8 @@ def base(self): return self.orig_arr - def fill(self, box): - loop.fill(self, box.convert_to(self.dtype)) + def fill(self, 
space, box): + loop.fill(self, box.convert_to(space, self.dtype)) def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and \ diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -54,8 +54,7 @@ return self.value def set_scalar_value(self, w_val): - assert isinstance(w_val, W_GenericBox) - self.value = w_val.convert_to(self.dtype) + self.value = w_val def copy(self, space): scalar = Scalar(self.dtype) @@ -96,12 +95,12 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) else: self.value = w_arr.get_scalar_value() - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) scalar.value = self.value.convert_imag_to(scalar.dtype) @@ -125,7 +124,7 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) self.value = self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), ) def descr_getitem(self, space, _, w_idx): @@ -180,7 +179,7 @@ w_res.implementation.setitem(0, index_type.itemtype.box(0)) return space.newtuple([w_res]) - def fill(self, w_value): + def fill(self, space, w_value): self.value = w_value def get_storage_as_int(self, space): diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -89,7 +89,7 @@ shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) out = W_NDimArray.from_shape(space, shape, dtype) - return loop.where(out, shape, arr, x, y, dtype) + return loop.where(space, out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2, w_out=None): w_arr = convert_to_array(space, w_obj1) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -66,7 +66,7 @@ def __init__(self, value): self.value = value - def convert_to(self, dtype): + def convert_to(self, space, dtype): return dtype.box(self.value) def __repr__(self): @@ -91,7 +91,7 @@ self.real = real self.imag = imag - def convert_to(self, dtype): + def convert_to(self, space, dtype): return dtype.box_complex(self.real, self.imag) def convert_real_to(self, dtype): @@ -149,17 +149,17 @@ return space.index(self.item(space)) def descr_int(self, space): - box = self.convert_to(W_LongBox._get_dtype(space)) + box = self.convert_to(space, W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_long(self, space): - box = self.convert_to(W_Int64Box._get_dtype(space)) + box = self.convert_to(space, W_Int64Box._get_dtype(space)) assert isinstance(box, W_Int64Box) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box._get_dtype(space)) + box = self.convert_to(space, W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) @@ -265,14 +265,14 @@ if not space.is_none(w_out): raise OperationError(space.w_NotImplementedError, 
space.wrap( "out not supported")) - v = self.convert_to(self.get_dtype(space)) + v = self.convert_to(space, self.get_dtype(space)) return self.get_dtype(space).itemtype.round(v, decimals) def descr_astype(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) - return self.convert_to(dtype) + return self.convert_to(space, dtype) def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype @@ -311,7 +311,7 @@ return space.wrap(0) def descr_copy(self, space): - return self.convert_to(self.get_dtype(space)) + return self.convert_to(space, self.get_dtype(space)) w_flags = None def descr_get_flags(self, space): @@ -472,14 +472,13 @@ dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) - def convert_to(self, dtype): + def convert_to(self, space, dtype): # if we reach here, the record fields are guarenteed to match. return self class W_CharacterBox(W_FlexibleBox): - def convert_to(self, dtype): - # XXX assert dtype is str type - return self + def convert_to(self, space, dtype): + return dtype.coerce(space, space.wrap(self.raw_str())) class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -94,7 +94,7 @@ return space.wrap(self.get_size() * self.get_dtype().get_size()) def descr_fill(self, space, w_value): - self.fill(self.get_dtype().coerce(space, w_value)) + self.fill(space, self.get_dtype().coerce(space, w_value)) def descr_tostring(self, space, w_order=None): order = order_converter(space, w_order, NPY_CORDER) @@ -288,8 +288,8 @@ def set_scalar_value(self, w_val): self.implementation.set_scalar_value(w_val) - def fill(self, box): - self.implementation.fill(box) + def fill(self, space, box): + self.implementation.fill(space, box) def descr_get_size(self, space): return space.wrap(self.get_size()) @@ -314,7 +314,7 @@ self.implementation.get_real(self)) def descr_get_imag(self, space): - ret = self.implementation.get_imag(self) + ret = self.implementation.get_imag(space, self) return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): @@ -1427,7 +1427,7 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) - w_arr.fill(one) + w_arr.fill(space, one) return space.wrap(w_arr) def _reconstruct(space, w_subtype, w_shape, w_dtype): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -226,7 +226,7 @@ dtype = out.get_dtype() else: out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) - return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, + return loop.do_axis_reduce(space, shape, self.func, obj, dtype, axis, out, self.identity, cumulative, temp) if cumulative: if out: @@ -235,7 +235,7 @@ "out of incompatible size")) else: out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) - loop.compute_reduce_cumulative(obj, out, dtype, self.func, + loop.compute_reduce_cumulative(space, obj, out, dtype, self.func, self.identity) return out if out: @@ -244,7 +244,7 @@ "for reduction operation %s has too many" " 
dimensions",self.name) dtype = out.get_dtype() - res = loop.compute_reduce(obj, dtype, self.func, self.done_func, + res = loop.compute_reduce(space, obj, dtype, self.func, self.done_func, self.identity) if out: out.set_scalar_value(res) @@ -303,13 +303,13 @@ res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype if w_obj.is_scalar(): w_val = self.func(calc_dtype, - w_obj.get_scalar_value().convert_to(calc_dtype)) + w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: return w_val if out.is_scalar(): out.set_scalar_value(w_val) else: - out.fill(res_dtype.coerce(space, w_val)) + out.fill(space, res_dtype.coerce(space, w_val)) return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) @@ -395,14 +395,14 @@ res_dtype = calc_dtype if w_lhs.is_scalar() and w_rhs.is_scalar(): arr = self.func(calc_dtype, - w_lhs.get_scalar_value().convert_to(calc_dtype), - w_rhs.get_scalar_value().convert_to(calc_dtype) + w_lhs.get_scalar_value().convert_to(space, calc_dtype), + w_rhs.get_scalar_value().convert_to(space, calc_dtype) ) if isinstance(out, W_NDimArray): if out.is_scalar(): out.set_scalar_value(arr) else: - out.fill(arr) + out.fill(space, arr) else: out = arr return out diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -58,10 +58,10 @@ out=out, left_iter=left_iter, right_iter=right_iter, out_iter=out_iter) - w_left = left_iter.getitem().convert_to(calc_dtype) - w_right = right_iter.getitem().convert_to(calc_dtype) + w_left = left_iter.getitem().convert_to(space, calc_dtype) + w_right = right_iter.getitem().convert_to(space, calc_dtype) out_iter.setitem(func(calc_dtype, w_left, w_right).convert_to( - res_dtype)) + space, res_dtype)) left_iter.next() right_iter.next() out_iter.next() @@ -84,8 +84,8 @@ calc_dtype=calc_dtype, res_dtype=res_dtype, shape=shape, w_obj=w_obj, out=out, obj_iter=obj_iter, out_iter=out_iter) - elem = obj_iter.getitem().convert_to(calc_dtype) - out_iter.setitem(func(calc_dtype, elem).convert_to(res_dtype)) + elem = obj_iter.getitem().convert_to(space, calc_dtype) + out_iter.setitem(func(calc_dtype, elem).convert_to(space, res_dtype)) out_iter.next() obj_iter.next() return out @@ -111,7 +111,7 @@ shapelen = len(shape) while not target_iter.done(): setslice_driver1.jit_merge_point(shapelen=shapelen, dtype=dtype) - target_iter.setitem(source_iter.getitem().convert_to(dtype)) + target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) target_iter.next() source_iter.next() return target @@ -135,20 +135,20 @@ 'calc_dtype'], reds = 'auto') -def compute_reduce(obj, calc_dtype, func, done_func, identity): +def compute_reduce(space, obj, calc_dtype, func, done_func, identity): obj_iter = obj.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(calc_dtype) + cur_value = obj_iter.getitem().convert_to(space, calc_dtype) obj_iter.next() else: - cur_value = identity.convert_to(calc_dtype) + cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, calc_dtype=calc_dtype, ) - rval = obj_iter.getitem().convert_to(calc_dtype) + rval = obj_iter.getitem().convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval cur_value = func(calc_dtype, cur_value, rval) @@ -159,22 +159,22 @@ greens = ['shapelen', 'func', 'dtype'], reds = 'auto') 
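# Illustrative sketch, not part of the changeset above: the point of threading
# `space` through convert_to() is that W_CharacterBox can now coerce through
# the target dtype instead of returning itself, so string scalars take part in
# numeric conversions.  Behaviour as exercised by the tests later in this
# changeset (test_numarray / test_scalar):
import numpy as np
assert int(np.str_('12')) == 12
assert np.str_('123').astype('int32') == 123
assert np.array('123').astype('i8') == 123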
-def compute_reduce_cumulative(obj, out, calc_dtype, func, identity): +def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): obj_iter = obj.create_iter() out_iter = out.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(calc_dtype) + cur_value = obj_iter.getitem().convert_to(space, calc_dtype) out_iter.setitem(cur_value) out_iter.next() obj_iter.next() else: - cur_value = identity.convert_to(calc_dtype) + cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype, ) - rval = obj_iter.getitem().convert_to(calc_dtype) + rval = obj_iter.getitem().convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) out_iter.setitem(cur_value) out_iter.next() @@ -190,7 +190,7 @@ greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') -def where(out, shape, arr, x, y, dtype): +def where(space, out, shape, arr, x, y, dtype): out_iter = out.create_iter(shape) arr_iter = arr.create_iter(shape) arr_dtype = arr.get_dtype() @@ -209,9 +209,9 @@ arr_dtype=arr_dtype) w_cond = arr_iter.getitem() if arr_dtype.itemtype.bool(w_cond): - w_val = x_iter.getitem().convert_to(dtype) + w_val = x_iter.getitem().convert_to(space, dtype) else: - w_val = y_iter.getitem().convert_to(dtype) + w_val = y_iter.getitem().convert_to(space, dtype) out_iter.setitem(w_val) out_iter.next() arr_iter.next() @@ -224,7 +224,7 @@ 'func', 'dtype'], reds='auto') -def do_axis_reduce(shape, func, arr, dtype, axis, out, identity, cumulative, +def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): out_iter = out.create_axis_iter(arr.get_shape(), axis, cumulative) if cumulative: @@ -233,7 +233,7 @@ temp_iter = out_iter # hack arr_iter = arr.create_iter() if identity is not None: - identity = identity.convert_to(dtype) + identity = identity.convert_to(space, dtype) shapelen = len(shape) while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, @@ -241,7 +241,7 @@ if arr_iter.done(): w_val = identity else: - w_val = arr_iter.getitem().convert_to(dtype) + w_val = arr_iter.getitem().convert_to(space, dtype) if out_iter.first_line: if identity is not None: w_val = func(dtype, identity, w_val) @@ -316,11 +316,11 @@ righti = right.create_dot_iter(broadcast_shape, right_skip) while not outi.done(): dot_driver.jit_merge_point(dtype=dtype) - lval = lefti.getitem().convert_to(dtype) - rval = righti.getitem().convert_to(dtype) - outval = outi.getitem().convert_to(dtype) + lval = lefti.getitem().convert_to(space, dtype) + rval = righti.getitem().convert_to(space, dtype) + outval = outi.getitem().convert_to(space, dtype) v = dtype.itemtype.mul(lval, rval) - value = dtype.itemtype.add(v, outval).convert_to(dtype) + value = dtype.itemtype.add(v, outval).convert_to(space, dtype) outi.setitem(value) outi.next() righti.next() @@ -457,7 +457,7 @@ arr_iter.next_skip_x(start) while length > 0: flatiter_setitem_driver1.jit_merge_point(dtype=dtype) - arr_iter.setitem(val_iter.getitem().convert_to(dtype)) + arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) # need to repeat i_nput values until all assignments are done arr_iter.next_skip_x(step) length -= 1 @@ -610,7 +610,7 @@ index = 0 else: index = len(iterators) - 1 - out_iter.setitem(iterators[index].getitem().convert_to(dtype)) + out_iter.setitem(iterators[index].getitem().convert_to(space, dtype)) for iter in iterators: iter.next() 
out_iter.next() @@ -629,9 +629,9 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(dtype) - w_min = min_iter.getitem().convert_to(dtype) - w_max = max_iter.getitem().convert_to(dtype) + w_v = arr_iter.getitem().convert_to(space, dtype) + w_min = min_iter.getitem().convert_to(space, dtype) + w_max = max_iter.getitem().convert_to(space, dtype) if dtype.itemtype.lt(w_v, w_min): w_v = w_min elif dtype.itemtype.gt(w_v, w_max): @@ -652,7 +652,7 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(dtype), + w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(space, dtype), decimals) out_iter.setitem(w_v) arr_iter.next() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1973,6 +1973,12 @@ else: raises(NotImplementedError, array(['1', '2', '3']).astype, float) + a = array('123') + assert a.astype('i8') == 123 + a = array('abcdefgh') + exc = raises(ValueError, a.astype, 'i8') + assert exc.value.message.startswith('invalid literal for int()') + def test_base(self): from numpypy import array assert array(1).base is None diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -20,6 +20,9 @@ def test_builtin(self): import numpy as np + assert int(np.str_('12')) == 12 + exc = raises(ValueError, "int(np.str_('abc'))") + assert exc.value.message.startswith('invalid literal for int()') assert oct(np.int32(11)) == '013' assert oct(np.float32(11.6)) == '013' assert oct(np.complex64(11-12j)) == '013' @@ -77,6 +80,9 @@ a = np.bool_(True).astype('int32') assert type(a) is np.int32 assert a == 1 + a = np.str_('123').astype('int32') + assert type(a) is np.int32 + assert a == 123 def test_copy(self): import numpy as np From noreply at buildbot.pypy.org Wed Dec 18 02:42:08 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 02:42:08 +0100 (CET) Subject: [pypy-commit] pypy default: simplify {date, time, datetime}.replace (constructor does field validation, no need for it here) Message-ID: <20131218014208.477C41C08AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68453:2e0e9bfcb1a2 Date: 2013-12-17 20:39 -0500 http://bitbucket.org/pypy/pypy/changeset/2e0e9bfcb1a2/ Log: simplify {date,time,datetime}.replace (constructor does field validation, no need for it here) diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. 
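# Illustrative sketch, not part of the changeset above: the removed
# _check_*_fields / _check_tzinfo_arg calls are redundant because replace()
# ends by calling the constructor, which performs the same validation.  A
# hypothetical quick check against lib_pypy/datetime.py:
from datetime import date
d = date(2013, 12, 17)
assert d.replace(day=18) == date(2013, 12, 18)
try:
    d.replace(month=13)        # the invalid month reaches date.__new__ ...
except ValueError:
    pass                       # ... which still rejects it, so nothing is lost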
@@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) From noreply at buildbot.pypy.org Wed Dec 18 03:04:20 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 03:04:20 +0100 (CET) Subject: [pypy-commit] pypy py3k: simplify {date, time, datetime}.replace (constructor does field validation, no need for it here) Message-ID: <20131218020420.8FC271C31CA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r68454:ebc1b6f37b2a Date: 2013-12-17 21:02 -0500 http://bitbucket.org/pypy/pypy/changeset/ebc1b6f37b2a/ Log: simplify {date,time,datetime}.replace (constructor does field validation, no need for it here) diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -806,7 +806,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. @@ -1270,8 +1269,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __bool__(self): @@ -1486,9 +1483,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) From noreply at buildbot.pypy.org Wed Dec 18 03:09:24 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 03:09:24 +0100 (CET) Subject: [pypy-commit] pypy py3k: transplant fa83d625fabf Message-ID: <20131218020924.87B6B1C0531@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r68455:88328f6a8ea2 Date: 2013-12-17 21:08 -0500 http://bitbucket.org/pypy/pypy/changeset/88328f6a8ea2/ Log: transplant fa83d625fabf diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -40,9 +40,9 @@ # for all computations. See the book for algorithms for converting between # proleptic Gregorian ordinals and many other calendar systems. 
-_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] +_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] -_DAYS_BEFORE_MONTH = [None] +_DAYS_BEFORE_MONTH = [-1] dbm = 0 for dim in _DAYS_IN_MONTH[1:]: _DAYS_BEFORE_MONTH.append(dbm) From noreply at buildbot.pypy.org Wed Dec 18 03:20:36 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 03:20:36 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix buffer(numpy.scalar) Message-ID: <20131218022036.8166B1C0531@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68456:2ec4090a85d4 Date: 2013-12-17 21:17 -0500 http://bitbucket.org/pypy/pypy/changeset/2ec4090a85d4/ Log: test/fix buffer(numpy.scalar) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -313,6 +313,9 @@ def descr_copy(self, space): return self.convert_to(space, self.get_dtype(space)) + def descr_buffer(self, space): + return self.descr_ravel(space).descr_get_data(space) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -518,6 +521,7 @@ __nonzero__ = interp2app(W_GenericBox.descr_nonzero), __oct__ = interp2app(W_GenericBox.descr_oct), __hex__ = interp2app(W_GenericBox.descr_hex), + __buffer__ = interp2app(W_GenericBox.descr_buffer), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -92,6 +92,15 @@ assert b == a assert b is not a + def test_buffer(self): + import numpy as np + a = np.int32(123) + b = buffer(a) + assert type(b) is buffer + a = np.string_('abc') + b = buffer(a) + assert str(b) == a + def test_squeeze(self): import numpy as np assert np.True_.squeeze() is np.True_ From noreply at buildbot.pypy.org Wed Dec 18 04:25:46 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 04:25:46 +0100 (CET) Subject: [pypy-commit] pypy default: fix len(numpy.string) Message-ID: <20131218032546.98DDC1C08AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68457:e8735afef7f7 Date: 2013-12-17 22:25 -0500 http://bitbucket.org/pypy/pypy/changeset/e8735afef7f7/ Log: fix len(numpy.string) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -483,6 +483,9 @@ def convert_to(self, space, dtype): return dtype.coerce(space, space.wrap(self.raw_str())) + def descr_len(self, space): + return space.len(self.item(space)) + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype @@ -756,9 +759,11 @@ W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), + __len__ = interp2app(W_StringBox.descr_len), ) W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), + __len__ = interp2app(W_UnicodeBox.descr_len), ) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- 
a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -32,6 +32,9 @@ assert bin(np.int32(11)) == '0b1011' exc = raises(TypeError, "bin(np.float32(11.6))") assert "index" in exc.value.message + exc = raises(TypeError, "len(np.int32(11))") + assert "has no len" in exc.value.message + assert len(np.string_('123')) == 3 def test_pickle(self): from numpypy import dtype, zeros From noreply at buildbot.pypy.org Wed Dec 18 06:02:39 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 06:02:39 +0100 (CET) Subject: [pypy-commit] pypy default: provide ndarray.__index__() Message-ID: <20131218050239.DB89C1C08AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68458:42d72b7298bc Date: 2013-12-17 23:51 -0500 http://bitbucket.org/pypy/pypy/changeset/42d72b7298bc/ Log: provide ndarray.__index__() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -992,7 +992,8 @@ elif shape == [1]: value = self.descr_getitem(space, space.wrap(0)) else: - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to int")) @@ -1006,7 +1007,8 @@ elif shape == [1]: value = self.descr_getitem(space, space.wrap(0)) else: - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to long")) @@ -1020,12 +1022,30 @@ elif shape == [1]: value = self.descr_getitem(space, space.wrap(0)) else: - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to float")) return space.float(value) + def descr_index(self, space): + shape = self.get_shape() + if len(shape) == 0: + assert isinstance(self.implementation, scalar.Scalar) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only integer arrays with one element " + "can be converted to an index")) + if not self.get_dtype().is_int_type() or self.get_dtype().is_bool_type(): + raise OperationError(space.w_TypeError, space.wrap( + "only integer arrays with one element " + "can be converted to an index")) + return value.item(space) + def descr_reduce(self, space): from rpython.rlib.rstring import StringBuilder from pypy.interpreter.mixedmodule import MixedModule @@ -1204,6 +1224,7 @@ __long__ = interp2app(W_NDimArray.descr_long), __float__ = interp2app(W_NDimArray.descr_float), __buffer__ = interp2app(W_NDimArray.descr_get_data), + __index__ = interp2app(W_NDimArray.descr_index), __pos__ = interp2app(W_NDimArray.descr_pos), 
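# Illustrative sketch, not part of the changeset above: exposing descr_index as
# __index__ lets a zero-dimensional or single-element integer array be used
# anywhere Python expects an index; other arrays raise TypeError, as the
# descr_index code and test_index in this changeset spell out.
import operator
import numpy as np
assert operator.index(np.array(2)) == 2                  # 0-d integer array
assert operator.index(np.array([1], np.uint16)) == 1     # one-element array
assert "abcde"[np.array(3)] == "d"                       # usable as an index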
__neg__ = interp2app(W_NDimArray.descr_neg), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2718,6 +2718,17 @@ assert b[0] == 1 assert b[1] == 'ab' + def test_index(self): + import numpy as np + a = np.array([1], np.uint16) + i = a.__index__() + assert type(i) is int + assert i == 1 + for a in [np.array('abc'), np.array([1,2]), np.array([True])]: + exc = raises(TypeError, a.__index__) + assert exc.value.message == 'only integer arrays with one element ' \ + 'can be converted to an index' + def test_int_array_index(self): from numpypy import array assert (array([])[[]] == []).all() From noreply at buildbot.pypy.org Wed Dec 18 06:11:47 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 06:11:47 +0100 (CET) Subject: [pypy-commit] pypy default: fix translation Message-ID: <20131218051147.3B9D01C08AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68459:685403c62cab Date: 2013-12-18 00:10 -0500 http://bitbucket.org/pypy/pypy/changeset/685403c62cab/ Log: fix translation diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1044,6 +1044,7 @@ raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) + assert isinstance(value, interp_boxes.W_GenericBox) return value.item(space) def descr_reduce(self, space): From noreply at buildbot.pypy.org Wed Dec 18 07:42:10 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 07:42:10 +0100 (CET) Subject: [pypy-commit] pypy default: simplify Message-ID: <20131218064210.CE02B1C042B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68460:6269dd19f95d Date: 2013-12-18 00:48 -0500 http://bitbucket.org/pypy/pypy/changeset/6269dd19f95d/ Log: simplify diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -265,8 +265,7 @@ if not space.is_none(w_out): raise OperationError(space.w_NotImplementedError, space.wrap( "out not supported")) - v = self.convert_to(space, self.get_dtype(space)) - return self.get_dtype(space).itemtype.round(v, decimals) + return self.get_dtype(space).itemtype.round(self, decimals) def descr_astype(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype From noreply at buildbot.pypy.org Wed Dec 18 07:42:12 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 07:42:12 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix ndarray init from list of array scalars Message-ID: <20131218064212.440FC1C3235@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68461:c8e4f9503987 Date: 2013-12-18 01:31 -0500 http://bitbucket.org/pypy/pypy/changeset/c8e4f9503987/ Log: test/fix ndarray init from list of array scalars diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1425,6 +1425,8 @@ if dtype is None or ( dtype.is_str_or_unicode() and dtype.get_size() < 1): for w_elem in elems_w: + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = 
w_elem.get_scalar_value() dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) #if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -62,9 +62,10 @@ if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if (space.isinstance_w(w_elem, space.w_tuple) or - isinstance(w_elem, W_NDimArray) or space.isinstance_w(w_elem, space.w_list)): return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False return True def find_shape_and_elems(space, w_iterable, dtype): @@ -72,7 +73,6 @@ batch = space.listview(w_iterable) is_rec_type = dtype is not None and dtype.is_record_type() while True: - new_batch = [] if not batch: return shape[:], [] if is_single_elem(space, batch[0], is_rec_type): @@ -81,6 +81,7 @@ raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) return shape[:], batch + new_batch = [] size = space.len_w(batch[0]) for w_elem in batch: if (is_single_elem(space, w_elem, is_rec_type) or diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -455,6 +455,25 @@ a = array(range(5)) assert a[3] == 3 + def test_list_of_array_init(self): + import numpy as np + a = np.array([np.array(True), np.array(False)]) + assert a.shape == (2,) + assert a.dtype == np.bool_ + assert (a == [True, False]).all() + a = np.array([np.array(True), np.array(2)]) + assert a.shape == (2,) + assert a.dtype == np.int_ + assert (a == [1, 2]).all() + a = np.array([np.array(True), np.int_(2)]) + assert a.shape == (2,) + assert a.dtype == np.int_ + assert (a == [1, 2]).all() + a = np.array([np.array([True]), np.array([2])]) + assert a.shape == (2, 1) + assert a.dtype == np.int_ + assert (a == [[1], [2]]).all() + def test_getitem(self): from numpypy import array a = array(range(5)) From noreply at buildbot.pypy.org Wed Dec 18 07:55:56 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 07:55:56 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131218065556.384E71C3235@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68462:1d2fb89d5c6f Date: 2013-12-18 01:54 -0500 http://bitbucket.org/pypy/pypy/changeset/1d2fb89d5c6f/ Log: cleanup diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1405,38 +1405,34 @@ # arrays with correct dtype dtype = interp_dtype.decode_w_dtype(space, w_dtype) if isinstance(w_object, W_NDimArray) and \ - (space.is_none(w_dtype) or w_object.get_dtype() is dtype): + (space.is_none(w_dtype) or w_object.get_dtype() is dtype): shape = w_object.get_shape() if copy: w_ret = w_object.descr_copy(space) else: - if ndmin<= len(shape): + if ndmin <= len(shape): return w_object new_impl = w_object.implementation.set_shape(space, w_object, shape) w_ret = W_NDimArray(new_impl) if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_ret.implementation = w_ret.implementation.set_shape(space, - w_ret, shape) + w_ret, shape) return w_ret # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or ( - 
dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): for w_elem in elems_w: if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): w_elem = w_elem.get_scalar_value() - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - dtype) - #if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: - # break - + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if dtype.is_str_or_unicode() and dtype.get_size() < 1: - # promote S0 -> S1, U0 -> U1 - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + elif dtype.is_str_or_unicode() and dtype.get_size() < 1: + # promote S0 -> S1, U0 -> U1 + dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) From noreply at buildbot.pypy.org Wed Dec 18 10:15:45 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 10:15:45 +0100 (CET) Subject: [pypy-commit] pypy default: add multiarray.empty_like() Message-ID: <20131218091545.134A61C0531@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68463:a0190dd8fc77 Date: 2013-12-18 03:52 -0500 http://bitbucket.org/pypy/pypy/changeset/a0190dd8fc77/ Log: add multiarray.empty_like() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -11,6 +11,7 @@ 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', + 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', 'dot': 'interp_arrayops.dot', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1465,6 +1465,16 @@ w_arr.fill(space, one) return space.wrap(w_arr) + at unwrap_spec(subok=bool) +def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): + w_a = convert_to_array(space, w_a) + if subok and type(w_a) is not W_NDimArray: + raise OperationError(space.w_NotImplementedError, space.wrap( + "subtypes not implemented")) + if w_dtype is None: + w_dtype = w_a.get_dtype() + return zeros(space, w_a.descr_get_shape(space), w_dtype) + def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -345,7 +345,7 @@ # TypeError raises((TypeError, AttributeError), 'x.ndim = 3') - def test_init(self): + def test_zeros(self): from numpypy import zeros a = zeros(15) # Check that storage was actually zero'd. 
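# Illustrative sketch, not part of the changeset above: empty_like allocates an
# uninitialised array with the shape (and, unless overridden, the dtype) of its
# argument, as exercised by test_empty_like below.
import numpy as np
a = np.zeros((2, 3))
b = np.empty_like(a)               # same shape and dtype, contents undefined
assert b.shape == (2, 3) and b.dtype == a.dtype
c = np.empty_like(a, dtype='i4')   # the dtype can be overridden explicitly
assert c.shape == (2, 3) and c.dtype == np.dtype('i4')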
@@ -355,6 +355,33 @@ assert a[13] == 5.3 assert zeros(()).shape == () + def test_empty_like(self): + import numpy as np + a = np.zeros((2, 3)) + assert a.shape == (2, 3) + a[0,0] = 1 + b = np.empty_like(a) + assert b.shape == a.shape + assert b.dtype == a.dtype + assert b[0,0] != 1 + b = np.empty_like(a, dtype='i4') + assert b.shape == a.shape + assert b.dtype == np.dtype('i4') + assert b[0,0] != 1 + b = np.empty_like([1,2,3]) + assert b.shape == (3,) + assert b.dtype == np.int_ + + class A(np.ndarray): + pass + import sys + if '__pypy__' not in sys.builtin_module_names: + b = np.empty_like(A((2, 3))) + assert b.shape == (2, 3) + assert type(b) is A + else: + raises(NotImplementedError, np.empty_like, A((2, 3))) + def test_size(self): from numpypy import array,arange,cos assert array(3).size == 1 From noreply at buildbot.pypy.org Wed Dec 18 10:48:03 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 18 Dec 2013 10:48:03 +0100 (CET) Subject: [pypy-commit] buildbot default: add a single run benchmark Message-ID: <20131218094803.549D01C1162@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r903:de7d92d8fc1d Date: 2013-12-18 11:47 +0200 http://bitbucket.org/pypy/buildbot/changeset/de7d92d8fc1d/ Log: add a single run benchmark diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -615,6 +615,50 @@ if trigger: # if provided trigger schedulers that depend on this one self.addStep(Trigger(schedulerNames=[trigger])) +class JITBenchmarkSingleRun(factory.BuildFactory): + def __init__(self, platform='linux', host='tannit', postfix=''): + factory.BuildFactory.__init__(self) + + repourl = 'https://bitbucket.org/pypy/benchmarks' + update_hg(platform, self, repourl, 'benchmarks', use_branch=True, + force_branch='single-run') + # + setup_steps(platform, self) + if host == 'tannit': + lock = TannitCPU + elif host == 'speed_python': + lock = SpeedPythonCPU + else: + assert False, 'unknown host %s' % host + + self.addStep( + Translate( + translationArgs=['-Ojit'], + targetArgs=[], + haltOnFailure=True, + # this step can be executed in parallel with other builds + locks=[lock.access('counting')], + ) + ) + pypy_c_rel = "../build/pypy/goal/pypy-c" + self.addStep(ShellCmd( + # this step needs exclusive access to the CPU + locks=[lock.access('exclusive')], + description="run benchmarks on top of pypy-c", + command=["python", "runner.py", '--output-filename', 'result.json', + '--python', pypy_c_rel, + '--full-store', + '--revision', WithProperties('%(got_revision)s'), + '--branch', WithProperties('%(branch)s'), + ], + workdir='./benchmarks', + timeout=3600)) + # a bit obscure hack to get both os.path.expand and a property + filename = '%(got_revision)s' + (postfix or '') + resfile = os.path.expanduser("~/bench_results_new/%s.json" % filename) + self.addStep(transfer.FileUpload(slavesrc="benchmarks/result.json", + masterdest=WithProperties(resfile), + workdir=".")) class JITBenchmark(factory.BuildFactory): def __init__(self, platform='linux', host='tannit', postfix=''): diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -147,6 +147,9 @@ pypyJITBenchmarkFactory_tannit = pypybuilds.JITBenchmark() pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', postfix='-64') +pypyJITBenchmarkFactory64_speed = pypybuilds.JITBenchmarkSingleRun( + platform='linux64', + postfix='-64') 
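# Illustrative sketch, not part of the changeset above: once buildbot expands
# the WithProperties placeholders, the benchmark step added by
# JITBenchmarkSingleRun amounts to roughly this invocation, run from the
# benchmarks/ checkout of the single-run branch (placeholder values are
# hypothetical):
single_run_cmd = [
    "python", "runner.py",
    "--output-filename", "result.json",
    "--python", "../build/pypy/goal/pypy-c",
    "--full-store",
    "--revision", "<got_revision>",
    "--branch", "<branch>",
]
# The resulting benchmarks/result.json is then uploaded to
# ~/bench_results_new/<got_revision>-64.json on the master.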
pypyNumpyCompatability = pypybuilds.NativeNumpyTests(platform='linux64') @@ -180,7 +183,7 @@ JITONLYLINUXPPC64 = "jitonly-own-linux-ppc-64" JITBENCH = "jit-benchmark-linux-x86-32" JITBENCH64 = "jit-benchmark-linux-x86-64" -JITBENCH64_2 = 'jit-benchmark-linux-x86-64-2' +JITBENCH64_NEW = 'jit-benchmark-linux-x86-64-single-run' CPYTHON_64 = "cpython-2-benchmark-x86-64" NUMPY_64 = "numpy-compatability-linux-x86-64" # buildbot builder @@ -354,6 +357,12 @@ "category": "benchmark-run", # the locks are acquired with fine grain inside the build }, + {"name": JITBENCH64_NEW, + "slavenames": ["speed-python-64"], + "builddir": JITBENCH64_NEW, + "factory": pypyJITBenchmarkFactory64_speed, + "category": "benchmark-run", + }, {"name": MACOSX32, "slavenames": ["minime"], "builddir": MACOSX32, From noreply at buildbot.pypy.org Wed Dec 18 10:50:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 18 Dec 2013 10:50:24 +0100 (CET) Subject: [pypy-commit] buildbot default: these days we have to specify if you can force it Message-ID: <20131218095024.A81C61C1162@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r904:e4dcdf51ee43 Date: 2013-12-18 11:50 +0200 http://bitbucket.org/pypy/buildbot/changeset/e4dcdf51ee43/ Log: these days we have to specify if you can force it diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -273,6 +273,7 @@ JITONLYLINUXPPC64, JITBENCH, JITBENCH64, + JITBENCH64_NEW, NUMPY_64, ] + ARM.builderNames, properties=[]), ] + ARM.schedulers, From noreply at buildbot.pypy.org Wed Dec 18 10:56:21 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 10:56:21 +0100 (CET) Subject: [pypy-commit] pypy default: empty_like support subtypes Message-ID: <20131218095621.C6E0C1C3235@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68464:5a4adfaff6c6 Date: 2013-12-18 04:30 -0500 http://bitbucket.org/pypy/pypy/changeset/5a4adfaff6c6/ Log: empty_like support subtypes diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1468,12 +1468,13 @@ @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): w_a = convert_to_array(space, w_a) - if subok and type(w_a) is not W_NDimArray: - raise OperationError(space.w_NotImplementedError, space.wrap( - "subtypes not implemented")) if w_dtype is None: - w_dtype = w_a.get_dtype() - return zeros(space, w_a.descr_get_shape(space), w_dtype) + dtype = w_a.get_dtype() + else: + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, + w_instance=w_a if subok else None) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -357,6 +357,9 @@ def test_empty_like(self): import numpy as np + a = np.empty_like(np.zeros(())) + assert a.shape == () + assert a.dtype == np.float_ a = np.zeros((2, 3)) assert a.shape == (2, 3) a[0,0] = 1 @@ -371,16 +374,14 @@ b = np.empty_like([1,2,3]) assert b.shape == (3,) assert b.dtype == np.int_ - class A(np.ndarray): 
pass - import sys - if '__pypy__' not in sys.builtin_module_names: - b = np.empty_like(A((2, 3))) - assert b.shape == (2, 3) - assert type(b) is A - else: - raises(NotImplementedError, np.empty_like, A((2, 3))) + b = np.empty_like(A((2, 3))) + assert b.shape == (2, 3) + assert type(b) is A + b = np.empty_like(A((2, 3)), subok=False) + assert b.shape == (2, 3) + assert type(b) is np.ndarray def test_size(self): from numpypy import array,arange,cos From noreply at buildbot.pypy.org Wed Dec 18 10:56:23 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 10:56:23 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131218095623.19FA21C3235@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68465:b63fad133ab6 Date: 2013-12-18 04:53 -0500 http://bitbucket.org/pypy/pypy/changeset/b63fad133ab6/ Log: cleanup diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -539,7 +539,7 @@ def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, dtype, impl.value) @@ -1445,25 +1445,18 @@ @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - if not shape: - return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) + return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - if not shape: - return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) - one = dtype.box(1) - w_arr.fill(space, one) - return space.wrap(w_arr) + w_arr.fill(space, dtype.box(1)) + return w_arr @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): From noreply at buildbot.pypy.org Wed Dec 18 10:56:24 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 10:56:24 +0100 (CET) Subject: [pypy-commit] pypy default: fix subtypes of numpy scalars Message-ID: <20131218095624.552731C3235@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68466:819adf960b3e Date: 2013-12-18 04:51 -0500 http://bitbucket.org/pypy/pypy/changeset/819adf960b3e/ Log: fix subtypes of numpy scalars diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1146,8 +1146,6 @@ w_base=w_buffer, writable=buf.is_writable()) - if not shape: - return W_NDimArray.new_scalar(space, dtype) order = order_converter(space, w_order, NPY_CORDER) if order == NPY_CORDER: order = 'C' 
diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -33,6 +33,11 @@ self = ndarray.__new__(subtype, shape, dtype) self.id = 'subtype' return self + a = C((), int) + assert type(a) is C + assert a.shape == () + assert a.dtype is dtype(int) + assert a.id == 'subtype' a = C([2, 2], int) assert isinstance(a, C) assert isinstance(a, ndarray) From noreply at buildbot.pypy.org Wed Dec 18 11:19:24 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 11:19:24 +0100 (CET) Subject: [pypy-commit] pypy default: ones lives at python-level in numpy now Message-ID: <20131218101924.621B01C08AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68467:12d7e32de888 Date: 2013-12-18 05:06 -0500 http://bitbucket.org/pypy/pypy/changeset/12d7e32de888/ Log: ones lives at python-level in numpy now diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -10,7 +10,6 @@ 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', - 'ones': 'interp_numarray.ones', 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1447,15 +1447,6 @@ shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) - at unwrap_spec(order=str) -def ones(space, w_shape, w_dtype=None, order='C'): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - shape = _find_shape(space, w_shape, dtype) - w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) - w_arr.fill(space, dtype.box(1)) - return w_arr - @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): w_a = convert_to_array(space, w_a) diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -32,3 +32,8 @@ True_ = bool_(True) False_ = bool_(False) + +def ones(*args, **kwargs): + a = zeros(*args, **kwargs) + a.fill(1) + return a From noreply at buildbot.pypy.org Wed Dec 18 11:19:25 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 11:19:25 +0100 (CET) Subject: [pypy-commit] pypy default: these live at python-level in numpy also Message-ID: <20131218101925.BCBDE1C08AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68468:e66481e62e53 Date: 2013-12-18 05:15 -0500 http://bitbucket.org/pypy/pypy/changeset/e66481e62e53/ Log: these live at python-level in numpy also diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -106,8 +106,6 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), - ('ones_like', 'ones_like'), - ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/interp_ufuncs.py 
b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -690,9 +690,6 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), - - ("ones_like", "ones_like", 1), - ("zeros_like", "zeros_like", 1), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1029,22 +1029,6 @@ assert logaddexp2(float('inf'), float('-inf')) == float('inf') assert logaddexp2(float('inf'), float('inf')) == float('inf') - def test_ones_like(self): - from numpypy import array, ones_like - - assert ones_like(False) == array(True) - assert ones_like(2) == array(1) - assert ones_like(2.) == array(1.) - assert ones_like(complex(2)) == array(complex(1)) - - def test_zeros_like(self): - from numpypy import array, zeros_like - - assert zeros_like(True) == array(False) - assert zeros_like(2) == array(0) - assert zeros_like(2.) == array(0.) - assert zeros_like(complex(2)) == array(complex(0)) - def test_accumulate(self): from numpypy import add, multiply, arange assert (add.accumulate([2, 3, 5]) == [2, 5, 10]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -300,14 +300,6 @@ def min(self, v1, v2): return min(v1, v2) - @simple_unary_op - def ones_like(self, v): - return 1 - - @simple_unary_op - def zeros_like(self, v): - return 0 - @raw_unary_op def rint(self, v): float64 = Float64() @@ -1543,14 +1535,6 @@ except ValueError: return rfloat.NAN, rfloat.NAN - @complex_unary_op - def ones_like(self, v): - return 1, 0 - - @complex_unary_op - def zeros_like(self, v): - return 0, 0 - class Complex64(ComplexFloating, BaseType): T = rffi.FLOAT BoxType = interp_boxes.W_Complex64Box From noreply at buildbot.pypy.org Wed Dec 18 17:06:20 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:20 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: add cmdline arg to pass code to run as benchmark Message-ID: <20131218160620.DF8B51C3235@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r534:16ef5cb0ae0c Date: 2013-12-18 13:21 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/16ef5cb0ae0c/ Log: add cmdline arg to pass code to run as benchmark diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -61,6 +61,25 @@ except error.Exit, e: print e.msg +def _run_code(interp, code): + import time + selector = "codeTest%d" % int(time.time()) + try: + w_result = interp.perform( + interp.space.w_SmallInteger, + "compile:classified:notifying:", + space.wrap_string("%s\r\n%s" % (selector, code)), + space.wrap_string("spy-run-code"), + space.w_nil + ) + except interpreter.ReturnFromTopLevel, e: + print e.object + return 1 + except error.Exit, e: + print e.msg + return 1 + return _run_benchmark(interp, 0, selector, "") + space = objspace.ObjSpace() @@ -86,6 +105,7 @@ -n|--number [smallint, default: 0] -m|--method [benchmark on smallint] -a|--arg [string argument to #method] + -r|--run [shell escaped code string] [image path, default: Squeak.image] """ % argv[0] @@ -102,6 +122,7 @@ benchmark = None trace = False stringarg = "" + code = None while idx < len(argv): arg = 
argv[idx] @@ -127,6 +148,10 @@ _arg_missing(argv, idx, arg) stringarg = argv[idx + 1] idx += 1 + elif arg in ["-r", "--run"]: + _arg_missing(argv, idx, arg) + code = argv[idx + 1] + idx += 1 elif path is None: path = argv[idx] else: @@ -154,6 +179,8 @@ space.runtime_setup(argv[0]) if benchmark is not None: return _run_benchmark(interp, number, benchmark, stringarg) + elif code is not None: + return _run_code(interp, code) else: _run_image(interp) return 0 From noreply at buildbot.pypy.org Wed Dec 18 17:06:22 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:22 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: hoping to get rid of three calls in the interrupt checking code Message-ID: <20131218160622.08DA71C3391@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r535:d296a5bfee07 Date: 2013-12-18 13:28 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d296a5bfee07/ Log: hoping to get rid of three calls in the interrupt checking code diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -5,7 +5,7 @@ from spyvm.tool.bitmanipulation import splitter from rpython.rlib import jit -from rpython.rlib import objectmodel, unroll +from rpython.rlib import objectmodel, unroll, rarithmetic class MissingBytecode(Exception): """Bytecode not implemented yet.""" @@ -182,7 +182,10 @@ # We don't adjust the check counter size # use the same time value as the primitive MILLISECOND_CLOCK - now = int(math.fmod(time.time()*1000, constants.TAGGED_MAXINT/2)) + now = rarithmetic.intmask( + int(time.time()*1000) & (constants.TAGGED_MAXINT/2 - 1) + ) + # now = int(math.fmod(time.time()*1000, constants.TAGGED_MAXINT/2)) # XXX the low space semaphore may be signaled here # Process inputs diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1017,7 +1017,9 @@ @expose_primitive(MILLISECOND_CLOCK, unwrap_spec=[object]) def func(interp, s_frame, w_arg): import time, math - return interp.space.wrap_int(int(math.fmod(time.time()*1000, constants.TAGGED_MAXINT/2))) + return interp.space.wrap_int(rarithmetic.intmask( + int(time.time()*1000) & (constants.TAGGED_MAXINT/2 - 1) + )) @expose_primitive(SIGNAL_AT_MILLISECONDS, unwrap_spec=[object, object, int]) def func(interp, s_frame, w_delay, w_semaphore, timestamp): From noreply at buildbot.pypy.org Wed Dec 18 17:06:23 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:23 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: catch exception on os.fstat Message-ID: <20131218160623.3A33F1C3235@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r536:552c99b5e77c Date: 2013-12-18 13:58 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/552c99b5e77c/ Log: catch exception on os.fstat diff --git a/spyvm/plugins/fileplugin.py b/spyvm/plugins/fileplugin.py --- a/spyvm/plugins/fileplugin.py +++ b/spyvm/plugins/fileplugin.py @@ -40,7 +40,10 @@ file_path = os.path.join(full_path, py_name) except OSError: raise PrimitiveFailedError - file_info = os.stat(file_path) + try: + file_info = os.stat(file_path) + except OSError: + raise PrimitiveFailedError w_name = space.wrap_string(py_name) w_creationTime = smalltalk_timestamp(space, file_info.st_ctime) From noreply at buildbot.pypy.org Wed Dec 18 17:06:24 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:24 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: ignore a few more 
files Message-ID: <20131218160624.4BAE11C3235@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r537:46675f9fdb93 Date: 2013-12-18 13:58 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/46675f9fdb93/ Log: ignore a few more files diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -1,8 +1,12 @@ syntax: glob *.py[co] *~ -pypy-c-jit-62116-b027d4428675-linux +pypy-c-jit-* images/Squeak* +images/resources* +*package-cache/ +Squeak* +*TAGS targetimageloadingsmalltalk-*c images/package-cache versions From noreply at buildbot.pypy.org Wed Dec 18 17:06:25 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:25 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: allow just running literal code, as opposed to fair (using processes) benchmarking Message-ID: <20131218160625.5BE111C3235@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r538:5165739fe96b Date: 2013-12-18 13:59 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/5165739fe96b/ Log: allow just running literal code, as opposed to fair (using processes) benchmarking diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -61,7 +61,7 @@ except error.Exit, e: print e.msg -def _run_code(interp, code): +def _run_code(interp, code, as_benchmark=False): import time selector = "codeTest%d" % int(time.time()) try: @@ -78,7 +78,24 @@ except error.Exit, e: print e.msg return 1 - return _run_benchmark(interp, 0, selector, "") + + if not as_benchmark: + try: + w_result = interp.perform(space.wrap_int(0), selector) + except interpreter.ReturnFromTopLevel, e: + print e.object + return 1 + except error.Exit, e: + print e.msg + return 1 + if w_result: + if isinstance(w_result, model.W_BytesObject): + print w_result.as_string().replace('\r', '\n') + else: + print w_result.as_repr_string().replace('\r', '\n') + return 0 + else: + return _run_benchmark(interp, 0, selector, "") space = objspace.ObjSpace() @@ -105,7 +122,8 @@ -n|--number [smallint, default: 0] -m|--method [benchmark on smallint] -a|--arg [string argument to #method] - -r|--run [shell escaped code string] + -r|--run [code string] + -b|--benchmark [code string] [image path, default: Squeak.image] """ % argv[0] @@ -123,6 +141,7 @@ trace = False stringarg = "" code = None + as_benchmark = False while idx < len(argv): arg = argv[idx] @@ -151,6 +170,12 @@ elif arg in ["-r", "--run"]: _arg_missing(argv, idx, arg) code = argv[idx + 1] + as_benchmark = False + idx += 1 + elif arg in ["-b", "--benchmark"]: + _arg_missing(argv, idx, arg) + code = argv[idx + 1] + as_benchmark = True idx += 1 elif path is None: path = argv[idx] @@ -180,7 +205,7 @@ if benchmark is not None: return _run_benchmark(interp, number, benchmark, stringarg) elif code is not None: - return _run_code(interp, code) + return _run_code(interp, code, as_benchmark=as_benchmark) else: _run_image(interp) return 0 From noreply at buildbot.pypy.org Wed Dec 18 17:06:26 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:26 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: add jittests like topaz does them Message-ID: <20131218160626.9D61E1C3235@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r539:a37f452bc27c Date: 2013-12-18 15:18 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a37f452bc27c/ Log: add jittests like topaz does them diff --git a/spyvm/test/jittest/__init__.py 
b/spyvm/test/jittest/__init__.py new file mode 100644 diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py new file mode 100644 --- /dev/null +++ b/spyvm/test/jittest/base.py @@ -0,0 +1,121 @@ +import subprocess +import os + +# TODO: +from pypy.tool.jitlogparser.parser import SimpleParser, Op +from pypy.tool.jitlogparser.storage import LoopStorage + +from rpython.jit.metainterp.resoperation import opname +from rpython.jit.tool import oparser +from rpython.tool import logparser + + +BasePath = os.path.abspath( + os.path.join( + os.path.join(os.path.dirname(__file__), os.path.pardir), + os.path.pardir, + os.path.pardir + ) +) +BenchmarkImage = os.path.join(os.path.dirname(__file__), "benchmark.image") + +class BaseJITTest(object): + def run(self, spy, tmpdir, code): + proc = subprocess.Popen( + [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage], + cwd=str(tmpdir), + env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog")} + ) + proc.wait() + data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False) + data = logparser.extract_category(data, "jit-log-opt-") + + storage = LoopStorage() + traces = [SimpleParser.parse_from_input(t) for t in data] + main_loops = storage.reconnect_loops(traces) + traces_w = [] + for trace in traces: + if trace in main_loops: + traces_w.append(Trace(trace)) + else: + traces_w[len(traces_w) - 1].addbridge(trace) + return traces_w + + def assert_matches(self, trace, expected): + expected_lines = [ + line.strip() + for line in expected.splitlines() + if line and not line.isspace() + ] + parser = Parser(None, None, {}, "lltype", None, invent_fail_descr=None, nonstrict=True) + expected_ops = [parser.parse_next_op(l) for l in expected_lines] + aliases = {} + assert len(trace) == len(expected_ops) + for op, expected in zip(trace, expected_ops): + self._assert_ops_equal(aliases, op, expected) + + def _assert_ops_equal(self, aliases, op, expected): + assert op.name == expected.name + assert len(op.args) == len(expected.args) + for arg, expected_arg in zip(op.args, expected.args): + if arg in aliases: + arg = aliases[arg] + elif arg != expected_arg and expected_arg not in aliases.viewvalues(): + aliases[arg] = arg = expected_arg + assert arg == expected_arg + + +class Parser(oparser.OpParser): + def get_descr(self, poss_descr, allow_invent): + if poss_descr.startswith(("TargetToken", ") + i60 = int_le(i49, 10000) + guard_true(i60, descr=) + i61 = int_add(i49, 1) + i62 = int_sub(i61, -1073741824) + i63 = uint_lt(i62, -2147483648) + guard_true(i63, descr=) + i64 = int_sub(i57, 1) + setfield_gc(ConstPtr(ptr54), i64, descr=) + i65 = int_le(i64, 0) + guard_false(i65, descr=) + jump(p0, p3, i61, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i64, descr=TargetToken(169145008)) + """) + self.assert_matches(traces[0].bridges[0], """ + i18 = getfield_gc(ConstPtr(ptr17), descr=), + f20 = call(ConstClass(ll_time.ll_time_time), descr=), + setfield_gc(ConstPtr(ptr17), i18, descr=), + guard_no_exception(descr=), + f22 = float_mul(f20, 1000.000000), + call(ConstClass(set_errno), 0, descr=), + f27 = call(ConstClass(fmod), f22, 536870911.000000, descr=), + i29 = call(ConstClass(get_errno), descr=), + i30 = float_ne(f27, f27), + guard_false(i30, descr=), + i31 = int_is_true(i29), + guard_false(i31, descr=), + i32 = cast_float_to_int(f27), + i33 = getfield_gc(ConstPtr(ptr17), descr=), + i34 = int_is_zero(i33), + guard_true(i34, descr=), + i35 = same_as(i18), + label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, 
p12, p13, p14, p15, i35, descr=TargetToken(164815776)), + guard_class(p0, ConstClass(MethodContextShadow), descr=), + p37 = getfield_gc(p0, descr=), + guard_value(p37, ConstPtr(ptr38), descr=), + guard_not_invalidated(descr=), + i40 = int_le(i16, 1000000000), + guard_true(i40, descr=), + i42 = int_add(i16, 1), + i44 = int_sub(i42, -1073741824), + i46 = uint_lt(i44, -2147483648), + guard_true(i46, descr=), + i48 = int_sub(i35, 1), + setfield_gc(ConstPtr(ptr17), i48, descr=), + i50 = int_le(i48, 0), + guard_false(i50, descr=), + jump(p0, p1, i42, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, i48, descr=TargetToken(164815536)) + """) + + def test_constant_string(self, spy, tmpdir): + traces = self.run(spy, tmpdir, """ + | i | + i := 0. + [i <= 10000] whileTrue: [ i := i + 'a' size]. + ^ i + """) + self.assert_matches(traces[0].loop, """ + label(p0, p3, i58, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i65, descr=TargetToken(153187472)) + debug_merge_point(0, 0, '2: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') + guard_not_invalidated(descr=) + debug_merge_point(0, 0, '3: [0x21]pushLiteralConstantBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '4: [0xb4]bytecodePrimLessOrEqual (codeTest1387373494)') + i68 = int_le(i58, 10000) + guard_true(i68, descr=) + debug_merge_point(0, 0, '5: [0x9e]shortConditionalJump (codeTest1387373494)') + debug_merge_point(0, 0, '6: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '7: [0x20]pushLiteralConstantBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '8: [0xc2]bytecodePrimSize (codeTest1387373494)') + debug_merge_point(0, 0, '9: [0xb0]bytecodePrimAdd (codeTest1387373494)') + i69 = int_add(i58, 1) + i70 = int_sub(i69, -1073741824) + i71 = uint_lt(i70, -2147483648) + guard_true(i71, descr=) + debug_merge_point(0, 0, '10: [0x68]storeAndPopTemporaryVariableBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '11: [0xa3]longUnconditionalJump (codeTest1387373494)') + i72 = int_sub(i65, 1) + setfield_gc(ConstPtr(ptr55), i72, descr=) + i73 = int_le(i72, 0) + guard_false(i73, descr=) + debug_merge_point(0, 0, '2: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') + jump(p0, p3, i69, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i72, descr=TargetToken(153187472)) + """) From noreply at buildbot.pypy.org Wed Dec 18 17:06:27 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:27 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: (hopefully) fix the millisecond clock issue for now Message-ID: <20131218160627.A054A1C3235@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r540:182c667e14ff Date: 2013-12-18 16:10 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/182c667e14ff/ Log: (hopefully) fix the millisecond clock issue for now diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -146,6 +146,8 @@ TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1 TAGGED_MININT = -2 ** (LONG_BIT - 2) +TAGGED_MASK = int(2 ** (LONG_BIT - 1) - 1) + # Entries into SO_SPECIAL_SELECTORS_ARRAY: #(#+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -5,7 
+5,7 @@ from spyvm.tool.bitmanipulation import splitter from rpython.rlib import jit -from rpython.rlib import objectmodel, unroll, rarithmetic +from rpython.rlib import objectmodel, unroll class MissingBytecode(Exception): """Bytecode not implemented yet.""" @@ -23,7 +23,9 @@ class Interpreter(object): - + _immutable_fields_ = ["space", "image", "image_name", + "max_stack_depth", "interrupt_counter_size", + "startup_time"] _w_last_active_context = None cnt = 0 _last_indent = "" @@ -36,9 +38,11 @@ def __init__(self, space, image=None, image_name="", trace=False, max_stack_depth=constants.MAX_LOOP_DEPTH): + import time self.space = space self.image = image self.image_name = image_name + self.startup_time = time.time() self.max_stack_depth = max_stack_depth self.remaining_stack_depth = max_stack_depth self._loop = False @@ -176,16 +180,12 @@ def check_for_interrupts(self, s_frame): # parallel to Interpreter>>#checkForInterrupts - import time, math # Profiling is skipped # We don't adjust the check counter size # use the same time value as the primitive MILLISECOND_CLOCK - now = rarithmetic.intmask( - int(time.time()*1000) & (constants.TAGGED_MAXINT/2 - 1) - ) - # now = int(math.fmod(time.time()*1000, constants.TAGGED_MAXINT/2)) + now = self.time_now() # XXX the low space semaphore may be signaled here # Process inputs @@ -199,6 +199,11 @@ # We do not support external semaphores. # In cog, the method to add such a semaphore is only called in GC. + def time_now(self): + import time + from rpython.rlib.rarithmetic import intmask + return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) + def padding(self, symbol=' '): return symbol * (self.max_stack_depth - self.remaining_stack_depth) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1016,10 +1016,7 @@ @expose_primitive(MILLISECOND_CLOCK, unwrap_spec=[object]) def func(interp, s_frame, w_arg): - import time, math - return interp.space.wrap_int(rarithmetic.intmask( - int(time.time()*1000) & (constants.TAGGED_MAXINT/2 - 1) - )) + return interp.space.wrap_int(interp.time_now()) @expose_primitive(SIGNAL_AT_MILLISECONDS, unwrap_spec=[object, object, int]) def func(interp, s_frame, w_delay, w_semaphore, timestamp): From noreply at buildbot.pypy.org Wed Dec 18 17:06:28 2013 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 18 Dec 2013 17:06:28 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: update test Message-ID: <20131218160628.9506D1C3235@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r541:2c7d7bfa6077 Date: 2013-12-18 17:05 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/2c7d7bfa6077/ Log: update test diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -23,39 +23,29 @@ jump(p0, p3, i61, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i64, descr=TargetToken(169145008)) """) self.assert_matches(traces[0].bridges[0], """ - i18 = getfield_gc(ConstPtr(ptr17), descr=), - f20 = call(ConstClass(ll_time.ll_time_time), descr=), - setfield_gc(ConstPtr(ptr17), i18, descr=), - guard_no_exception(descr=), - f22 = float_mul(f20, 1000.000000), - call(ConstClass(set_errno), 0, descr=), - f27 = call(ConstClass(fmod), f22, 536870911.000000, descr=), - i29 = call(ConstClass(get_errno), descr=), - i30 = float_ne(f27, f27), - guard_false(i30, descr=), - i31 = int_is_true(i29), - guard_false(i31, 
descr=), - i32 = cast_float_to_int(f27), - i33 = getfield_gc(ConstPtr(ptr17), descr=), - i34 = int_is_zero(i33), - guard_true(i34, descr=), - i35 = same_as(i18), - label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, i35, descr=TargetToken(164815776)), - guard_class(p0, ConstClass(MethodContextShadow), descr=), - p37 = getfield_gc(p0, descr=), - guard_value(p37, ConstPtr(ptr38), descr=), - guard_not_invalidated(descr=), - i40 = int_le(i16, 1000000000), - guard_true(i40, descr=), - i42 = int_add(i16, 1), - i44 = int_sub(i42, -1073741824), - i46 = uint_lt(i44, -2147483648), - guard_true(i46, descr=), - i48 = int_sub(i35, 1), - setfield_gc(ConstPtr(ptr17), i48, descr=), - i50 = int_le(i48, 0), - guard_false(i50, descr=), - jump(p0, p1, i42, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, i48, descr=TargetToken(164815536)) + f18 = call(ConstClass(ll_time.ll_time_time), descr=) + setfield_gc(ConstPtr(ptr19), 10000, descr=) + guard_no_exception(descr=) + f22 = float_sub(f18, 1387380038.806162) + f24 = float_mul(f22, 1000.000000) + i25 = cast_float_to_int(f24) + i27 = int_and(i25, 2147483647) + i28 = getfield_gc(ConstPtr(ptr19), descr=) + i29 = int_is_zero(i28) + guard_true(i29, descr=) + label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, descr=TargetToken(158475216)) + guard_class(p0, ConstClass(MethodContextShadow), descr=) + p31 = getfield_gc(p0, descr=) + guard_value(p31, ConstPtr(ptr32), descr=) + guard_not_invalidated(descr=) + i34 = int_le(i16, 1000000000) + guard_true(i34, descr=) + i36 = int_add(i16, 1) + i38 = int_sub(i36, -1073741824) + i40 = uint_lt(i38, -2147483648) + guard_true(i40, descr=) + setfield_gc(ConstPtr(ptr19), 9999, descr=) + jump(p0, p1, i36, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, 9999, descr=TargetToken(158474976)) """) def test_constant_string(self, spy, tmpdir): From noreply at buildbot.pypy.org Wed Dec 18 17:10:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 18 Dec 2013 17:10:05 +0100 (CET) Subject: [pypy-commit] stmgc c5: Make more public the attempt started at Message-ID: <20131218161005.DA2471C08A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r556:808b5bebaf57 Date: 2013-12-18 17:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/808b5bebaf57/ Log: Make more public the attempt started at https://bitbucket.org/arigo/arigo/raw/default/hack/stm/c5 From noreply at buildbot.pypy.org Wed Dec 18 17:10:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 18 Dec 2013 17:10:07 +0100 (CET) Subject: [pypy-commit] stmgc c5: Initial import. Message-ID: <20131218161007.009F01C08A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r557:49446e74e137 Date: 2013-12-18 17:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/49446e74e137/ Log: Initial import. diff --git a/c5/Makefile b/c5/Makefile new file mode 100644 --- /dev/null +++ b/c5/Makefile @@ -0,0 +1,10 @@ + +H_FILES = core.h pagecopy.h +C_FILES = core.c pagecopy.c + + +demo1: demo1.c $(C_FILES) $(H_FILES) + gcc -o $@ -O2 -g demo1.c $(C_FILES) -Wall + +clean: + rm -f demo1 diff --git a/c5/core.c b/c5/core.c new file mode 100644 --- /dev/null +++ b/c5/core.c @@ -0,0 +1,537 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "pagecopy.h" + + +/* This file only works on 64-bit Linux for now. The logic is based on + remapping pages around, which can get a bit confusing. 
Each "thread" + runs in its own process, so that it has its own mapping. The + processes share an mmap of length NB_PAGES, which is created shared + but anonymous, and passed to subprocesses by forking. + + The mmap's content does not depend on which process is looking at it: + it contains what we'll call "mm pages", which is 4096 bytes of data + at some file offset (which all processes agree on). The term "pgoff" + used below means such an offset. It is a uint32_t expressed in units + of 4096 bytes; so the underlying mmap is limited to 2**32 pages or + 16TB. + + The mm pages are then mapped in each process at some address, and + their content is accessed with regular pointers. We'll call such a + page a "local page". The term "local" is used because each process + has its own, different mapping. As it turns out, mm pages are + initially mapped sequentially as local pages, but this changes over + time. To do writes in a transaction, the data containing the object + is first duplicated --- so we allocate a fresh new mm page in the + mmap file, and copy the contents to it. Then we remap the new mm + page over the *same* local page as the original. So from this + process' point of view, the object is still at the same address, but + writes to it now happen to go to the new mm page instead of the old + one. + + The local pages are usually referenced by pointers, but may also be + expressed as an index, called the "local index" of the page. +*/ + +#ifdef STM_TESTS +# define NB_PAGES (256*10) // 10MB +#else +# define NB_PAGES (256*1024) // 1GB +#endif +#define MAP_PAGES_FLAGS (MAP_SHARED|MAP_ANONYMOUS) + +#define CACHE_LINE_SIZE 128 // conservatively large value to avoid aliasing + +#define PGKIND_NEVER_USED 0 +#define LARGE_OBJECT_WORDS 36 /* range(2, LARGE_OBJECT_WORDS) */ +#define PGKIND_FREED 0xff +#define PGKIND_WRITE_HISTORY 0xfe +#define PGKIND_SHARED_DESCRIPTOR 0xfd /* only for the first mm page */ + +struct page_header_s { + /* Every page starts with one such structure */ + uint16_t version; /* when the data in the page was written */ + uint8_t modif_head; /* head of a chained list of objects in this + page that have modified == this->version */ + uint8_t kind; /* either PGKIND_xxx or a number in + range(2, LARGE_OBJECT_WORDS) */ + uint32_t pgoff; /* the mm page offset */ +}; + +struct read_marker_s { + /* We associate a single byte to every object, by simply dividing + the address of the object by 16. This is the last byte of the + last time we have read the object. See stm_read(). */ + unsigned char c; +}; + +struct write_history_s { + struct write_history_s *previous_older_transaction; + uint16_t transaction_version; + uint32_t nb_updates; + uint32_t updates[]; /* pairs (local_index, new_pgoff) */ +}; + +struct shared_descriptor_s { + /* There is a single shared descriptor. This regroups all data + that needs to be dynamically shared among processes. The + first mm page is used for this. */ + union { + struct page_header_s header; + char _pad0[CACHE_LINE_SIZE]; + }; + union { + uint64_t index_page_never_used; + char _pad1[CACHE_LINE_SIZE]; + }; + union { + unsigned int next_transaction_version; + char _pad2[CACHE_LINE_SIZE]; + }; + union { + struct write_history_s *most_recent_committed_transaction; + char _pad3[CACHE_LINE_SIZE]; + }; +}; + +struct alloc_for_size_s { + char *next; + char *end; +}; + +struct local_data_s { + /* This is just a bunch of global variables, but during testing, + we save it all away and restore different ones to simulate + different forked processes. 
*/ + char *read_markers; + struct read_marker_s *current_read_markers; + uint16_t transaction_version; + struct write_history_s *base_page_mapping; + struct write_history_s *writes_by_this_transaction; + struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; +}; + +struct shared_descriptor_s *stm_shared_descriptor; +struct local_data_s stm_local; + + +void stm_read(struct object_s *object) +{ + stm_local.current_read_markers[((uintptr_t)object) >> 4].c = + (unsigned char)(uintptr_t)stm_local.current_read_markers; +} + +_Bool _stm_was_read(struct object_s *object) +{ + return (stm_local.current_read_markers[((uintptr_t)object) >> 4].c == + (unsigned char)(uintptr_t)stm_local.current_read_markers); +} + +void _stm_write_slowpath(struct object_s *); + +void stm_write(struct object_s *object) +{ + if (__builtin_expect(object->modified != stm_local.transaction_version, + 0)) + _stm_write_slowpath(object); +} + +_Bool _stm_was_written(struct object_s *object) +{ + return (object->modified == stm_local.transaction_version); +} + + +struct page_header_s *_stm_reserve_page(void) +{ + /* Grab a free mm page, and map it into the address space. + Return a pointer to it. It has kind == PGKIND_FREED. */ + + // XXX look in some free list first + + /* Return the index'th mm page, which is so far NEVER_USED. It + should never have been accessed so far, and be already mapped + as the index'th local page. */ + struct shared_descriptor_s *d = stm_shared_descriptor; + uint64_t index = __sync_fetch_and_add(&d->index_page_never_used, 1); + if (index >= NB_PAGES) { + fprintf(stderr, "Out of mmap'ed memory!\n"); + abort(); + } + struct page_header_s *result = (struct page_header_s *) + (((char *)stm_shared_descriptor) + index * 4096); + assert(result->kind == PGKIND_NEVER_USED); + result->kind = PGKIND_FREED; + result->pgoff = index; + return result; +} + + +static uint32_t get_pgoff(struct page_header_s *page) +{ + assert(page->pgoff > 0); + assert(page->pgoff < NB_PAGES); + return page->pgoff; +} + +static uint32_t get_local_index(struct page_header_s *page) +{ + uint64_t index = ((char *)page) - (char *)stm_shared_descriptor; + assert((index & 4095) == 0); + index /= 4096; + assert(0 < index && index < NB_PAGES); + return index; +} + +static struct page_header_s *get_page_by_local_index(uint32_t index) +{ + assert(0 < index && index < NB_PAGES); + uint64_t ofs = ((uint64_t)index) * 4096; + return (struct page_header_s *)(((char *)stm_shared_descriptor) + ofs); +} + +void _stm_write_slowpath(struct object_s * object) +{ + stm_read(object); + + struct page_header_s *page; + page = (struct page_header_s *)(((uintptr_t)object) & ~4095); + assert(2 <= page->kind && page->kind < LARGE_OBJECT_WORDS); + + if (page->version != stm_local.transaction_version) { + struct page_header_s *newpage = _stm_reserve_page(); + uint32_t old_pgoff = get_pgoff(page); + uint32_t new_pgoff = get_pgoff(newpage); + + pagecopy(newpage, page); + newpage->version = stm_local.transaction_version; + newpage->modif_head = 0xff; + newpage->pgoff = new_pgoff; + assert(page->version != stm_local.transaction_version); + assert(page->pgoff == old_pgoff); + + remap_file_pages((void *)page, 4096, 0, new_pgoff, MAP_PAGES_FLAGS); + + assert(page->version == stm_local.transaction_version); + assert(page->pgoff == new_pgoff); + + struct write_history_s *cur = stm_local.writes_by_this_transaction; + uint64_t i = cur->nb_updates++; + size_t history_size_max = 4096 - (((uintptr_t)cur) & 4095); + assert(sizeof(*cur) + cur->nb_updates * 8 <= history_size_max); + 
cur->updates[i * 2 + 0] = get_local_index(page); + cur->updates[i * 2 + 1] = new_pgoff; + } + object->modified = stm_local.transaction_version; + object->modif_next = page->modif_head; + page->modif_head = (uint8_t)(((uintptr_t)object) >> 4); + assert(page->modif_head != 0xff); +} + +char *_stm_alloc_next_page(size_t i) +{ + struct page_header_s *newpage = _stm_reserve_page(); + newpage->modif_head = 0xff; + newpage->kind = i; /* object size in words */ + newpage->version = stm_local.transaction_version; + stm_local.alloc[i].next = ((char *)(newpage + 1)) + (i * 8); + stm_local.alloc[i].end = ((char *)newpage) + 4096; + assert(stm_local.alloc[i].next <= stm_local.alloc[i].end); + return (char *)(newpage + 1); +} + +struct object_s *stm_allocate(size_t size) +{ + assert(size % 8 == 0); + size_t i = size / 8; + assert(2 <= i && i < LARGE_OBJECT_WORDS); + struct alloc_for_size_s *alloc = &stm_local.alloc[i]; + + char *p = alloc->next; + alloc->next += size; + if (alloc->next > alloc->end) + p = _stm_alloc_next_page(i); + + struct object_s *result = (struct object_s *)p; + result->modified = stm_local.transaction_version; + /*result->modif_next is uninitialized*/ + result->flags = 0x42; /* for debugging */ + return result; +} + + +unsigned char stm_get_read_marker_number(void) +{ + return (unsigned char)(uintptr_t)stm_local.current_read_markers; +} + +void stm_set_read_marker_number(uint8_t num) +{ + char *stm_pages = ((char *)stm_shared_descriptor) + 4096; + uintptr_t delta = ((uintptr_t)stm_pages) >> 4; + struct read_marker_s *crm = (struct read_marker_s *)stm_local.read_markers; + stm_local.current_read_markers = crm - delta; + assert(stm_get_read_marker_number() == 0); + stm_local.current_read_markers += num; +} + +void stm_setup(void) +{ + if (sizeof(char *) != 8) { + fprintf(stderr, "Only works on 64-bit Linux systems for now!\n"); + abort(); + } + if (NB_PAGES > (1ull << 32)) { + fprintf(stderr, "Cannot use more than 1<<32 pages of memory"); + abort(); + } + char *stm_pages = mmap(NULL, NB_PAGES*4096, PROT_READ|PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (stm_pages == MAP_FAILED) { + perror("mmap stm_pages failed"); + abort(); + } + assert(sizeof(struct shared_descriptor_s) <= 4096); + stm_shared_descriptor = (struct shared_descriptor_s *)stm_pages; + stm_shared_descriptor->header.kind = PGKIND_SHARED_DESCRIPTOR; + /* the page at index 0 contains the '*stm_shared_descriptor' structure */ + /* the page at index 1 is reserved for history_fast_forward() */ + stm_shared_descriptor->index_page_never_used = 2; + stm_shared_descriptor->next_transaction_version = 1; +} + +void _stm_teardown(void) +{ + munmap((void *)stm_shared_descriptor, NB_PAGES*4096); + stm_shared_descriptor = NULL; +} + +void stm_setup_process(void) +{ + memset(&stm_local, 0, sizeof(stm_local)); + stm_local.read_markers = mmap(NULL, NB_PAGES*(4096 >> 4) + 1, + PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, + -1, 0); + if (stm_local.read_markers == MAP_FAILED) { + perror("mmap stm_read_markers failed"); + abort(); + } + + stm_set_read_marker_number(42); + assert(stm_get_read_marker_number() == 42); + stm_set_read_marker_number(1); +} + +void _stm_teardown_process(void) +{ + munmap((void *)stm_local.read_markers, NB_PAGES*(4096 >> 4) + 1); + memset(&stm_local, 0, sizeof(stm_local)); +} + +static size_t get_obj_size_in_words(struct page_header_s *page) +{ + size_t result = page->kind; + assert(2 <= result && result < LARGE_OBJECT_WORDS); + return result; +} + +static +struct object_s *get_object_in_page(struct 
page_header_s *page, size_t index) +{ + /* Slight complication here, because objects are aligned to 8 bytes, + but we divides their page offset by 16 to fit a byte (4096/16 = + 256) and reduce memory overhead of the read markers. Objects are + at least 16 bytes in size, so there is no ambiguity. Example for + objects of 24 bytes of the organization inside a page (each word + of the first line is 8 bytes): + + [HDR][OBJ.ECT.24][OBJ.ECT.24][OBJ.ECT.24][OBJ.ECT.24][.. + 0 (16) 32 48 (64) 80 96 + + The second line is all possible offsets, which are multiples of + 16. They are the rounded-down version of the real offsets. + object and round it down to a mutiple of 16. For objects of size + 24, the numbers in parenthesis above are not reachable this way. + The number 255 is never reachable. To go from the number to the + object address, we have to add either 0 or 8. + */ + size_t obj_size_in_words = get_obj_size_in_words(page); + size_t offset = (index << 4) + + ((index << 1) % obj_size_in_words == 0 ? 8 : 0); + return (struct object_s *)(((char *)page) + offset); +} + +static int history_fast_forward(struct write_history_s *new, int conflict) +{ + /* XXX do a non-recursive version, which also should avoid repeated + remap_file_pages() on the same local-index-ed page */ + if (stm_local.base_page_mapping != new->previous_older_transaction) { + conflict = history_fast_forward(new->previous_older_transaction, + conflict); + } + assert(stm_local.base_page_mapping == new->previous_older_transaction); + + uint64_t i, nb_updates = new->nb_updates; + for (i = 0; i < nb_updates; i++) { + retry:; + /* new->updates[] is an array of pairs (local_index, new_pgoff) */ + uint32_t local_index = new->updates[i * 2 + 0]; + uint32_t new_pgoff = new->updates[i * 2 + 1]; + struct page_header_s *page = get_page_by_local_index(local_index); + struct page_header_s *mypage = page; + + if (!conflict && page->version == stm_local.transaction_version) { + /* If we have also modified this page, then we must merge our + changes with the ones done at 'new_pgoff'. In this case + we map 'new_pgoff' at the local index 1. */ + page = get_page_by_local_index(1); + } + + remap_file_pages((void *)page, 4096, 0, new_pgoff, MAP_PAGES_FLAGS); + assert(page->pgoff == new_pgoff); + + if (conflict) + continue; + + /* look for read-from-me, write-from-others conflicts */ + if (mypage == page) { + /* only look for conflicts: for every object modified by the + other transaction, check that it was not read by us. */ + size_t modif_index = page->modif_head; + while (modif_index != 0xff) { + struct object_s *obj = get_object_in_page(page, modif_index); + assert(obj->flags == 0x42); + if (_stm_was_read(obj)) { + fprintf(stderr, "# conflict: %p\n", obj); + conflict = 1; + break; + } + modif_index = obj->modif_next; + } + } + else { + /* Merge two versions of the page: for every object modified + by the other transaction, check that it was not read by us, + and then copy it over into our own page at 'mypage'. 
*/ + size_t obj_size = get_obj_size_in_words(page) << 3; + uint64_t diff_to_mypage = ((char *)mypage) - (char *)page; + size_t modif_index = page->modif_head; + while (modif_index != 0xff) { + struct object_s *obj = get_object_in_page(page, modif_index); + struct object_s *myobj = (struct object_s *) + (((char *)obj) + diff_to_mypage); + assert(obj->flags == 0x42); + assert(myobj->flags == 0x42); // || myobj->flags == 0); + if (_stm_was_read(myobj)) { + fprintf(stderr, "# conflict: %p\n", myobj); + conflict = 1; + goto retry; + } + memcpy(myobj, obj, obj_size); + modif_index = obj->modif_next; + } + } + } + stm_local.base_page_mapping = new; + return conflict; +} + +void stm_start_transaction(void) +{ + struct shared_descriptor_s *d = stm_shared_descriptor; + stm_local.transaction_version = + __sync_fetch_and_add(&d->next_transaction_version, 1u); + assert(stm_local.transaction_version <= 0xffff); + + struct page_header_s *newpage = _stm_reserve_page(); + newpage->kind = PGKIND_WRITE_HISTORY; + + struct write_history_s *cur = (struct write_history_s *)(newpage + 1); + cur->previous_older_transaction = NULL; + cur->transaction_version = stm_local.transaction_version; + cur->nb_updates = 0; + assert(stm_local.writes_by_this_transaction == NULL); + stm_local.writes_by_this_transaction = cur; + + struct write_history_s *hist = d->most_recent_committed_transaction; + if (hist != stm_local.base_page_mapping) { + history_fast_forward(hist, 1); + } +} + +_Bool stm_stop_transaction(void) +{ + struct shared_descriptor_s *d = stm_shared_descriptor; + assert(stm_local.writes_by_this_transaction != NULL); + int conflict = 0; + //fprintf(stderr, "stm_stop_transaction\n"); + + while (1) { + struct write_history_s *hist = d->most_recent_committed_transaction; + if (hist != stm_local.base_page_mapping) { + conflict = history_fast_forward(hist, 0); + if (conflict) + break; + else + continue; /* retry from the start of the loop */ + } + struct write_history_s *cur = stm_local.writes_by_this_transaction; + cur->previous_older_transaction = hist; + if (__sync_bool_compare_and_swap(&d->most_recent_committed_transaction, + hist, cur)) + break; + } + stm_local.writes_by_this_transaction = NULL; + + assert(stm_get_read_marker_number() < 0xff); + stm_local.current_read_markers++; + return !conflict; +} + +#ifdef STM_TESTS +struct local_data_s *_stm_save_local_state(void) +{ + uint64_t i, page_count = stm_shared_descriptor->index_page_never_used; + uint32_t *pgoffs; + struct local_data_s *p = malloc(sizeof(struct local_data_s) + + page_count * sizeof(uint32_t)); + assert(p != NULL); + memcpy(p, &stm_local, sizeof(stm_local)); + + pgoffs = (uint32_t *)(p + 1); + pgoffs[0] = page_count; + for (i = 2; i < page_count; i++) { + pgoffs[i] = get_pgoff(get_page_by_local_index(i)); + } + + return p; +} + +void _stm_restore_local_state(struct local_data_s *p) +{ + uint64_t i, page_count; + uint32_t *pgoffs; + + remap_file_pages((void *)stm_shared_descriptor, 4096 * NB_PAGES, + 0, 0, MAP_PAGES_FLAGS); + + pgoffs = (uint32_t *)(p + 1); + page_count = pgoffs[0]; + for (i = 2; i < page_count; i++) { + struct page_header_s *page = get_page_by_local_index(i); + remap_file_pages((void *)page, 4096, 0, pgoffs[i], MAP_PAGES_FLAGS); + assert(get_pgoff(page) == pgoffs[i]); + } + + memcpy(&stm_local, p, sizeof(struct local_data_s)); + free(p); +} +#endif diff --git a/c5/core.h b/c5/core.h new file mode 100644 --- /dev/null +++ b/c5/core.h @@ -0,0 +1,30 @@ +#ifndef _STM_CORE_H +#define _STM_CORE_H + +#include + +struct object_s { + /* 
Every objects starts with one such structure */ + uint16_t modified; + uint8_t modif_next; + uint8_t flags; +}; + +void stm_setup(void); +void stm_setup_process(void); + +void stm_start_transaction(void); +_Bool stm_stop_transaction(void); +struct object_s *stm_allocate(size_t size); + +void stm_read(struct object_s *object); +void stm_write(struct object_s *object); +_Bool _stm_was_read(struct object_s *object); +_Bool _stm_was_written(struct object_s *object); + +struct local_data_s *_stm_save_local_state(void); +void _stm_restore_local_state(struct local_data_s *p); +void _stm_teardown(void); +void _stm_teardown_process(void); + +#endif diff --git a/c5/demo1.c b/c5/demo1.c new file mode 100644 --- /dev/null +++ b/c5/demo1.c @@ -0,0 +1,100 @@ +#include +#include +#include +#include +#include +#include + +#include "core.h" + + +#define NUM_THREADS 4 + + +typedef struct { + struct object_s header; + int val1, val2; +} obj_t; + +void do_run_in_thread(int i) +{ + stm_start_transaction(); + obj_t *ob1 = (obj_t *)stm_allocate(16); + obj_t *ob2 = (obj_t *)stm_allocate(16); + + assert(!_stm_was_read(&ob1->header)); + assert(!_stm_was_read(&ob2->header)); + stm_read(&ob1->header); + stm_read(&ob2->header); + assert(_stm_was_read(&ob1->header)); + assert(_stm_was_read(&ob2->header)); + assert(_stm_was_written(&ob1->header)); + assert(_stm_was_written(&ob2->header)); + stm_write(&ob1->header); + stm_write(&ob2->header); + assert(_stm_was_written(&ob1->header)); + assert(_stm_was_written(&ob2->header)); + ob1->val1 = 100; + ob1->val2 = 200; + ob2->val1 = 300; + ob2->val2 = 400; + + stm_stop_transaction(); + + int j; + for (j=0; j<2; j++) { + stm_start_transaction(); + + assert(!_stm_was_read(&ob1->header)); + assert(!_stm_was_read(&ob2->header)); + assert(!_stm_was_written(&ob1->header)); + assert(!_stm_was_written(&ob2->header)); + stm_read(&ob1->header); + printf("thread %d: ob1.val2=%d\n", i, ob1->val2); + + stm_write(&ob1->header); + assert(_stm_was_written(&ob1->header)); + assert(!_stm_was_written(&ob2->header)); + + stm_stop_transaction(); + } + + printf("thread %d: %p, %p\n", i, ob1, ob2); +} + +void do_test(void) +{ + int i; + pid_t child_pids[NUM_THREADS]; + + for (i = 0; i < NUM_THREADS; i++) { + child_pids[i] = fork(); + if (child_pids[i] == -1) { + perror("fork"); + abort(); + } + if (child_pids[i] == 0) { + stm_setup_process(); + do_run_in_thread(i); + exit(0); + } + } + + for (i = 0; i < NUM_THREADS; i++) { + int status; + if (waitpid(child_pids[i], &status, 0) == -1) { + perror("waitpid"); + abort(); + } + } +} + + +int main(int argc, char *argv[]) +{ + stm_setup(); + + do_test(); + + return 0; +} diff --git a/c5/pagecopy.c b/c5/pagecopy.c new file mode 100644 --- /dev/null +++ b/c5/pagecopy.c @@ -0,0 +1,60 @@ + +void pagecopy(void *dest, const void *src) +{ + asm volatile("0:\n" + "movdqa (%0), %%xmm0\n" + "movdqa 16(%0), %%xmm1\n" + "movdqa 32(%0), %%xmm2\n" + "movdqa 48(%0), %%xmm3\n" + "movdqa 64(%0), %%xmm4\n" + "movdqa 80(%0), %%xmm5\n" + "movdqa 96(%0), %%xmm6\n" + "movdqa 112(%0), %%xmm7\n" + "addq $128, %0\n" + "movdqa %%xmm0, (%1)\n" + "movdqa %%xmm1, 16(%1)\n" + "movdqa %%xmm2, 32(%1)\n" + "movdqa %%xmm3, 48(%1)\n" + "movdqa %%xmm4, 64(%1)\n" + "movdqa %%xmm5, 80(%1)\n" + "movdqa %%xmm6, 96(%1)\n" + "movdqa %%xmm7, 112(%1)\n" + "addq $128, %1\n" + "cmpq %2, %0\n" + "jne 0b" + : "=r"(src), "=r"(dest) + : "r"((char *)src + 4096), "0"(src), "1"(dest) + : "xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7"); +} + +#if 0 /* XXX enable if detected on the cpu */ 
+void pagecopy_ymm8(void *dest, const void *src) +{ + asm volatile("0:\n" + "vmovdqa (%0), %%ymm0\n" + "vmovdqa 32(%0), %%ymm1\n" + "vmovdqa 64(%0), %%ymm2\n" + "vmovdqa 96(%0), %%ymm3\n" + "vmovdqa 128(%0), %%ymm4\n" + "vmovdqa 160(%0), %%ymm5\n" + "vmovdqa 192(%0), %%ymm6\n" + "vmovdqa 224(%0), %%ymm7\n" + "addq $256, %0\n" + "vmovdqa %%ymm0, (%1)\n" + "vmovdqa %%ymm1, 32(%1)\n" + "vmovdqa %%ymm2, 64(%1)\n" + "vmovdqa %%ymm3, 96(%1)\n" + "vmovdqa %%ymm4, 128(%1)\n" + "vmovdqa %%ymm5, 160(%1)\n" + "vmovdqa %%ymm6, 192(%1)\n" + "vmovdqa %%ymm7, 224(%1)\n" + "addq $256, %1\n" + "cmpq %2, %0\n" + "jne 0b" + : "=r"(src), "=r"(dest) + : "r"((char *)src + 4096), "0"(src), "1"(dest) + : "xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7"); +} +#endif diff --git a/c5/pagecopy.h b/c5/pagecopy.h new file mode 100644 --- /dev/null +++ b/c5/pagecopy.h @@ -0,0 +1,2 @@ + +void pagecopy(void *dest, const void *src); diff --git a/c5/test/support.py b/c5/test/support.py new file mode 100644 --- /dev/null +++ b/c5/test/support.py @@ -0,0 +1,107 @@ +import os +import cffi + +# ---------- + +parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +header_files = [os.path.join(parent_dir, _n) for _n in + "core.h pagecopy.h".split()] +source_files = [os.path.join(parent_dir, _n) for _n in + "core.c pagecopy.c".split()] + +_pycache_ = os.path.join(parent_dir, 'test', '__pycache__') +if os.path.exists(_pycache_): + _fs = [_f for _f in os.listdir(_pycache_) if _f.startswith('_cffi_')] + if _fs: + _fsmtime = min(os.stat(os.path.join(_pycache_, _f)).st_mtime + for _f in _fs) + if any(os.stat(src).st_mtime >= _fsmtime + for src in header_files + source_files): + import shutil + shutil.rmtree(_pycache_) + +# ---------- + +ffi = cffi.FFI() +ffi.cdef(""" +void stm_setup(void); +void stm_setup_process(void); + +void stm_start_transaction(void); +_Bool stm_stop_transaction(void); +struct object_s *stm_allocate(size_t size); + +void stm_read(struct object_s *object); +void stm_write(struct object_s *object); +_Bool _stm_was_read(struct object_s *object); +_Bool _stm_was_written(struct object_s *object); + +struct local_data_s *_stm_save_local_state(void); +void _stm_restore_local_state(struct local_data_s *p); +void _stm_teardown(void); +void _stm_teardown_process(void); +""") + +lib = ffi.verify(''' +#include "core.h" +''', sources=source_files, + define_macros=[('STM_TESTS', '1')], + undef_macros=['NDEBUG'], + include_dirs=[parent_dir], + extra_compile_args=['-g', '-O0']) + +def intptr(p): + return int(ffi.cast("intptr_t", p)) + +def stm_allocate(size): + return ffi.cast("char *", lib.stm_allocate(size)) + +def stm_read(ptr): + lib.stm_read(ffi.cast("struct object_s *", ptr)) + +def stm_write(ptr): + lib.stm_write(ffi.cast("struct object_s *", ptr)) + +def _stm_was_read(ptr): + return lib._stm_was_read(ffi.cast("struct object_s *", ptr)) + +def _stm_was_written(ptr): + return lib._stm_was_written(ffi.cast("struct object_s *", ptr)) + +def stm_start_transaction(): + lib.stm_start_transaction() + +def stm_stop_transaction(expected_conflict): + res = lib.stm_stop_transaction() + if expected_conflict: + assert res == 0 + else: + assert res == 1 + + +class BaseTest(object): + + def setup_method(self, meth): + lib.stm_setup() + lib.stm_setup_process() + self.saved_states = {} + self.current_proc = "main" + + def teardown_method(self, meth): + lib._stm_teardown_process() + for saved_state in self.saved_states.values(): + lib._stm_restore_local_state(saved_state) + lib._stm_teardown_process() + 
del self.saved_states + lib._stm_teardown() + + def switch(self, process_name): + self.saved_states[self.current_proc] = lib._stm_save_local_state() + try: + target_saved_state = self.saved_states.pop(process_name) + except KeyError: + lib.stm_setup_process() + else: + lib._stm_restore_local_state(target_saved_state) + self.current_proc = process_name diff --git a/c5/test/test_basic.py b/c5/test/test_basic.py new file mode 100644 --- /dev/null +++ b/c5/test/test_basic.py @@ -0,0 +1,153 @@ +from support import * + + +class TestBasic(BaseTest): + + def test_thread_local_allocations(self): + p1 = stm_allocate(16) + p2 = stm_allocate(16) + assert intptr(p2) - intptr(p1) == 16 + p3 = stm_allocate(16) + assert intptr(p3) - intptr(p2) == 16 + # + self.switch("sub1") + p1s = stm_allocate(16) + assert abs(intptr(p1s) - intptr(p3)) >= 4000 + # + self.switch("main") + p4 = stm_allocate(16) + assert intptr(p4) - intptr(p3) == 16 + + def test_read_write_1(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'a' + p1[8] = 'b' + # + self.switch("main") + stm_start_transaction() + stm_read(p1) + assert p1[8] == 'a' + # + self.switch("sub1") + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + + def test_start_transaction_updates(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'a' + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + stm_start_transaction() + assert p1[8] == 'b' + + def test_resolve_no_conflict_empty(self): + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_stop_transaction(False) + # + self.switch("main") + stm_stop_transaction(False) + + def test_resolve_no_conflict_write_only_in_already_committed(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + stm_stop_transaction(False) + assert p1[8] == 'b' + + def test_resolve_write_read_conflict(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + stm_read(p1) + assert p1[8] == 'a' + stm_stop_transaction(expected_conflict=True) + assert p1[8] in ('a', 'b') + stm_start_transaction() + assert p1[8] == 'b' + + def test_resolve_write_write_conflict(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + stm_write(p1) + p1[8] = 'c' + stm_stop_transaction(expected_conflict=True) + assert p1[8] in ('a', 'b') + stm_start_transaction() + assert p1[8] == 'b' + + def test_resolve_write_write_no_conflict(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'a' + p2[8] = 'A' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + 
stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + stm_write(p2) + p2[8] = 'C' + stm_stop_transaction(False) + assert p1[8] == 'b' + assert p2[8] == 'C' diff --git a/c5/test/test_bug.py b/c5/test/test_bug.py new file mode 100644 --- /dev/null +++ b/c5/test/test_bug.py @@ -0,0 +1,429 @@ +from support import * + + +class TestBug(BaseTest): + + def test_bug1(self): + stm_start_transaction() + p8 = stm_allocate(16) + p8[8] = '\x08' + stm_stop_transaction(False) + # + self.switch("sub1") + self.switch("main") + stm_start_transaction() + stm_write(p8) + p8[8] = '\x97' + # + self.switch("sub1") + stm_start_transaction() + stm_read(p8) + assert p8[8] == '\x08' + + def test_bug2(self): + stm_start_transaction() + p0 = stm_allocate(16) + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p3 = stm_allocate(16) + p4 = stm_allocate(16) + p5 = stm_allocate(16) + p6 = stm_allocate(16) + p7 = stm_allocate(16) + p8 = stm_allocate(16) + p9 = stm_allocate(16) + p0[8] = '\x00' + p1[8] = '\x01' + p2[8] = '\x02' + p3[8] = '\x03' + p4[8] = '\x04' + p5[8] = '\x05' + p6[8] = '\x06' + p7[8] = '\x07' + p8[8] = '\x08' + p9[8] = '\t' + stm_stop_transaction(False) + self.switch(0) + self.switch(1) + self.switch(2) + # + self.switch(1) + stm_start_transaction() + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(1) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_read(p4) + assert p4[8] == '\x04' + # + self.switch(0) + stm_start_transaction() + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(2) + stm_start_transaction() + stm_read(p8) + assert p8[8] == '\x08' + stm_write(p8) + p8[8] = '\x08' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_read(p2) + assert p2[8] == '\x02' + # + self.switch(2) + stm_read(p2) + assert p2[8] == '\x02' + # + self.switch(2) + stm_read(p2) + assert p2[8] == '\x02' + stm_write(p2) + p2[8] = 'm' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\x04' + stm_write(p4) + p4[8] = '\xc5' + # + self.switch(2) + stm_read(p1) + assert p1[8] == '\x01' + # + self.switch(2) + stm_stop_transaction(False) #1 + # ['\x00', '\x01', 'm', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [8, 2] + # + self.switch(0) + stm_stop_transaction(False) #2 + # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [4] + # + self.switch(0) + stm_start_transaction() + stm_read(p6) + assert p6[8] == '\x06' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\xc5' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\xc5' + # + self.switch(1) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_stop_transaction(True) #3 + # conflict: 0xdf0a8028 + # + self.switch(2) + stm_start_transaction() + stm_read(p6) + assert p6[8] == '\x06' + # + self.switch(1) + stm_start_transaction() + stm_read(p1) + assert p1[8] == '\x01' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\xc5' + stm_write(p4) + p4[8] = '\x0c' + # + self.switch(2) + stm_read(p2) + assert p2[8] == 'm' + stm_write(p2) + p2[8] = '\x81' + # + self.switch(2) + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(0) + stm_read(p5) + assert p5[8] == '\x05' + stm_write(p5) + p5[8] = 'Z' + # + self.switch(1) + stm_stop_transaction(False) #4 + # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [] + # + self.switch(2) + stm_read(p8) + assert p8[8] == '\x08' + # + self.switch(0) + stm_read(p0) + assert 
p0[8] == '\x00' + # + self.switch(1) + stm_start_transaction() + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(2) + stm_read(p9) + assert p9[8] == '\t' + stm_write(p9) + p9[8] = '\x81' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_read(p2) + assert p2[8] == 'm' + # + self.switch(2) + stm_read(p9) + assert p9[8] == '\x81' + stm_write(p9) + p9[8] = 'g' + # + self.switch(1) + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(2) + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(1) + stm_read(p1) + assert p1[8] == '\x01' + # + self.switch(0) + stm_read(p2) + assert p2[8] == 'm' + stm_write(p2) + p2[8] = 'T' + # + self.switch(2) + stm_read(p4) + assert p4[8] == '\xc5' + # + self.switch(2) + stm_read(p9) + assert p9[8] == 'g' + # + self.switch(2) + stm_read(p1) + assert p1[8] == '\x01' + stm_write(p1) + p1[8] = 'L' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(2) + stm_read(p0) + assert p0[8] == '\x00' + stm_write(p0) + p0[8] = '\xf3' + # + self.switch(1) + stm_stop_transaction(False) #5 + # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [] + # + self.switch(0) + stm_read(p1) + assert p1[8] == '\x01' + stm_write(p1) + p1[8] = '*' + # + self.switch(1) + stm_start_transaction() + stm_read(p3) + assert p3[8] == '\x03' + stm_write(p3) + p3[8] = '\xd2' + # + self.switch(0) + stm_stop_transaction(False) #6 + # ['\x00', '*', 'T', '\x03', '\x0c', 'Z', '\x06', '\x07', '\x08', '\t'] + # log: [1, 2, 4, 5] + # + self.switch(1) + stm_read(p7) + assert p7[8] == '\x07' + stm_write(p7) + p7[8] = '.' + # + self.switch(0) + stm_start_transaction() + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(1) + stm_read(p2) + assert p2[8] == 'm' + stm_write(p2) + p2[8] = '\xe9' + # + self.switch(1) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(0) + stm_read(p1) + assert p1[8] == '*' + # + self.switch(0) + stm_read(p8) + assert p8[8] == '\x08' + stm_write(p8) + p8[8] = 'X' + # + self.switch(2) + stm_stop_transaction(True) #7 + # conflict: 0xdf0a8018 + # + self.switch(1) + stm_read(p9) + assert p9[8] == '\t' + # + self.switch(0) + stm_read(p8) + assert p8[8] == 'X' + # + self.switch(1) + stm_read(p4) + assert p4[8] == '\xc5' + stm_write(p4) + p4[8] = '\xb2' + # + self.switch(0) + stm_read(p9) + assert p9[8] == '\t' + # + self.switch(2) + stm_start_transaction() + stm_read(p5) + assert p5[8] == 'Z' + stm_write(p5) + p5[8] = '\xfa' + # + self.switch(2) + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(1) + stm_read(p9) + assert p9[8] == '\t' + # + self.switch(1) + stm_read(p8) + assert p8[8] == '\x08' + stm_write(p8) + p8[8] = 'g' + # + self.switch(1) + stm_read(p8) + assert p8[8] == 'g' + # + self.switch(2) + stm_read(p5) + assert p5[8] == '\xfa' + stm_write(p5) + p5[8] = '\x86' + # + self.switch(2) + stm_read(p6) + assert p6[8] == '\x06' + # + self.switch(1) + stm_read(p4) + assert p4[8] == '\xb2' + stm_write(p4) + p4[8] = '\xce' + # + self.switch(2) + stm_read(p2) + assert p2[8] == 'T' + stm_write(p2) + p2[8] = 'Q' + # + self.switch(1) + stm_stop_transaction(True) #8 + # conflict: 0xdf0a8028 + # + self.switch(2) + stm_stop_transaction(False) #9 + # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] + # log: [2, 5] + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_start_transaction() + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(1) + stm_read(p5) + assert p5[8] == '\x86' + # + self.switch(2) + 
stm_start_transaction() + stm_read(p4) + assert p4[8] == '\x0c' + stm_write(p4) + p4[8] = '{' + # + self.switch(1) + stm_read(p2) + assert p2[8] == 'Q' + # + self.switch(2) + stm_read(p3) + assert p3[8] == '\x03' + stm_write(p3) + p3[8] = 'V' + # + self.switch(1) + stm_stop_transaction(False) #10 + # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] + # log: [] + # + self.switch(1) + stm_start_transaction() + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(2) + stm_read(p0) + assert p0[8] == '\x00' + stm_write(p0) + p0[8] = 'P' + # + self.switch(0) + stm_stop_transaction(False) #11 diff --git a/c5/test/test_random.py b/c5/test/test_random.py new file mode 100644 --- /dev/null +++ b/c5/test/test_random.py @@ -0,0 +1,85 @@ +from support import * +import sys, random + + +class TestRandom(BaseTest): + + def test_fixed_16_bytes_objects(self): + rnd = random.Random(1010) + + N_OBJECTS = 10 + N_THREADS = 3 + print >> sys.stderr, 'stm_start_transaction()' + stm_start_transaction() + plist = [stm_allocate(16) for i in range(N_OBJECTS)] + read_sets = [{} for i in range(N_THREADS)] + write_sets = [{} for i in range(N_THREADS)] + active_transactions = {} + + for i in range(N_OBJECTS): + print >> sys.stderr, 'p%d = stm_allocate(16)' % i + for i in range(N_OBJECTS): + print >> sys.stderr, 'p%d[8] = %r' % (i, chr(i)) + plist[i][8] = chr(i) + head_state = [[chr(i) for i in range(N_OBJECTS)]] + commit_log = [] + print >> sys.stderr, 'stm_stop_transaction(False)' + stm_stop_transaction(False) + + for i in range(N_THREADS): + print >> sys.stderr, 'self.switch(%d)' % i + self.switch(i) + stop_count = 1 + + for i in range(10000): + n_thread = rnd.randrange(0, N_THREADS) + print >> sys.stderr, '#\nself.switch(%d)' % n_thread + self.switch(n_thread) + if n_thread not in active_transactions: + print >> sys.stderr, 'stm_start_transaction()' + stm_start_transaction() + active_transactions[n_thread] = len(commit_log) + + action = rnd.randrange(0, 7) + if action < 6: + is_write = action >= 4 + i = rnd.randrange(0, N_OBJECTS) + print >> sys.stderr, "stm_read(p%d)" % i + stm_read(plist[i]) + got = plist[i][8] + print >> sys.stderr, "assert p%d[8] ==" % i, + my_head_state = head_state[active_transactions[n_thread]] + prev = read_sets[n_thread].setdefault(i, my_head_state[i]) + print >> sys.stderr, "%r" % (prev,) + assert got == prev + # + if is_write: + print >> sys.stderr, 'stm_write(p%d)' % i + stm_write(plist[i]) + newval = chr(rnd.randrange(0, 256)) + print >> sys.stderr, 'p%d[8] = %r' % (i, newval) + plist[i][8] = newval + read_sets[n_thread][i] = write_sets[n_thread][i] = newval + else: + src_index = active_transactions.pop(n_thread) + conflict = False + for i in range(src_index, len(commit_log)): + for j in commit_log[i]: + if j in read_sets[n_thread]: + conflict = True + print >> sys.stderr, "stm_stop_transaction(%r) #%d" % ( + conflict, stop_count) + stop_count += 1 + stm_stop_transaction(conflict) + # + if not conflict: + hs = head_state[-1][:] + for i, newval in write_sets[n_thread].items(): + hs[i] = newval + assert plist[i][8] == newval + head_state.append(hs) + commit_log.append(write_sets[n_thread].keys()) + print >> sys.stderr, '#', head_state[-1] + print >> sys.stderr, '# log:', commit_log[-1] + write_sets[n_thread].clear() + read_sets[n_thread].clear() From noreply at buildbot.pypy.org Wed Dec 18 17:33:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 18 Dec 2013 17:33:25 +0100 (CET) Subject: [pypy-commit] stmgc c5: Use MADV_DONTNEED to clear the read marker 
pages after 255 transactions. Message-ID: <20131218163325.A49C51C314B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r558:d3ce94726f63 Date: 2013-12-18 17:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/d3ce94726f63/ Log: Use MADV_DONTNEED to clear the read marker pages after 255 transactions. diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -36,6 +36,12 @@ writes to it now happen to go to the new mm page instead of the old one. + This is basically what happens automatically with fork() for regular + memory; the difference is that at commit time, we try to publish the + modified pages back for everybody to see. This involves possibly + merging changes done by other processes to other objects from the + same page. + The local pages are usually referenced by pointers, but may also be expressed as an index, called the "local index" of the page. */ @@ -134,6 +140,11 @@ (unsigned char)(uintptr_t)stm_local.current_read_markers); } +static struct read_marker_s *get_current_read_marker(struct object_s *object) +{ + return stm_local.current_read_markers + (((uintptr_t)object) >> 4); +} + void _stm_write_slowpath(struct object_s *); void stm_write(struct object_s *object) @@ -282,6 +293,26 @@ stm_local.current_read_markers += num; } +static void clear_all_read_markers(void) +{ + /* set the largest possible read marker number, to find the last + possible read_marker to clear */ + stm_set_read_marker_number(0xff); + + uint64_t page_index = stm_shared_descriptor->index_page_never_used; + char *o = ((char *)stm_shared_descriptor) + page_index * 4096; + char *m = (char *)get_current_read_marker((struct object_s *)o); + size_t length = m - (char *)stm_local.read_markers; + length = (length + 4095) & ~4095; + + int r = madvise(stm_local.read_markers, length, MADV_DONTNEED); + if (r != 0) { + perror("madvise() failure"); + abort(); + } + stm_set_read_marker_number(1); +} + void stm_setup(void) { if (sizeof(char *) != 8) { @@ -491,8 +522,12 @@ } stm_local.writes_by_this_transaction = NULL; - assert(stm_get_read_marker_number() < 0xff); - stm_local.current_read_markers++; + if (stm_get_read_marker_number() < 0xff) { + stm_local.current_read_markers++; + } + else { + clear_all_read_markers(); + } return !conflict; } From noreply at buildbot.pypy.org Wed Dec 18 20:49:05 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 18 Dec 2013 20:49:05 +0100 (CET) Subject: [pypy-commit] pypy default: fix promote_to_largest in reduce operations (fixes issue1663) Message-ID: <20131218194905.D05881C3391@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68469:912fe4190438 Date: 2013-12-18 14:41 -0500 http://bitbucket.org/pypy/pypy/changeset/912fe4190438/ Log: fix promote_to_largest in reduce operations (fixes issue1663) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -956,8 +956,7 @@ return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, promote_to_largest, cumulative)) - descr_sum = _reduce_ufunc_impl("add") - descr_sum_promote = _reduce_ufunc_impl("add", True) + descr_sum = _reduce_ufunc_impl("add", True) descr_prod = _reduce_ufunc_impl("multiply", True) descr_max = _reduce_ufunc_impl("maximum") descr_min = _reduce_ufunc_impl("minimum") diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ 
b/pypy/module/micronumpy/interp_ufuncs.py @@ -496,6 +496,15 @@ @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): + if promote_to_largest: + if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: + return interp_dtype.get_dtype_cache(space).w_int64dtype + elif dt.kind == NPY_UNSIGNEDLTR: + return interp_dtype.get_dtype_cache(space).w_uint64dtype + elif dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: + return dt + else: + assert False if promote_bools and (dt.kind == NPY_GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: @@ -507,15 +516,6 @@ if (dtype.kind == NPY_FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype - if promote_to_largest: - if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_float64dtype - elif dt.kind == NPY_FLOATINGLTR: - return interp_dtype.get_dtype_cache(space).w_float64dtype - elif dt.kind == NPY_UNSIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_uint64dtype - else: - assert False return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1344,7 +1344,7 @@ assert d[1] == 12 def test_sum(self): - from numpypy import array, zeros + from numpypy import array, zeros, float16, complex64, str_ a = array(range(5)) assert a.sum() == 10 assert a[:4].sum() == 6 @@ -1352,6 +1352,12 @@ a = array([True] * 5, bool) assert a.sum() == 5 + assert array([True, False] * 200).sum() == 200 + assert array([True, False] * 200, dtype='int8').sum() == 200 + assert array([True, False] * 200).sum(dtype='int8') == -56 + assert type(array([True, False] * 200, dtype='float16').sum()) is float16 + assert type(array([True, False] * 200, dtype='complex64').sum()) is complex64 + raises(TypeError, 'a.sum(axis=0, out=3)') raises(ValueError, 'a.sum(axis=2)') d = array(0.) 
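[Editorial aside, not part of the patch above or below.] The assertions added in the hunk above pin down the observable effect of the promote_to_largest change: without an explicit dtype=, sum() and prod() now accumulate booleans and small signed integers in int64 (unsigned kinds in uint64), while float and complex inputs keep their own dtype, and an explicit dtype= argument still wins. The following Python 2 snippet is an illustration only; it simply replays the values asserted by the new tests and assumes a PyPy build containing this change (numpypy is PyPy's built-in numpy substitute).

    from numpypy import array, float16

    a = array([True, False] * 200)            # 400 booleans, 200 of them True
    print a.sum()                             # 200: bools accumulate in int64, no overflow
    print array([True, False] * 200, dtype='int8').sum()   # 200: int8 input is promoted to int64
    print a.sum(dtype='int8')                 # -56: explicit dtype= skips the promotion, 200 wraps in int8
    print type(array([True, False] * 200, dtype='float16').sum()) is float16   # True: floats keep their dtype
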
@@ -1394,10 +1400,16 @@ assert (array([[1,2],[3,4]]).prod(1) == [2, 12]).all() def test_prod(self): - from numpypy import array + from numpypy import array, int_, dtype a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 + a = array([True, False]) + assert a.prod() == 0 + assert type(a.prod()) is int_ + a = array([True, False], dtype='uint') + assert a.prod() == 0 + assert type(a.prod()) is dtype('uint').type def test_max(self): from numpypy import array, zeros From noreply at buildbot.pypy.org Wed Dec 18 22:15:11 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 18 Dec 2013 22:15:11 +0100 (CET) Subject: [pypy-commit] pypy default: adapt import library for changeset a1989cb701a7 Message-ID: <20131218211512.011E81D22B1@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68470:69886daae534 Date: 2013-12-18 23:13 +0200 http://bitbucket.org/pypy/pypy/changeset/69886daae534/ Log: adapt import library for changeset a1989cb701a7 diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -115,10 +115,11 @@ continue print "Picking %s" % p binaries.append((p, p.basename)) - if pypy_c.dirpath().join("libpypy-c.lib").check(): - shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")), + importlib_name = 'python27.lib' + if pypy_c.dirpath().join(importlib_name).check(): + shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), str(pypydir.join('include/python27.lib'))) - print "Picking %s as %s" % (pypy_c.dirpath().join("libpypy-c.lib"), + print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), pypydir.join('include/python27.lib')) else: pass From noreply at buildbot.pypy.org Wed Dec 18 23:18:05 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 18 Dec 2013 23:18:05 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: coding convention fix Message-ID: <20131218221805.8450A1C347B@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68471:a53125f2cdc0 Date: 2013-10-17 12:28 -0700 http://bitbucket.org/pypy/pypy/changeset/a53125f2cdc0/ Log: coding convention fix diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -89,11 +89,11 @@ cppyy_index_t cppyy_get_global_operator( cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op); - /* method properties ----------------------------------------------------- */ + /* method properties ------------------------------------------------------ */ int cppyy_is_constructor(cppyy_type_t type, cppyy_index_t idx); int cppyy_is_staticmethod(cppyy_type_t type, cppyy_index_t idx); - /* data member reflection information ------------------------------------ */ + /* data member reflection information ------------------------------------- */ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); @@ -101,7 +101,7 @@ int cppyy_datamember_index(cppyy_scope_t scope, const char* name); - /* data member properties ------------------------------------------------ */ + /* data member properties ------------------------------------------------- */ int cppyy_is_publicdata(cppyy_type_t type, int datamember_index); int cppyy_is_staticdata(cppyy_type_t type, int datamember_index); From noreply at buildbot.pypy.org Wed Dec 18 23:18:14 2013 
From: noreply at buildbot.pypy.org (wlav) Date: Wed, 18 Dec 2013 23:18:14 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20131218221814.B89531C347B@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68472:db156904ed81 Date: 2013-12-18 12:27 -0800 http://bitbucket.org/pypy/pypy/changeset/db156904ed81/ Log: merge default into branch diff too long, truncating to 2000 out of 47038 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F +0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 -NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 -M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw 
+gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK +DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov +L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No 
[certificate chain diff for sha256.pem truncated: base64 certificate bodies omitted]
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = 
self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for 
backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) diff --git a/lib-python/2.7/test/test_support.py 
b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. + L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. 
lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -128,10 +128,10 @@ """ if hex is not None: - if (bytes is not None or bytes_le is not None or fields is not None - or int is not None): - raise TypeError('if the hex argument is given, bytes, bytes_le, fields,' - ' and int need to be None') + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: @@ -139,8 +139,8 @@ int = long(hex, 16) elif bytes_le is not None: if bytes is not None or fields is not None or int is not None: - raise TypeError('if the bytes_le argument is given, bytes, fields,' - ' and int need to be None') + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + @@ -150,15 +150,16 @@ struct.unpack('>Q', bytes[8:])[0]) elif bytes is not None: if fields is not None or int is not None: - raise TypeError('if the bytes argument is given, fields' - ' and int need to be None') + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') int = (struct.unpack('>Q', bytes[:8])[0] << 64 | struct.unpack('>Q', bytes[8:])[0]) elif fields is not None: if int is not None: - raise TypeError('if the fields argument is given, int needs to be None') + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). 
It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. 
@@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -363,9 +371,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return result + pass # fall through and return tcl object. 
elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -72,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -111,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. 
""" assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -320,9 +347,13 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +373,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +405,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +437,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +483,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, 
pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
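# ---------------------------------------------------------------------------
# Illustrative aside (a minimal sketch, not part of the changeset above): the
# _typeof() / _typeof_locked() split added to cffi/api.py earlier in this
# changeset follows a common pattern -- read the cache without the lock, and
# only take the lock to fill in a missing entry.  The class and names below
# are hypothetical stand-ins, not cffi API:

import threading

class CachedParser(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._cache = {}                    # cdecl string -> parsed result

    def _parse_locked(self, cdecl):
        # call me with the lock held!
        if cdecl in self._cache:            # another thread may have filled
            return self._cache[cdecl]       # the entry while we waited
        result = ('parsed', cdecl)          # stand-in for the real parse work
        self._cache[cdecl] = result
        return result

    def parse(self, cdecl):
        try:
            return self._cache[cdecl]       # fast path: no lock on a cache hit
        except KeyError:
            with self._lock:
                return self._parse_locked(cdecl)

# Because _parse_locked() re-checks the cache under the lock, each string is
# parsed at most once even when several threads miss at the same time.
# ---------------------------------------------------------------------------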
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . 
import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . 
import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py From noreply at buildbot.pypy.org Wed Dec 18 23:18:15 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 18 Dec 2013 23:18:15 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: fix callback declaration Message-ID: <20131218221815.EF3AA1C347B@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68473:0744e9384dae Date: 2013-12-18 14:07 -0800 http://bitbucket.org/pypy/pypy/changeset/0744e9384dae/ Log: fix callback declaration diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -8,6 +8,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import libffi, rdynload +from rpython.tool.udir import udir from pypy.module.cppyy.capi.capi_types import C_OBJECT @@ -22,13 +23,13 @@ import commands (stat, incdir) = commands.getstatusoutput("root-config --incdir") if stat != 0: - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")] + rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: - rootincpath = [incdir] + rootincpath = [incdir, py.path.local(udir)] rootlibpath = commands.getoutput("root-config --libdir").split() else: - rootincpath = [] + rootincpath = [py.path.local(udir)] rootlibpath = [] def identify(): @@ -422,7 +423,7 @@ from pypy.module.cpyext.api import 
cpython_api, CANNOT_FAIL @cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) -def cppyy_recursive_remove(space, cppobject): +def _Py_cppyy_recursive_remove(space, cppobject): from pypy.module.cppyy.interp_cppyy import memory_regulator from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -37,6 +37,9 @@ #include #include +// for recursive_remove callback +#include "pypy_macros.h" + /* ROOT/CINT internals --------------------------------------------------- */ extern long G__store_struct_offset; @@ -61,12 +64,12 @@ }; // memory regulation (cppyy_recursive_remove is generated as a cpyext capi call) -extern "C" void cppyy_recursive_remove(void*); +extern "C" void _Py_cppyy_recursive_remove(void*); class Cppyy_MemoryRegulator : public TObject { public: virtual void RecursiveRemove(TObject* object) { - cppyy_recursive_remove((void*)object); + _Py_cppyy_recursive_remove((void*)object); } }; From noreply at buildbot.pypy.org Wed Dec 18 23:18:17 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 18 Dec 2013 23:18:17 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: revert back to default choice of capi Message-ID: <20131218221817.160E41C347B@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68474:2a1f5512b630 Date: 2013-12-18 14:18 -0800 http://bitbucket.org/pypy/pypy/changeset/2a1f5512b630/ Log: revert back to default choice of capi diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/cppyy/capi/__init__.py @@ -9,8 +9,8 @@ # the selection of the desired backend (default is Reflex). 
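# ---------------------------------------------------------------------------
# Illustrative aside (a rough sketch, not part of the changeset above): the
# cint_capi.py hunk a little earlier asks ROOT's own 'root-config' tool for
# its include and library directories and falls back to the ROOTSYS
# environment variable when the tool fails.  Sketched here with subprocess
# instead of the old 'commands' module; find_root_dirs is a hypothetical name:

import os
import subprocess

def find_root_dirs():
    """Return (include_dirs, library_dirs) for a ROOT installation, if any."""
    try:
        incdir = subprocess.check_output(['root-config', '--incdir']).strip()
        libdir = subprocess.check_output(['root-config', '--libdir']).strip()
        return [incdir], libdir.split()
    except (OSError, subprocess.CalledProcessError):
        rootsys = os.environ.get('ROOTSYS')
        if rootsys is None:
            return [], []                   # no ROOT found; caller decides
        return ([os.path.join(rootsys, 'include')],
                [os.path.join(rootsys, 'lib64'),
                 os.path.join(rootsys, 'lib')])
# ---------------------------------------------------------------------------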
# choose C-API access method: -#from pypy.module.cppyy.capi.loadable_capi import * -from pypy.module.cppyy.capi.builtin_capi import * +from pypy.module.cppyy.capi.loadable_capi import * +#from pypy.module.cppyy.capi.builtin_capi import * from pypy.module.cppyy.capi.capi_types import C_OBJECT,\ C_NULL_TYPE, C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import jit -#import reflex_capi as backend -import cint_capi as backend +import reflex_capi as backend +#import cint_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ From noreply at buildbot.pypy.org Thu Dec 19 00:48:55 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 00:48:55 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131218234855.0B8811C08A1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68475:c9fe258e0217 Date: 2013-12-18 15:40 -0800 http://bitbucket.org/pypy/pypy/changeset/c9fe258e0217/ Log: merge default diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -2,8 +2,8 @@ def bin(x): """Return the binary representation of an integer.""" - x = operator.index(x) - return x.__format__("#b") + value = operator.index(x) + return value.__format__("#b") def oct(x): """Return the octal representation of an integer.""" diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -84,6 +84,15 @@ assert bin(-2) == "-0b10" assert bin(Foo()) == "0b100" raises(TypeError, bin, 0.) 
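# ---------------------------------------------------------------------------
# Illustrative aside (a small sketch, not part of the changeset above): the
# app_operation.py hunk routes bin() through operator.index(), so objects
# that implement __index__ are accepted while objects that only define
# __int__ (or floats) are rejected -- which is what the test additions around
# here exercise.  The two classes below are hypothetical examples:

import operator

class WithIndex(object):
    def __index__(self):
        return 42

class WithIntOnly(object):
    def __int__(self):
        return 42

assert operator.index(WithIndex()) == 42              # lossless integer: ok
assert format(operator.index(WithIndex()), '#b') == '0b101010'
try:
    operator.index(WithIntOnly())                     # __int__ alone is not
except TypeError:                                     # an index -> TypeError
    pass
# ---------------------------------------------------------------------------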
+ class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert "index" in exc.value.message def test_oct(self): class Foo: diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -5,7 +5,9 @@ from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw class LLBuffer(RWBuffer): @@ -34,8 +36,7 @@ def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) - for i in range(len(string)): - raw_cdata[i] = string[i] + copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) class MiniBuffer(W_Root): diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -206,8 +206,7 @@ w_value.get_array_length() == length): # fast path: copying from exactly the correct type s = w_value._cdata - for i in range(ctitemsize * length): - cdata[i] = s[i] + rffi.c_memcpy(cdata, s, ctitemsize * length) keepalive_until_here(w_value) return # @@ -259,7 +258,6 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray - from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,7 +1,9 @@ import sys def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. 
Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: @@ -18,15 +20,15 @@ res *= i return res - #Experimentally this gap seems good - gap = max(100, x>>7) + # Experimentally this gap seems good + gap = max(100, x >> 7) def _fac_odd(low, high): - if low+gap >= high: + if low + gap >= high: t = 1 for i in range(low, high, 2): t *= i return t - + mid = ((low + high) >> 1) | 1 return _fac_odd(low, mid) * _fac_odd(mid, high) diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -502,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -132,6 +132,12 @@ if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() + if space.is_none(w_idx): + new_shape = [1] + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) + arr_iter = arr.create_iter(new_shape) + arr_iter.setitem(self.value) + return arr raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -158,6 +158,12 @@ assert isinstance(box, W_Float64Box) return space.wrap(box.value) + def descr_oct(self, space): + return space.oct(self.descr_int(space)) + + def descr_hex(self, space): + return space.hex(self.descr_int(space)) + def descr_nonzero(self, space): dtype = self.get_dtype(space) return space.wrap(dtype.itemtype.bool(self)) @@ -506,6 +512,8 @@ __int__ = interp2app(W_GenericBox.descr_int), __float__ = interp2app(W_GenericBox.descr_float), __bool__ = interp2app(W_GenericBox.descr_nonzero), + __oct__ = interp2app(W_GenericBox.descr_oct), + __hex__ = interp2app(W_GenericBox.descr_hex), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/interp_flagsobj.py --- a/pypy/module/micronumpy/interp_flagsobj.py +++ b/pypy/module/micronumpy/interp_flagsobj.py @@ -18,6 +18,16 @@ def descr_get_writeable(self, space): return space.w_True + def descr_get_fnc(self, space): + return space.wrap( + space.is_true(self.descr_get_fortran(space)) and not + space.is_true(self.descr_get_contiguous(space))) + + def descr_get_forc(self, space): + return space.wrap( + space.is_true(self.descr_get_fortran(space)) or + space.is_true(self.descr_get_contiguous(space))) + def descr_getitem(self, space, w_item): key = space.str_w(w_item) if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": @@ -26,6 +36,10 @@ return self.descr_get_fortran(space) if key == "W" or key == "WRITEABLE": return self.descr_get_writeable(space) + if key == "FNC": + return self.descr_get_fnc(space) + if key == "FORC": + return self.descr_get_forc(space) raise OperationError(space.w_KeyError, space.wrap( "Unknown flag")) @@ -56,4 +70,6 @@ f_contiguous = GetSetProperty(W_FlagsObject.descr_get_fortran), fortran = GetSetProperty(W_FlagsObject.descr_get_fortran), writeable = GetSetProperty(W_FlagsObject.descr_get_writeable), + fnc = 
GetSetProperty(W_FlagsObject.descr_get_fnc), + forc = GetSetProperty(W_FlagsObject.descr_get_forc), ) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -493,8 +493,11 @@ if not space.is_none(w_dtype): raise OperationError(space.w_NotImplementedError, space.wrap( "__array__(dtype) not implemented")) - # stub implementation of __array__() - return self + if type(self) is W_NDimArray: + return self + return W_NDimArray.from_shape_and_storage( + space, self.get_shape(), self.implementation.storage, + self.get_dtype(), w_base=self) def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) @@ -1009,8 +1012,8 @@ multiarray = numpypy.get("multiarray") assert isinstance(multiarray, MixedModule) reconstruct = multiarray.get("_reconstruct") - - parameters = space.newtuple([space.gettypefor(W_NDimArray), space.newtuple([space.wrap(0)]), space.wrap("b")]) + parameters = space.newtuple([self.getclass(space), + space.newtuple([space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() if isinstance(self.implementation, SliceArray): @@ -1033,14 +1036,22 @@ return space.newtuple([reconstruct, parameters, state]) def descr_setstate(self, space, w_state): - from rpython.rtyper.lltypesystem import rffi - - shape = space.getitem(w_state, space.wrap(1)) - dtype = space.getitem(w_state, space.wrap(2)) - assert isinstance(dtype, interp_dtype.W_Dtype) - isfortran = space.getitem(w_state, space.wrap(3)) - storage = space.getitem(w_state, space.wrap(4)) - + lens = space.len_w(w_state) + # numpy compatability, see multiarray/methods.c + if lens == 5: + base_index = 1 + elif lens == 4: + base_index = 0 + else: + raise OperationError(space.w_ValueError, space.wrap( + "__setstate__ called with len(args[1])==%d, not 5 or 4" % lens)) + shape = space.getitem(w_state, space.wrap(base_index)) + dtype = space.getitem(w_state, space.wrap(base_index+1)) + isfortran = space.getitem(w_state, space.wrap(base_index+2)) + storage = space.getitem(w_state, space.wrap(base_index+3)) + if not isinstance(dtype, interp_dtype.W_Dtype): + raise OperationError(space.w_ValueError, space.wrap( + "__setstate__(self, (shape, dtype, .. 
called with improper dtype '%r'" % dtype)) self.implementation = W_NDimArray.from_shape_and_storage(space, [space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), @@ -1056,9 +1067,9 @@ return w_obj pass - at unwrap_spec(offset=int, order=str) + at unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, order='C'): + offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides dtype = space.interp_w(interp_dtype.W_Dtype, @@ -1092,6 +1103,11 @@ if not shape: return W_NDimArray.new_scalar(space, dtype) + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_CORDER: + order = 'C' + else: + order = 'F' if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) @@ -1162,6 +1178,7 @@ __str__ = interp2app(W_NDimArray.descr_str), __int__ = interp2app(W_NDimArray.descr_int), __float__ = interp2app(W_NDimArray.descr_float), + __buffer__ = interp2app(W_NDimArray.descr_get_data), __pos__ = interp2app(W_NDimArray.descr_pos), __neg__ = interp2app(W_NDimArray.descr_neg), diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -564,8 +564,11 @@ index_w[i] = indexes_w[i] w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space, shapelen)) - arr.descr_setitem(space, space.newtuple(index_w), - val_arr.descr_getitem(space, w_idx)) + if val_arr.is_scalar(): + w_value = val_arr.get_scalar_value() + else: + w_value = val_arr.descr_getitem(space, w_idx) + arr.descr_setitem(space, space.newtuple(index_w), w_value) iter.next() byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -51,8 +51,8 @@ rstrides.append(strides[i]) rbackstrides.append(backstrides[i]) if backwards: - rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) - rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) + rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) + rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) else: rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides @@ -62,7 +62,7 @@ if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if (space.isinstance_w(w_elem, space.w_tuple) or - isinstance(w_elem, W_NDimArray) or + isinstance(w_elem, W_NDimArray) or space.isinstance_w(w_elem, space.w_list)): return False return True @@ -87,6 +87,12 @@ space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) new_batch += space.listview(w_elem) shape.append(size) batch = new_batch diff --git a/pypy/module/micronumpy/test/test_flagsobj.py 
b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -12,6 +12,10 @@ a = np.array([1,2,3]) assert a.flags.c_contiguous == True assert a.flags['W'] == True + assert a.flags.fnc == False + assert a.flags.forc == True + assert a.flags['FNC'] == False + assert a.flags['FORC'] == True raises(KeyError, "a.flags['blah']") raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") raises((TypeError, AttributeError), "a.flags.c_contiguous = False") diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -271,6 +271,17 @@ # test uninitialized value crash? assert len(str(a)) > 0 + import sys + for order in [False, True, 'C', 'F']: + a = ndarray.__new__(ndarray, (2, 3), float, order=order) + assert a.shape == (2, 3) + if order in [True, 'F'] and '__pypy__' not in sys.builtin_module_names: + assert a.flags['F'] + assert not a.flags['C'] + else: + assert a.flags['C'] + assert not a.flags['F'] + def test_ndmin(self): from numpypy import array @@ -309,6 +320,12 @@ e = d.repeat(3, 0) assert e.shape == (9, 4, 0) + def test_buffer(self): + import numpy as np + a = np.array([1,2,3]) + b = buffer(a) + assert type(b) is buffer + def test_type(self): from numpypy import array ar = array(range(5)) @@ -623,6 +640,9 @@ for y in range(2): expected[x, y] = math.cos(a[x]) * math.cos(b[y]) assert ((cos(a)[:,newaxis] * cos(b).T) == expected).all() + a = array(1)[newaxis] + assert a == array([1]) + assert a.shape == (1,) def test_newaxis_slice(self): from numpypy import array, newaxis @@ -1868,6 +1888,10 @@ assert (a == [0, 1, 1, 0, 4, 0, 6, 7, 8, 9]).all() raises(IndexError, "arange(10)[array([10])] = 3") raises(IndexError, "arange(10)[[-11]] = 3") + a = zeros(10) + b = array([3,4,5]) + a[b] = 1 + assert (a == [0, 0, 0, 1, 1, 1, 0, 0, 0, 0]).all() def test_array_scalar_index(self): import numpypy as np @@ -1941,11 +1965,13 @@ assert a.itemsize == 3 a = array(3.1415).astype('S3').dtype assert a.itemsize == 3 - try: + + import sys + if '__pypy__' not in sys.builtin_module_names: a = array(['1', '2','3']).astype(float) assert a[2] == 3.0 - except NotImplementedError: - skip('astype("float") not implemented for str arrays') + else: + raises(NotImplementedError, array(['1', '2', '3']).astype, float) def test_base(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -18,6 +18,18 @@ #raises(TypeError, np.complex_, '1+2j') assert math.isnan(np.complex_(None)) + def test_builtin(self): + import numpy as np + assert oct(np.int32(11)) == '013' + assert oct(np.float32(11.6)) == '013' + assert oct(np.complex64(11-12j)) == '013' + assert hex(np.int32(11)) == '0xb' + assert hex(np.float32(11.6)) == '0xb' + assert hex(np.complex64(11-12j)) == '0xb' + assert bin(np.int32(11)) == '0b1011' + exc = raises(TypeError, "bin(np.float32(11.6))") + assert exc.value.message.find('object cannot be interpreted as an index') != -1 + def test_pickle(self): from numpypy import dtype, zeros try: diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -3,6 +3,7 @@ class 
AppTestSupport(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_NoNew = cls.space.appexec([], '''(): @@ -50,6 +51,14 @@ b[0]=100 assert a[0,0] == 100 + assert type(a) is not ndarray + assert a[0,0] == 100 + assert a.base is not None + b = a.__array__() + assert type(b) is ndarray + assert b[0,0] == 100 + assert b.base is a + def test_subtype_view(self): from numpypy import ndarray, array class matrix(ndarray): @@ -62,6 +71,11 @@ assert isinstance(b, matrix) assert (b == a).all() + def test_subtype_like_matrix(self): + import numpy as np + arr = np.array([1,2,3]) + ret = np.ndarray.__new__(np.ndarray, arr.shape, arr.dtype, buffer=arr) + assert (arr == ret).all() def test_finalize(self): #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray @@ -246,3 +260,116 @@ c = array(a, float) assert c.dtype is dtype(float) + def test__getitem_modifies_shape(self): + import numpypy as N + # numpy's matrix class caused an infinite loop + class matrix(N.ndarray): + getcnt = 0 + def __new__(subtype, data, dtype=None, copy=True): + arr = N.array(data, dtype=dtype, copy=copy) + shape = arr.shape + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=True) + return ret + + def __getitem__(self, index): + matrix.getcnt += 1 + if matrix.getcnt > 10: + # XXX strides.find_shape_and_elems is sensitive + # to shape modification + xxx + out = N.ndarray.__getitem__(self, index) + + if not isinstance(out, N.ndarray): + return out + # Determine when we should have a column array + old_shape = out.shape + if out.ndim < 2: + sh = out.shape[0] + try: + n = len(index) + except: + n = 0 + if n > 1: + out.shape = (sh, 1) + else: + out.shape = (1, sh) + print 'out, shape was',old_shape,'now',out.shape + return out + a = matrix([[1., 2.]]) + b = N.array([a]) + + def test_setstate_no_version(self): + # Some subclasses of ndarray, like MaskedArray, do not use + # version in __setstare__ + from numpy import ndarray, array + from pickle import loads, dumps + import sys, new + class D(ndarray): + ''' A subtype with a constructor that accepts a list of + data values, where ndarray accepts a shape + ''' + def __new__(subtype, data, dtype=None, copy=True): + arr = array(data, dtype=dtype, copy=copy) + shape = arr.shape + ret = ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=True) + return ret + def __setstate__(self, state): + (version, shp, typ, isf, raw) = state + ndarray.__setstate__(self, (shp, typ, isf, raw)) + + D.__module__ = 'mod' + mod = new.module('mod') + mod.D = D + sys.modules['mod'] = mod + a = D([1., 2.]) + s = dumps(a) + #Taken from numpy version 1.8 + s_from_numpy = '''ignore this line + _reconstruct + p0 + (cmod + D + p1 + (I0 + tp2 + S'b' + p3 + tp4 + Rp5 + (I1 + (I2 + tp6 + cnumpy + dtype + p7 + (S'f8' + p8 + I0 + I1 + tp9 + Rp10 + (I3 + S'<' + p11 + NNNI-1 + I-1 + I0 + tp12 + bI00 + S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@' + p13 + tp14 + b.'''.replace(' ','') + for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]): + if len(ss)>10: + # ignore binary data, it will be checked later + continue + assert ss == sn + b = loads(s) + assert (a == b).all() + assert isinstance(b, D) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -27,12 +27,8 @@ def descr__new__(space, 
w_floattype, w_x): from pypy.objspace.std.floatobject import W_FloatObject w_value = w_x # 'x' is the keyword argument name in CPython - w_special = space.lookup(w_value, "__float__") - if w_special is not None: - w_obj = space.get_and_call_function(w_special, w_value) - if not space.isinstance_w(w_obj, space.w_float): - raise OperationError(space.w_TypeError, - space.wrap("__float__ returned non-float")) + if space.lookup(w_value, "__float__") is not None: + w_obj = space.float(w_value) if space.is_w(w_floattype, space.w_float): return w_obj value = space.float_w(w_obj) diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -98,41 +98,15 @@ ## w_value = w_x # 'x' is the keyword argument name in CPython ## value = 0 ## if w_base is None: -## ok = False ## # check for easy cases ## if type(w_value) is W_IntObject: ## value = w_value.intval -## ok = True -## elif space.isinstance_w(w_value, space.w_str): -## value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) -## ok = True -## elif space.isinstance_w(w_value, space.w_unicode): -## from pypy.objspace.std.unicodeobject import unicode_to_decimal_w -## string = unicode_to_decimal_w(space, w_value) -## value, w_longval = string_to_int_or_long(space, string) -## ok = True -## else: -## # If object supports the buffer interface -## try: -## w_buffer = space.buffer(w_value) -## except OperationError, e: -## if not e.match(space, space.w_TypeError): -## raise -## else: -## buf = space.interp_w(Buffer, w_buffer) -## value, w_longval = string_to_int_or_long(space, buf.as_str()) -## ok = True - -## if not ok: +## elif space.lookup(w_value, '__int__') is not None or \ +## space.lookup(w_value, '__trunc__') is not None: ## # otherwise, use the __int__() or the __trunc__() methods ## w_obj = w_value ## if space.lookup(w_obj, '__int__') is None: -## if space.lookup(w_obj, '__trunc__') is not None: -## w_obj = space.trunc(w_obj) -## else: -## raise operationerrfmt(space.w_TypeError, -## "int() argument must be a string or a number, not '%T'", -## w_obj) +## w_obj = space.trunc(w_obj) ## w_obj = space.int(w_obj) ## # 'int(x)' should return what x.__int__() returned, which should ## # be an int or long or a subclass thereof. 
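# ---------------------------------------------------------------------------
# Illustrative aside (a rough emulation, not part of the changeset above):
# the inttype.py / longtype.py hunks in this changeset make int() consult an
# object's __int__ (or __trunc__) method before trying to parse it as a
# string or buffer, which is what the new *_before_string tests check.
# int_like() and IntegralText below are hypothetical names:

def int_like(value):
    if hasattr(type(value), '__int__'):
        return type(value).__int__(value)     # tried first, even for str
    if hasattr(type(value), '__trunc__'):
        return int(type(value).__trunc__(value))
    if isinstance(value, (str, bytes)):
        return int(value)                     # only now parse the text
    raise TypeError("int() argument must be a string or a number")

class IntegralText(str):
    def __int__(self):
        return 42

assert int_like(IntegralText('abc')) == 42    # __int__ wins over parsing
assert int_like('123') == 123                 # plain strings still parse
# ---------------------------------------------------------------------------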
@@ -141,13 +115,26 @@ ## # int_w is effectively what we want in this case, ## # we cannot construct a subclass of int instance with an ## # an overflowing long +## value = space.int_w(w_obj) +## elif space.isinstance_w(w_value, space.w_str): +## value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) +## elif space.isinstance_w(w_value, space.w_unicode): +## from pypy.objspace.std.unicodeobject import unicode_to_decimal_w +## string = unicode_to_decimal_w(space, w_value) +## value, w_longval = string_to_int_or_long(space, string) +## else: +## # If object supports the buffer interface ## try: -## value = space.int_w(w_obj) +## w_buffer = space.buffer(w_value) ## except OperationError, e: -## if e.match(space, space.w_TypeError): -## raise OperationError(space.w_ValueError, -## space.wrap("value can't be converted to int")) -## raise e +## if not e.match(space, space.w_TypeError): +## raise +## raise operationerrfmt(space.w_TypeError, +## "int() argument must be a string or a number, not '%T'", +## w_value) +## else: +## buf = space.interp_w(Buffer, w_buffer) +## value, w_longval = string_to_int_or_long(space, buf.as_str()) ## else: ## base = space.int_w(w_base) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -2,6 +2,7 @@ from pypy.interpreter import typedef from pypy.interpreter.gateway import ( WrappedDefault, applevel, interp2app, interpindirect2app, unwrap_spec) +from pypy.interpreter.buffer import Buffer from pypy.objspace.std.model import W_Object from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib.rstring import ParseStringError @@ -27,6 +28,13 @@ return w_value elif type(w_value) is W_LongObject: return newbigint(space, w_longtype, w_value.num) + elif space.lookup(w_value, '__int__') is not None: + w_obj = space.int(w_value) + return newbigint(space, w_longtype, space.bigint_w(w_obj)) + elif space.lookup(w_value, '__trunc__') is not None: + w_obj = space.trunc(w_value) + w_obj = space.int(w_obj) + return newbigint(space, w_longtype, space.bigint_w(w_obj)) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w return string_to_w_long(space, w_longtype, @@ -36,21 +44,17 @@ strvalue = space.bufferstr_w(w_value) return string_to_w_long(space, w_longtype, strvalue.decode('latin-1')) else: - # otherwise, use the __int__() or the __trunc__ methods - w_obj = w_value - if space.lookup(w_obj, '__int__') is not None: - w_obj = space.int(w_obj) - elif space.lookup(w_obj, '__trunc__') is not None: - w_obj = space.trunc(w_obj) - w_obj = space.int(w_obj) - else: + try: + w_buffer = space.buffer(w_value) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise raise operationerrfmt(space.w_TypeError, "int() argument must be a string or a number, not '%T'", - w_obj) - if space.is_w(w_longtype, space.w_int): - return w_obj - bigint = space.bigint_w(w_obj) - return newbigint(space, w_longtype, bigint) + w_value) + else: + buf = space.interp_w(Buffer, w_buffer) + return string_to_w_long(space, w_longtype, buf.as_str()) else: try: base = space.int_w(w_base) diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -461,9 +461,15 @@ return Integral() assert int(TruncReturnsNonInt()) == 42 + def test_int_before_string(self): + class Integral(str): + def 
__int__(self): + return 42 + assert int(Integral('abc')) == 42 + def test_getnewargs(self): assert 0 .__getnewargs__() == (0,) - + def test_bit_length(self): for val, bits in [ (0, 0), diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -259,6 +259,8 @@ n = -sys.maxsize-1 assert int(n) == n assert str(int(n)) == str(n) + a = memoryview(b'123') + assert int(a) == 123 def test_huge_longs(self): import operator @@ -297,6 +299,12 @@ return Integral() assert int(TruncReturnsNonInt()) == 42 + def test_long_before_string(self): + class A(str): + def __long__(self): + return 42 + assert int(A('abc')) == 42 + def test_conjugate(self): assert (7).conjugate() == 7 assert (-7).conjugate() == -7 diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -54,6 +54,12 @@ from rpython.rlib.rfile import create_temp_rfile return space.appcall(create_temp_rfile) + at register_flow_sc(os.remove) +def sc_os_remove(space, *args_w): + # on top of PyPy only: 'os.remove != os.unlink' + # (on CPython they are '==', but not identical either) + return space.appcall(os.unlink, *args_w) + # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1244,6 +1244,20 @@ graph = self.codetest(g) assert "Undefined closure variable 'b'" in str(excinfo.value) + def call_os_remove(msg): + os.remove(msg) + os.unlink(msg) + + def test_call_os_remove(self): + x = self.codetest(self.call_os_remove) + simplify_graph(x) + self.show(x) + ops = x.startblock.operations + assert ops[0].opname == 'simple_call' + assert ops[0].args[0].value is os.unlink + assert ops[1].opname == 'simple_call' + assert ops[1].args[0].value is os.unlink + DATA = {'x': 5, 'y': 6} From noreply at buildbot.pypy.org Thu Dec 19 00:48:56 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 00:48:56 +0100 (CET) Subject: [pypy-commit] pypy py3k: reapply py3k's special casing of int() results and fix buffer handling Message-ID: <20131218234856.403911C08A1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68476:225823af4c47 Date: 2013-12-18 15:45 -0800 http://bitbucket.org/pypy/pypy/changeset/225823af4c47/ Log: reapply py3k's special casing of int() results and fix buffer handling diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -29,12 +29,10 @@ elif type(w_value) is W_LongObject: return newbigint(space, w_longtype, w_value.num) elif space.lookup(w_value, '__int__') is not None: - w_obj = space.int(w_value) - return newbigint(space, w_longtype, space.bigint_w(w_obj)) + return _from_intlike(space, w_longtype, w_value) elif space.lookup(w_value, '__trunc__') is not None: w_obj = space.trunc(w_value) - w_obj = space.int(w_obj) - return newbigint(space, w_longtype, space.bigint_w(w_obj)) + return _from_intlike(space, w_longtype, w_obj) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w return string_to_w_long(space, 
w_longtype, @@ -54,7 +52,8 @@ w_value) else: buf = space.interp_w(Buffer, w_buffer) - return string_to_w_long(space, w_longtype, buf.as_str()) + return string_to_w_long(space, w_longtype, + buf.as_str().decode('latin-1')) else: try: base = space.int_w(w_base) @@ -77,6 +76,13 @@ return string_to_w_long(space, w_longtype, s, base) +def _from_intlike(space, w_longtype, w_intlike): + w_obj = space.int(w_intlike) + if space.is_w(w_longtype, space.w_int): + return w_obj + return newbigint(space, w_longtype, space.bigint_w(w_obj)) + + def string_to_w_long(space, w_longtype, s, base=10): try: bigint = rbigint.fromstr(s, base) From noreply at buildbot.pypy.org Thu Dec 19 00:48:57 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 00:48:57 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131218234857.951DC1C08A1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68477:d37bd06f36fa Date: 2013-12-18 15:47 -0800 http://bitbucket.org/pypy/pypy/changeset/d37bd06f36fa/ Log: merge default diff --git a/lib_pypy/_sha1.py b/lib_pypy/_sha1.py --- a/lib_pypy/_sha1.py +++ b/lib_pypy/_sha1.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -298,13 +298,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -10,7 +10,7 @@ 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', - 'ones': 'interp_numarray.ones', + 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', 'dot': 'interp_arrayops.dot', @@ -106,8 +106,6 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), - ('ones_like', 'ones_like'), - ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -47,7 +47,7 @@ def setslice(self, space, arr): impl = arr.implementation if impl.is_scalar(): - self.fill(impl.get_scalar_value()) + self.fill(space, impl.get_scalar_value()) return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: @@ -100,7 +100,7 @@ tmp = self.get_real(orig_array) tmp.setslice(space, convert_to_array(space, w_value)) - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): @@ -110,11 +110,11 @@ impl = NonWritableArray(self.get_shape(), 
self.dtype, self.order, strides, backstrides) if not self.dtype.is_flexible_type(): - impl.fill(self.dtype.box(0)) + impl.fill(space, self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(orig_array) + tmp = self.get_imag(space, orig_array) tmp.setslice(space, convert_to_array(space, w_value)) # -------------------- applevel get/setitem ----------------------- @@ -357,7 +357,7 @@ self.get_backstrides(), self.get_shape()) - def fill(self, box): + def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), box, 0, self.size, 0) @@ -435,8 +435,8 @@ def base(self): return self.orig_arr - def fill(self, box): - loop.fill(self, box.convert_to(self.dtype)) + def fill(self, space, box): + loop.fill(self, box.convert_to(space, self.dtype)) def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and \ diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -54,8 +54,7 @@ return self.value def set_scalar_value(self, w_val): - assert isinstance(w_val, W_GenericBox) - self.value = w_val.convert_to(self.dtype) + self.value = w_val def copy(self, space): scalar = Scalar(self.dtype) @@ -96,12 +95,12 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) else: self.value = w_arr.get_scalar_value() - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) scalar.value = self.value.convert_imag_to(scalar.dtype) @@ -125,7 +124,7 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) self.value = self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), ) def descr_getitem(self, space, _, w_idx): @@ -180,7 +179,7 @@ w_res.implementation.setitem(0, index_type.itemtype.box(0)) return space.newtuple([w_res]) - def fill(self, w_value): + def fill(self, space, w_value): self.value = w_value def get_storage_as_int(self, space): diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -89,7 +89,7 @@ shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) out = W_NDimArray.from_shape(space, shape, dtype) - return loop.where(out, shape, arr, x, y, dtype) + return loop.where(space, out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2, w_out=None): w_arr = convert_to_array(space, w_obj1) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -66,7 +66,7 @@ def __init__(self, value): self.value = value - def convert_to(self, dtype): + def convert_to(self, space, dtype): return dtype.box(self.value) def __repr__(self): @@ -91,7 +91,7 @@ self.real = real self.imag = imag - def convert_to(self, dtype): + def convert_to(self, space, dtype): return dtype.box_complex(self.real, self.imag) def convert_real_to(self, dtype): @@ -149,12 +149,12 @@ return 
space.index(self.item(space)) def descr_int(self, space): - box = self.convert_to(W_LongBox._get_dtype(space)) + box = self.convert_to(space, W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box._get_dtype(space)) + box = self.convert_to(space, W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) @@ -260,14 +260,13 @@ if not space.is_none(w_out): raise OperationError(space.w_NotImplementedError, space.wrap( "out not supported")) - v = self.convert_to(self.get_dtype(space)) - return self.get_dtype(space).itemtype.round(v, decimals) + return self.get_dtype(space).itemtype.round(self, decimals) def descr_astype(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) - return self.convert_to(dtype) + return self.convert_to(space, dtype) def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype @@ -306,7 +305,10 @@ return space.wrap(0) def descr_copy(self, space): - return self.convert_to(self.get_dtype(space)) + return self.convert_to(space, self.get_dtype(space)) + + def descr_buffer(self, space): + return self.descr_ravel(space).descr_get_data(space) w_flags = None def descr_get_flags(self, space): @@ -468,14 +470,16 @@ dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) - def convert_to(self, dtype): + def convert_to(self, space, dtype): # if we reach here, the record fields are guarenteed to match. return self class W_CharacterBox(W_FlexibleBox): - def convert_to(self, dtype): - # XXX assert dtype is str type - return self + def convert_to(self, space, dtype): + return dtype.coerce(space, space.wrap(self.raw_str())) + + def descr_len(self, space): + return space.len(self.item(space)) class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): @@ -514,6 +518,7 @@ __bool__ = interp2app(W_GenericBox.descr_nonzero), __oct__ = interp2app(W_GenericBox.descr_oct), __hex__ = interp2app(W_GenericBox.descr_hex), + __buffer__ = interp2app(W_GenericBox.descr_buffer), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), @@ -748,9 +753,11 @@ W_StringBox.typedef = TypeDef("bytes_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), + __len__ = interp2app(W_StringBox.descr_len), ) W_UnicodeBox.typedef = TypeDef("str_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), + __len__ = interp2app(W_UnicodeBox.descr_len), ) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -94,7 +94,7 @@ return space.wrap(self.get_size() * self.get_dtype().get_size()) def descr_fill(self, space, w_value): - self.fill(self.get_dtype().coerce(space, w_value)) + self.fill(space, self.get_dtype().coerce(space, w_value)) def descr_tostring(self, space, w_order=None): order = order_converter(space, w_order, NPY_CORDER) @@ -288,8 +288,8 @@ def set_scalar_value(self, w_val): self.implementation.set_scalar_value(w_val) - def fill(self, box): - self.implementation.fill(box) + def fill(self, space, box): + self.implementation.fill(space, box) def 
descr_get_size(self, space): return space.wrap(self.get_size()) @@ -314,7 +314,7 @@ self.implementation.get_real(self)) def descr_get_imag(self, space): - ret = self.implementation.get_imag(self) + ret = self.implementation.get_imag(space, self) return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): @@ -539,7 +539,7 @@ def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, dtype, impl.value) @@ -956,8 +956,7 @@ return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, promote_to_largest, cumulative)) - descr_sum = _reduce_ufunc_impl("add") - descr_sum_promote = _reduce_ufunc_impl("add", True) + descr_sum = _reduce_ufunc_impl("add", True) descr_prod = _reduce_ufunc_impl("multiply", True) descr_max = _reduce_ufunc_impl("maximum") descr_min = _reduce_ufunc_impl("minimum") @@ -988,19 +987,49 @@ shape = self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.int(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.int(self.descr_getitem(space, space.wrap(0))) - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to int")) + return space.int(value) def descr_float(self, space): shape = self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.float(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.float(self.descr_getitem(space, space.wrap(0))) - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to float")) + return space.float(value) + + def descr_index(self, space): + shape = self.get_shape() + if len(shape) == 0: + assert isinstance(self.implementation, scalar.Scalar) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only integer arrays with one element " + "can be converted to an index")) + if not self.get_dtype().is_int_type() or self.get_dtype().is_bool_type(): + raise OperationError(space.w_TypeError, space.wrap( + "only integer arrays with one element " + "can be converted to an index")) + assert isinstance(value, interp_boxes.W_GenericBox) + return value.item(space) def descr_reduce(self, space): from rpython.rlib.rstring import StringBuilder @@ -1101,8 +1130,6 
@@ w_base=w_buffer, writable=buf.is_writable()) - if not shape: - return W_NDimArray.new_scalar(space, dtype) order = order_converter(space, w_order, NPY_CORDER) if order == NPY_CORDER: order = 'C' @@ -1179,6 +1206,7 @@ __int__ = interp2app(W_NDimArray.descr_int), __float__ = interp2app(W_NDimArray.descr_float), __buffer__ = interp2app(W_NDimArray.descr_get_data), + __index__ = interp2app(W_NDimArray.descr_index), __pos__ = interp2app(W_NDimArray.descr_pos), __neg__ = interp2app(W_NDimArray.descr_neg), @@ -1358,36 +1386,34 @@ # arrays with correct dtype dtype = interp_dtype.decode_w_dtype(space, w_dtype) if isinstance(w_object, W_NDimArray) and \ - (space.is_none(w_dtype) or w_object.get_dtype() is dtype): + (space.is_none(w_dtype) or w_object.get_dtype() is dtype): shape = w_object.get_shape() if copy: w_ret = w_object.descr_copy(space) else: - if ndmin<= len(shape): + if ndmin <= len(shape): return w_object new_impl = w_object.implementation.set_shape(space, w_object, shape) w_ret = W_NDimArray(new_impl) if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_ret.implementation = w_ret.implementation.set_shape(space, - w_ret, shape) + w_ret, shape) return w_ret # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or ( - dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): for w_elem in elems_w: - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - dtype) - #if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: - # break - + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if dtype.is_str_or_unicode() and dtype.get_size() < 1: - # promote S0 -> S1, U0 -> U1 - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + elif dtype.is_str_or_unicode() and dtype.get_size() < 1: + # promote S0 -> S1, U0 -> U1 + dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) @@ -1400,25 +1426,20 @@ @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - if not shape: - return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) + return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) - at unwrap_spec(order=str) -def ones(space, w_shape, w_dtype=None, order='C'): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - shape = _find_shape(space, w_shape, dtype) - if not shape: - return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) - one = dtype.box(1) - w_arr.fill(one) - return space.wrap(w_arr) + at unwrap_spec(subok=bool) +def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): + w_a = convert_to_array(space, w_a) + if w_dtype is None: + dtype = w_a.get_dtype() + else: + dtype = 
space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, + w_instance=w_a if subok else None) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -226,7 +226,7 @@ dtype = out.get_dtype() else: out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) - return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, + return loop.do_axis_reduce(space, shape, self.func, obj, dtype, axis, out, self.identity, cumulative, temp) if cumulative: if out: @@ -235,7 +235,7 @@ "out of incompatible size")) else: out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) - loop.compute_reduce_cumulative(obj, out, dtype, self.func, + loop.compute_reduce_cumulative(space, obj, out, dtype, self.func, self.identity) return out if out: @@ -244,7 +244,7 @@ "for reduction operation %s has too many" " dimensions",self.name) dtype = out.get_dtype() - res = loop.compute_reduce(obj, dtype, self.func, self.done_func, + res = loop.compute_reduce(space, obj, dtype, self.func, self.done_func, self.identity) if out: out.set_scalar_value(res) @@ -303,13 +303,13 @@ res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype if w_obj.is_scalar(): w_val = self.func(calc_dtype, - w_obj.get_scalar_value().convert_to(calc_dtype)) + w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: return w_val if out.is_scalar(): out.set_scalar_value(w_val) else: - out.fill(res_dtype.coerce(space, w_val)) + out.fill(space, res_dtype.coerce(space, w_val)) return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) @@ -395,14 +395,14 @@ res_dtype = calc_dtype if w_lhs.is_scalar() and w_rhs.is_scalar(): arr = self.func(calc_dtype, - w_lhs.get_scalar_value().convert_to(calc_dtype), - w_rhs.get_scalar_value().convert_to(calc_dtype) + w_lhs.get_scalar_value().convert_to(space, calc_dtype), + w_rhs.get_scalar_value().convert_to(space, calc_dtype) ) if isinstance(out, W_NDimArray): if out.is_scalar(): out.set_scalar_value(arr) else: - out.fill(arr) + out.fill(space, arr) else: out = arr return out @@ -496,6 +496,15 @@ @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): + if promote_to_largest: + if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: + return interp_dtype.get_dtype_cache(space).w_int64dtype + elif dt.kind == NPY_UNSIGNEDLTR: + return interp_dtype.get_dtype_cache(space).w_uint64dtype + elif dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: + return dt + else: + assert False if promote_bools and (dt.kind == NPY_GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: @@ -507,15 +516,6 @@ if (dtype.kind == NPY_FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype - if promote_to_largest: - if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_float64dtype - elif dt.kind == NPY_FLOATINGLTR: - return interp_dtype.get_dtype_cache(space).w_float64dtype - elif dt.kind == NPY_UNSIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_uint64dtype - else: - assert False return dt def 
find_dtype_for_scalar(space, w_obj, current_guess=None): @@ -685,9 +685,6 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), - - ("ones_like", "ones_like", 1), - ("zeros_like", "zeros_like", 1), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -58,10 +58,10 @@ out=out, left_iter=left_iter, right_iter=right_iter, out_iter=out_iter) - w_left = left_iter.getitem().convert_to(calc_dtype) - w_right = right_iter.getitem().convert_to(calc_dtype) + w_left = left_iter.getitem().convert_to(space, calc_dtype) + w_right = right_iter.getitem().convert_to(space, calc_dtype) out_iter.setitem(func(calc_dtype, w_left, w_right).convert_to( - res_dtype)) + space, res_dtype)) left_iter.next() right_iter.next() out_iter.next() @@ -84,8 +84,8 @@ calc_dtype=calc_dtype, res_dtype=res_dtype, shape=shape, w_obj=w_obj, out=out, obj_iter=obj_iter, out_iter=out_iter) - elem = obj_iter.getitem().convert_to(calc_dtype) - out_iter.setitem(func(calc_dtype, elem).convert_to(res_dtype)) + elem = obj_iter.getitem().convert_to(space, calc_dtype) + out_iter.setitem(func(calc_dtype, elem).convert_to(space, res_dtype)) out_iter.next() obj_iter.next() return out @@ -111,7 +111,7 @@ shapelen = len(shape) while not target_iter.done(): setslice_driver1.jit_merge_point(shapelen=shapelen, dtype=dtype) - target_iter.setitem(source_iter.getitem().convert_to(dtype)) + target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) target_iter.next() source_iter.next() return target @@ -135,20 +135,20 @@ 'calc_dtype'], reds = 'auto') -def compute_reduce(obj, calc_dtype, func, done_func, identity): +def compute_reduce(space, obj, calc_dtype, func, done_func, identity): obj_iter = obj.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(calc_dtype) + cur_value = obj_iter.getitem().convert_to(space, calc_dtype) obj_iter.next() else: - cur_value = identity.convert_to(calc_dtype) + cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, calc_dtype=calc_dtype, ) - rval = obj_iter.getitem().convert_to(calc_dtype) + rval = obj_iter.getitem().convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval cur_value = func(calc_dtype, cur_value, rval) @@ -159,22 +159,22 @@ greens = ['shapelen', 'func', 'dtype'], reds = 'auto') -def compute_reduce_cumulative(obj, out, calc_dtype, func, identity): +def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): obj_iter = obj.create_iter() out_iter = out.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(calc_dtype) + cur_value = obj_iter.getitem().convert_to(space, calc_dtype) out_iter.setitem(cur_value) out_iter.next() obj_iter.next() else: - cur_value = identity.convert_to(calc_dtype) + cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype, ) - rval = obj_iter.getitem().convert_to(calc_dtype) + rval = obj_iter.getitem().convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) out_iter.setitem(cur_value) out_iter.next() @@ -190,7 +190,7 @@ greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') -def 
where(out, shape, arr, x, y, dtype): +def where(space, out, shape, arr, x, y, dtype): out_iter = out.create_iter(shape) arr_iter = arr.create_iter(shape) arr_dtype = arr.get_dtype() @@ -209,9 +209,9 @@ arr_dtype=arr_dtype) w_cond = arr_iter.getitem() if arr_dtype.itemtype.bool(w_cond): - w_val = x_iter.getitem().convert_to(dtype) + w_val = x_iter.getitem().convert_to(space, dtype) else: - w_val = y_iter.getitem().convert_to(dtype) + w_val = y_iter.getitem().convert_to(space, dtype) out_iter.setitem(w_val) out_iter.next() arr_iter.next() @@ -224,7 +224,7 @@ 'func', 'dtype'], reds='auto') -def do_axis_reduce(shape, func, arr, dtype, axis, out, identity, cumulative, +def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): out_iter = out.create_axis_iter(arr.get_shape(), axis, cumulative) if cumulative: @@ -233,7 +233,7 @@ temp_iter = out_iter # hack arr_iter = arr.create_iter() if identity is not None: - identity = identity.convert_to(dtype) + identity = identity.convert_to(space, dtype) shapelen = len(shape) while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, @@ -241,7 +241,7 @@ if arr_iter.done(): w_val = identity else: - w_val = arr_iter.getitem().convert_to(dtype) + w_val = arr_iter.getitem().convert_to(space, dtype) if out_iter.first_line: if identity is not None: w_val = func(dtype, identity, w_val) @@ -316,11 +316,11 @@ righti = right.create_dot_iter(broadcast_shape, right_skip) while not outi.done(): dot_driver.jit_merge_point(dtype=dtype) - lval = lefti.getitem().convert_to(dtype) - rval = righti.getitem().convert_to(dtype) - outval = outi.getitem().convert_to(dtype) + lval = lefti.getitem().convert_to(space, dtype) + rval = righti.getitem().convert_to(space, dtype) + outval = outi.getitem().convert_to(space, dtype) v = dtype.itemtype.mul(lval, rval) - value = dtype.itemtype.add(v, outval).convert_to(dtype) + value = dtype.itemtype.add(v, outval).convert_to(space, dtype) outi.setitem(value) outi.next() righti.next() @@ -457,7 +457,7 @@ arr_iter.next_skip_x(start) while length > 0: flatiter_setitem_driver1.jit_merge_point(dtype=dtype) - arr_iter.setitem(val_iter.getitem().convert_to(dtype)) + arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) # need to repeat i_nput values until all assignments are done arr_iter.next_skip_x(step) length -= 1 @@ -610,7 +610,7 @@ index = 0 else: index = len(iterators) - 1 - out_iter.setitem(iterators[index].getitem().convert_to(dtype)) + out_iter.setitem(iterators[index].getitem().convert_to(space, dtype)) for iter in iterators: iter.next() out_iter.next() @@ -629,9 +629,9 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(dtype) - w_min = min_iter.getitem().convert_to(dtype) - w_max = max_iter.getitem().convert_to(dtype) + w_v = arr_iter.getitem().convert_to(space, dtype) + w_min = min_iter.getitem().convert_to(space, dtype) + w_max = max_iter.getitem().convert_to(space, dtype) if dtype.itemtype.lt(w_v, w_min): w_v = w_min elif dtype.itemtype.gt(w_v, w_max): @@ -652,7 +652,7 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(dtype), + w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(space, dtype), decimals) out_iter.setitem(w_v) arr_iter.next() diff --git a/pypy/module/micronumpy/strides.py 
b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -62,9 +62,10 @@ if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if (space.isinstance_w(w_elem, space.w_tuple) or - isinstance(w_elem, W_NDimArray) or space.isinstance_w(w_elem, space.w_list)): return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False return True def find_shape_and_elems(space, w_iterable, dtype): @@ -72,7 +73,6 @@ batch = space.listview(w_iterable) is_rec_type = dtype is not None and dtype.is_record_type() while True: - new_batch = [] if not batch: return shape[:], [] if is_single_elem(space, batch[0], is_rec_type): @@ -81,6 +81,7 @@ raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) return shape[:], batch + new_batch = [] size = space.len_w(batch[0]) for w_elem in batch: if (is_single_elem(space, w_elem, is_rec_type) or diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -32,3 +32,8 @@ True_ = bool_(True) False_ = bool_(False) + +def ones(*args, **kwargs): + a = zeros(*args, **kwargs) + a.fill(1) + return a diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -345,7 +345,7 @@ # TypeError raises((TypeError, AttributeError), 'x.ndim = 3') - def test_init(self): + def test_zeros(self): from numpypy import zeros a = zeros(15) # Check that storage was actually zero'd. @@ -355,6 +355,34 @@ assert a[13] == 5.3 assert zeros(()).shape == () + def test_empty_like(self): + import numpy as np + a = np.empty_like(np.zeros(())) + assert a.shape == () + assert a.dtype == np.float_ + a = np.zeros((2, 3)) + assert a.shape == (2, 3) + a[0,0] = 1 + b = np.empty_like(a) + assert b.shape == a.shape + assert b.dtype == a.dtype + assert b[0,0] != 1 + b = np.empty_like(a, dtype='i4') + assert b.shape == a.shape + assert b.dtype == np.dtype('i4') + assert b[0,0] != 1 + b = np.empty_like([1,2,3]) + assert b.shape == (3,) + assert b.dtype == np.int_ + class A(np.ndarray): + pass + b = np.empty_like(A((2, 3))) + assert b.shape == (2, 3) + assert type(b) is A + b = np.empty_like(A((2, 3)), subok=False) + assert b.shape == (2, 3) + assert type(b) is np.ndarray + def test_size(self): from numpypy import array,arange,cos assert array(3).size == 1 @@ -455,6 +483,25 @@ a = array(range(5)) assert a[3] == 3 + def test_list_of_array_init(self): + import numpy as np + a = np.array([np.array(True), np.array(False)]) + assert a.shape == (2,) + assert a.dtype == np.bool_ + assert (a == [True, False]).all() + a = np.array([np.array(True), np.array(2)]) + assert a.shape == (2,) + assert a.dtype == np.int_ + assert (a == [1, 2]).all() + a = np.array([np.array(True), np.int_(2)]) + assert a.shape == (2,) + assert a.dtype == np.int_ + assert (a == [1, 2]).all() + a = np.array([np.array([True]), np.array([2])]) + assert a.shape == (2, 1) + assert a.dtype == np.int_ + assert (a == [[1], [2]]).all() + def test_getitem(self): from numpypy import array a = array(range(5)) @@ -1297,7 +1344,7 @@ assert d[1] == 12 def test_sum(self): - from numpypy import array, zeros + from numpypy import array, zeros, float16, complex64, str_ a = array(range(5)) assert a.sum() == 10 assert a[:4].sum() == 6 @@ 
-1305,6 +1352,12 @@ a = array([True] * 5, bool) assert a.sum() == 5 + assert array([True, False] * 200).sum() == 200 + assert array([True, False] * 200, dtype='int8').sum() == 200 + assert array([True, False] * 200).sum(dtype='int8') == -56 + assert type(array([True, False] * 200, dtype='float16').sum()) is float16 + assert type(array([True, False] * 200, dtype='complex64').sum()) is complex64 + raises(TypeError, 'a.sum(axis=0, out=3)') raises(ValueError, 'a.sum(axis=2)') d = array(0.) @@ -1347,10 +1400,16 @@ assert (array([[1,2],[3,4]]).prod(1) == [2, 12]).all() def test_prod(self): - from numpypy import array + from numpypy import array, int_, dtype a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 + a = array([True, False]) + assert a.prod() == 0 + assert type(a.prod()) is int_ + a = array([True, False], dtype='uint') + assert a.prod() == 0 + assert type(a.prod()) is dtype('uint').type def test_max(self): from numpypy import array, zeros @@ -1973,6 +2032,12 @@ else: raises(NotImplementedError, array(['1', '2', '3']).astype, float) + a = array('123') + assert a.astype('i8') == 123 + a = array('abcdefgh') + exc = raises(ValueError, a.astype, 'i8') + assert exc.value.message.startswith('invalid literal for int()') + def test_base(self): from numpypy import array assert array(1).base is None @@ -2068,6 +2133,11 @@ assert int(array([1])) == 1 assert raises(TypeError, "int(array([1, 2]))") assert int(array([1.5])) == 1 + for op in ["int", "float", "long"]: + for a in [array('123'), array(['123'])]: + exc = raises(TypeError, "%s(a)" % op) + assert exc.value.message == "don't know how to convert " \ + "scalar number to %s" % op def test__reduce__(self): from numpypy import array, dtype @@ -2698,6 +2768,17 @@ assert b[0] == 1 assert b[1] == 'ab' + def test_index(self): + import numpy as np + a = np.array([1], np.uint16) + i = a.__index__() + assert type(i) is int + assert i == 1 + for a in [np.array('abc'), np.array([1,2]), np.array([True])]: + exc = raises(TypeError, a.__index__) + assert exc.value.message == 'only integer arrays with one element ' \ + 'can be converted to an index' + def test_int_array_index(self): from numpypy import array assert (array([])[[]] == []).all() diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -20,6 +20,9 @@ def test_builtin(self): import numpy as np + assert int(np.str_('12')) == 12 + exc = raises(ValueError, "int(np.str_('abc'))") + assert exc.value.message.startswith('invalid literal for int()') assert oct(np.int32(11)) == '013' assert oct(np.float32(11.6)) == '013' assert oct(np.complex64(11-12j)) == '013' @@ -28,7 +31,10 @@ assert hex(np.complex64(11-12j)) == '0xb' assert bin(np.int32(11)) == '0b1011' exc = raises(TypeError, "bin(np.float32(11.6))") - assert exc.value.message.find('object cannot be interpreted as an index') != -1 + assert "index" in exc.value.message + exc = raises(TypeError, "len(np.int32(11))") + assert "has no len" in exc.value.message + assert len(np.string_('123')) == 3 def test_pickle(self): from numpypy import dtype, zeros @@ -77,6 +83,9 @@ a = np.bool_(True).astype('int32') assert type(a) is np.int32 assert a == 1 + a = np.str_('123').astype('int32') + assert type(a) is np.int32 + assert a == 123 def test_copy(self): import numpy as np @@ -86,6 +95,15 @@ assert b == a assert b is not a + def test_buffer(self): + import numpy as np + a = np.int32(123) + b = 
buffer(a) + assert type(b) is buffer + a = np.string_('abc') + b = buffer(a) + assert str(b) == a + def test_squeeze(self): import numpy as np assert np.True_.squeeze() is np.True_ diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -33,6 +33,11 @@ self = ndarray.__new__(subtype, shape, dtype) self.id = 'subtype' return self + a = C((), int) + assert type(a) is C + assert a.shape == () + assert a.dtype is dtype(int) + assert a.id == 'subtype' a = C([2, 2], int) assert isinstance(a, C) assert isinstance(a, ndarray) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1029,22 +1029,6 @@ assert logaddexp2(float('inf'), float('-inf')) == float('inf') assert logaddexp2(float('inf'), float('inf')) == float('inf') - def test_ones_like(self): - from numpypy import array, ones_like - - assert ones_like(False) == array(True) - assert ones_like(2) == array(1) - assert ones_like(2.) == array(1.) - assert ones_like(complex(2)) == array(complex(1)) - - def test_zeros_like(self): - from numpypy import array, zeros_like - - assert zeros_like(True) == array(False) - assert zeros_like(2) == array(0) - assert zeros_like(2.) == array(0.) - assert zeros_like(complex(2)) == array(complex(0)) - def test_accumulate(self): from numpypy import add, multiply, arange assert (add.accumulate([2, 3, 5]) == [2, 5, 10]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -300,14 +300,6 @@ def min(self, v1, v2): return min(v1, v2) - @simple_unary_op - def ones_like(self, v): - return 1 - - @simple_unary_op - def zeros_like(self, v): - return 0 - @raw_unary_op def rint(self, v): float64 = Float64() @@ -1543,14 +1535,6 @@ except ValueError: return rfloat.NAN, rfloat.NAN - @complex_unary_op - def ones_like(self, v): - return 1, 0 - - @complex_unary_op - def zeros_like(self, v): - return 0, 0 - class Complex64(ComplexFloating, BaseType): T = rffi.FLOAT BoxType = interp_boxes.W_Complex64Box diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py --- a/pypy/module/test_lib_pypy/test_sha_extra.py +++ b/pypy/module/test_lib_pypy/test_sha_extra.py @@ -8,7 +8,6 @@ class AppTestSHA: - spaceconfig = dict(usemodules=('struct',)) def setup_class(cls): @@ -37,3 +36,4 @@ assert _sha.blocksize == 1 assert _sha.sha1().digest_size == 20 assert _sha.sha1().digestsize == 20 + assert _sha.sha1().block_size == 64 diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -115,10 +115,11 @@ continue print "Picking %s" % p binaries.append((p, p.basename)) - if pypy_c.dirpath().join("libpypy-c.lib").check(): - shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")), + importlib_name = 'python27.lib' + if pypy_c.dirpath().join(importlib_name).check(): + shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), str(pypydir.join('include/python27.lib'))) - print "Picking %s as %s" % (pypy_c.dirpath().join("libpypy-c.lib"), + print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), pypydir.join('include/python27.lib')) else: pass From noreply at buildbot.pypy.org Thu Dec 19 00:48:58 2013 From: 
noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 00:48:58 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge upstream Message-ID: <20131218234858.E65441C08A1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68478:55a82ced3b8a Date: 2013-12-18 15:48 -0800 http://bitbucket.org/pypy/pypy/changeset/55a82ced3b8a/ Log: merge upstream diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -40,9 +40,9 @@ # for all computations. See the book for algorithms for converting between # proleptic Gregorian ordinals and many other calendar systems. -_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] +_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] -_DAYS_BEFORE_MONTH = [None] +_DAYS_BEFORE_MONTH = [-1] dbm = 0 for dim in _DAYS_IN_MONTH[1:]: _DAYS_BEFORE_MONTH.append(dbm) @@ -806,7 +806,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. @@ -1270,8 +1269,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __bool__(self): @@ -1486,9 +1483,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) From noreply at buildbot.pypy.org Thu Dec 19 01:29:42 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 01:29:42 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20131219002942.BD2141C3391@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68479:2c1789c6e517 Date: 2013-12-18 16:10 -0800 http://bitbucket.org/pypy/pypy/changeset/2c1789c6e517/ Log: merge default diff too long, truncating to 2000 out of 8280 lines diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + 
self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. 
Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. 
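# Illustrative sketch, an assumption not taken from the changeset above: the
# replace() methods can drop their explicit _check_date_fields() and
# _check_time_fields() calls because the date/time/datetime constructors
# perform the same validation themselves, so invalid fields are still rejected.
from datetime import date
d = date(2013, 12, 1)
try:
    d.replace(month=13)      # invalid month
except ValueError:
    pass                     # still rejected, now by the date() constructor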
@@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. +Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. 
-* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. 
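(A minimal usage sketch of the ABI-level CFFI access described above; the
exact snippet is an illustrative assumption, not part of the documentation
change)::

    from cffi import FFI
    ffi = FFI()
    ffi.cdef("int printf(const char *format, ...);")
    C = ffi.dlopen(None)                  # the standard C library
    C.printf("hi there, %s!\n", ffi.new("char[]", "world"))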
- - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. - -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. 
_`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. 
+ +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. + + +Cheers, +Armin Rigo & everybody diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -18,3 +18,24 @@ .. branch: voidtype_strformat Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,4 +1,5 @@ +import operator + def bin(x): - if not isinstance(x, (int, long)): - raise TypeError("must be int or long") - return x.__format__("#b") + value = operator.index(x) + return value.__format__("#b") diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -46,6 +46,15 @@ assert bin(2L) == "0b10" assert bin(-2L) == "-0b10" raises(TypeError, bin, 0.) 
+ class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert "index" in exc.value.message def test_unichr(self): import sys diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -5,7 +5,9 @@ from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw class LLBuffer(RWBuffer): @@ -34,8 +36,7 @@ def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) - for i in range(len(string)): - raw_cdata[i] = string[i] + copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) class MiniBuffer(W_Root): @@ -59,6 +60,9 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +72,7 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -206,8 +206,7 @@ w_value.get_array_length() == length): # fast path: copying from exactly the correct type s = w_value._cdata - for i in range(ctitemsize * length): - cdata[i] = s[i] + rffi.c_memcpy(cdata, s, ctitemsize * length) keepalive_until_here(w_value) return # @@ -259,7 +258,6 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray - from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr @@ -424,6 +422,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- @@ -3125,6 +3131,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + 
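# Illustrative sketch, an assumption about the cffi frontend rather than part
# of the changeset: with the sliced-cdata _sizeof() added above, the size of
# an array slice is reported as length * sizeof(item), mirroring
# test_sizeof_sliced_array.
import cffi
ffi = cffi.FFI()
a = ffi.new("int[10]")
assert ffi.sizeof(a[2:9]) == 7 * ffi.sizeof("int")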
def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -687,11 +687,15 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('init_bufferobject', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, + init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -699,12 +703,8 @@ lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - if translating: - reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) - else: - reinit_tls = rffi.llexternal('PyPyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, + compilation_info=eci) add_fork_hook('child', reinit_tls) def init_function(func): @@ -746,7 +746,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -812,7 +812,7 @@ INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -824,7 +824,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -859,28 +859,27 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def mangle_name(prefix, name): + if 
name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - if not rename: - newname = name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1041,7 +1040,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) @@ -1071,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void init_bufferobject(void); +void _Py_init_bufferobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void _Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,7 +783,7 @@ return size; } -void init_bufferobject(void) +void _Py_init_bufferobject(void) { PyType_Ready(&PyBuffer_Type); } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/pypy/module/cpyext/test/test_thread.py 
b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyInt_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing PyPy's functions" +#endif + return PyInt_FromLong(PyThread_get_thread_ident()); """), ]) import thread, threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,13 +1,40 @@ def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. 
Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: raise ValueError("float arguments must be integral") x = fl - if x < 0: - raise ValueError("x must be >= 0") - res = 1 - for i in range(1, x + 1): - res *= i - return res + + if x <= 100: + if x < 0: + raise ValueError("x must be >= 0") + res = 1 + for i in range(2, x + 1): + res *= i + return res + + # Experimentally this gap seems good + gap = max(100, x >> 7) + def _fac_odd(low, high): + if low + gap >= high: + t = 1 + for i in range(low, high, 2): + t *= i + return t + + mid = ((low + high) >> 1) | 1 + return _fac_odd(low, mid) * _fac_odd(mid, high) + + def _fac1(x): + if x <= 2: + return 1, 1, x - 1 + x2 = x >> 1 + f, g, shift = _fac1(x2) + g *= _fac_odd((x2 + 1) | 1, x + 1) + return (f * g, g, shift + x2) + + res, _, shift = _fac1(x) + return res << shift diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py new file mode 100644 --- /dev/null +++ b/pypy/module/math/test/test_factorial.py @@ -0,0 +1,29 @@ +import py +import math +from pypy.module.math import app_math + +def test_factorial_extra(): + for x in range(1000): + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + assert type(r1) == type(r2) + +def test_timing(): + py.test.skip("for manual running only") + import time + x = 5000 + repeat = 1000 + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + t1 = time.time() + for i in range(repeat): + app_math.factorial(x) + t2 = time.time() + for i in range(repeat): + math.factorial(x) + t3 = time.time() + assert r1 == r2 + print (t2 - t1) / repeat + print (t3 - t2) / repeat diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -10,7 +10,7 @@ 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', - 'ones': 'interp_numarray.ones', + 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', 'dot': 'interp_arrayops.dot', @@ -106,8 +106,6 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), - ('ones_like', 'ones_like'), - ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -47,7 +47,7 @@ def setslice(self, space, arr): impl = arr.implementation if impl.is_scalar(): - self.fill(impl.get_scalar_value()) + self.fill(space, impl.get_scalar_value()) return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: @@ -100,7 +100,7 @@ tmp = self.get_real(orig_array) tmp.setslice(space, convert_to_array(space, w_value)) - def get_imag(self, 
orig_array): + def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): @@ -110,11 +110,11 @@ impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) if not self.dtype.is_flexible_type(): - impl.fill(self.dtype.box(0)) + impl.fill(space, self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(orig_array) + tmp = self.get_imag(space, orig_array) tmp.setslice(space, convert_to_array(space, w_value)) # -------------------- applevel get/setitem ----------------------- @@ -357,7 +357,7 @@ self.get_backstrides(), self.get_shape()) - def fill(self, box): + def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), box, 0, self.size, 0) @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): @@ -420,8 +435,8 @@ def base(self): return self.orig_arr - def fill(self, box): - loop.fill(self, box.convert_to(self.dtype)) + def fill(self, space, box): + loop.fill(self, box.convert_to(space, self.dtype)) def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and \ @@ -487,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -54,8 +54,7 @@ return self.value def set_scalar_value(self, w_val): - assert isinstance(w_val, W_GenericBox) - self.value = w_val.convert_to(self.dtype) + self.value = w_val def copy(self, space): scalar = Scalar(self.dtype) @@ -96,12 +95,12 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) else: self.value = w_arr.get_scalar_value() - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) scalar.value = self.value.convert_imag_to(scalar.dtype) @@ -125,13 +124,19 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) self.value = self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), ) def descr_getitem(self, space, _, w_idx): if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() + if space.is_none(w_idx): + new_shape = [1] + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) + arr_iter = arr.create_iter(new_shape) + arr_iter.setitem(self.value) + return arr raise 
OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) @@ -174,7 +179,7 @@ w_res.implementation.setitem(0, index_type.itemtype.box(0)) return space.newtuple([w_res]) - def fill(self, w_value): + def fill(self, space, w_value): self.value = w_value def get_storage_as_int(self, space): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -49,11 +49,24 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, + w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) - if owning: + if w_base is not None: + if owning: + raise OperationError(space.w_ValueError, + space.wrap("Cannot have owning=True when specifying a buffer")) + if writable: + impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, + backstrides, storage, w_base) + else: + impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, + strides, backstrides, + storage, w_base) + + elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, backstrides, storage=storage) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -89,7 +89,7 @@ shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) out = W_NDimArray.from_shape(space, shape, dtype) - return loop.where(out, shape, arr, x, y, dtype) + return loop.where(space, out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2, w_out=None): w_arr = convert_to_array(space, w_obj1) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -66,7 +66,7 @@ def __init__(self, value): self.value = value - def convert_to(self, dtype): + def convert_to(self, space, dtype): return dtype.box(self.value) def __repr__(self): @@ -91,7 +91,7 @@ self.real = real self.imag = imag - def convert_to(self, dtype): + def convert_to(self, space, dtype): return dtype.box_complex(self.real, self.imag) def convert_real_to(self, dtype): @@ -149,20 +149,26 @@ return space.index(self.item(space)) def descr_int(self, space): - box = self.convert_to(W_LongBox._get_dtype(space)) + box = self.convert_to(space, W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) return space.wrap(box.value) def descr_long(self, space): - box = self.convert_to(W_Int64Box._get_dtype(space)) + box = self.convert_to(space, W_Int64Box._get_dtype(space)) assert isinstance(box, W_Int64Box) return space.wrap(box.value) def descr_float(self, space): - box = self.convert_to(W_Float64Box._get_dtype(space)) + box = self.convert_to(space, W_Float64Box._get_dtype(space)) assert isinstance(box, W_Float64Box) return space.wrap(box.value) + def descr_oct(self, space): + return space.oct(self.descr_int(space)) + + def descr_hex(self, space): + return space.hex(self.descr_int(space)) + def descr_nonzero(self, space): dtype = self.get_dtype(space) return space.wrap(dtype.itemtype.bool(self)) @@ -259,14 +265,13 @@ if not space.is_none(w_out): raise 
OperationError(space.w_NotImplementedError, space.wrap( "out not supported")) - v = self.convert_to(self.get_dtype(space)) - return self.get_dtype(space).itemtype.round(v, decimals) + return self.get_dtype(space).itemtype.round(self, decimals) def descr_astype(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) - return self.convert_to(dtype) + return self.convert_to(space, dtype) def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype @@ -305,7 +310,10 @@ return space.wrap(0) def descr_copy(self, space): - return self.convert_to(self.get_dtype(space)) + return self.convert_to(space, self.get_dtype(space)) + + def descr_buffer(self, space): + return self.descr_ravel(space).descr_get_data(space) w_flags = None def descr_get_flags(self, space): @@ -466,14 +474,16 @@ dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) - def convert_to(self, dtype): + def convert_to(self, space, dtype): # if we reach here, the record fields are guarenteed to match. return self class W_CharacterBox(W_FlexibleBox): - def convert_to(self, dtype): - # XXX assert dtype is str type - return self + def convert_to(self, space, dtype): + return dtype.coerce(space, space.wrap(self.raw_str())) + + def descr_len(self, space): + return space.len(self.item(space)) class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): @@ -511,6 +521,9 @@ __long__ = interp2app(W_GenericBox.descr_long), __float__ = interp2app(W_GenericBox.descr_float), __nonzero__ = interp2app(W_GenericBox.descr_nonzero), + __oct__ = interp2app(W_GenericBox.descr_oct), + __hex__ = interp2app(W_GenericBox.descr_hex), + __buffer__ = interp2app(W_GenericBox.descr_buffer), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), @@ -745,9 +758,11 @@ W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), + __len__ = interp2app(W_StringBox.descr_len), ) W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), + __len__ = interp2app(W_UnicodeBox.descr_len), ) diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/interp_flagsobj.py --- a/pypy/module/micronumpy/interp_flagsobj.py +++ b/pypy/module/micronumpy/interp_flagsobj.py @@ -18,6 +18,16 @@ def descr_get_writeable(self, space): return space.w_True + def descr_get_fnc(self, space): + return space.wrap( + space.is_true(self.descr_get_fortran(space)) and not + space.is_true(self.descr_get_contiguous(space))) + + def descr_get_forc(self, space): + return space.wrap( + space.is_true(self.descr_get_fortran(space)) or + space.is_true(self.descr_get_contiguous(space))) + def descr_getitem(self, space, w_item): key = space.str_w(w_item) if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": @@ -26,6 +36,10 @@ return self.descr_get_fortran(space) if key == "W" or key == "WRITEABLE": return self.descr_get_writeable(space) + if key == "FNC": + return self.descr_get_fnc(space) + if key == "FORC": + return self.descr_get_forc(space) raise OperationError(space.w_KeyError, space.wrap( "Unknown flag")) @@ -56,4 +70,6 @@ f_contiguous = GetSetProperty(W_FlagsObject.descr_get_fortran), fortran = 
GetSetProperty(W_FlagsObject.descr_get_fortran), writeable = GetSetProperty(W_FlagsObject.descr_get_writeable), + fnc = GetSetProperty(W_FlagsObject.descr_get_fnc), + forc = GetSetProperty(W_FlagsObject.descr_get_forc), ) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,3 +1,5 @@ +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ @@ -20,6 +22,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): @@ -91,7 +94,7 @@ return space.wrap(self.get_size() * self.get_dtype().get_size()) def descr_fill(self, space, w_value): - self.fill(self.get_dtype().coerce(space, w_value)) + self.fill(space, self.get_dtype().coerce(space, w_value)) def descr_tostring(self, space, w_order=None): order = order_converter(space, w_order, NPY_CORDER) @@ -285,8 +288,8 @@ def set_scalar_value(self, w_val): self.implementation.set_scalar_value(w_val) - def fill(self, box): - self.implementation.fill(box) + def fill(self, space, box): + self.implementation.fill(space, box) def descr_get_size(self, space): return space.wrap(self.get_size()) @@ -311,7 +314,7 @@ self.implementation.get_real(self)) def descr_get_imag(self, space): - ret = self.implementation.get_imag(self) + ret = self.implementation.get_imag(space, self) return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): @@ -490,8 +493,11 @@ if not space.is_none(w_dtype): raise OperationError(space.w_NotImplementedError, space.wrap( "__array__(dtype) not implemented")) - # stub implementation of __array__() - return self + if type(self) is W_NDimArray: + return self + return W_NDimArray.from_shape_and_storage( + space, self.get_shape(), self.implementation.storage, + self.get_dtype(), w_base=self) def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) @@ -533,7 +539,7 @@ def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, dtype, impl.value) @@ -950,8 +956,7 @@ return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, promote_to_largest, cumulative)) - descr_sum = _reduce_ufunc_impl("add") - descr_sum_promote = _reduce_ufunc_impl("add", True) + descr_sum = _reduce_ufunc_impl("add", True) descr_prod = _reduce_ufunc_impl("multiply", True) descr_max = _reduce_ufunc_impl("maximum") descr_min = _reduce_ufunc_impl("minimum") @@ -982,28 +987,64 @@ shape = self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.int(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.int(self.descr_getitem(space, space.wrap(0))) - raise 
OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to int")) + return space.int(value) def descr_long(self, space): shape = self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.long(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.int(self.descr_getitem(space, space.wrap(0))) - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to long")) + return space.long(value) def descr_float(self, space): shape = self.get_shape() if len(shape) == 0: assert isinstance(self.implementation, scalar.Scalar) - return space.float(space.wrap(self.implementation.get_scalar_value())) - if shape == [1]: - return space.float(self.descr_getitem(space, space.wrap(0))) - raise OperationError(space.w_TypeError, space.wrap("only length-1 arrays can be converted to Python scalars")) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only length-1 arrays can be converted to Python scalars")) + if self.get_dtype().is_str_or_unicode(): + raise OperationError(space.w_TypeError, space.wrap( + "don't know how to convert scalar number to float")) + return space.float(value) + + def descr_index(self, space): + shape = self.get_shape() + if len(shape) == 0: + assert isinstance(self.implementation, scalar.Scalar) + value = space.wrap(self.implementation.get_scalar_value()) + elif shape == [1]: + value = self.descr_getitem(space, space.wrap(0)) + else: + raise OperationError(space.w_TypeError, space.wrap( + "only integer arrays with one element " + "can be converted to an index")) + if not self.get_dtype().is_int_type() or self.get_dtype().is_bool_type(): + raise OperationError(space.w_TypeError, space.wrap( + "only integer arrays with one element " + "can be converted to an index")) + assert isinstance(value, interp_boxes.W_GenericBox) + return value.item(space) def descr_reduce(self, space): from rpython.rlib.rstring import StringBuilder @@ -1015,8 +1056,8 @@ multiarray = numpypy.get("multiarray") assert isinstance(multiarray, MixedModule) reconstruct = multiarray.get("_reconstruct") - - parameters = space.newtuple([space.gettypefor(W_NDimArray), space.newtuple([space.wrap(0)]), space.wrap("b")]) + parameters = space.newtuple([self.getclass(space), + space.newtuple([space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() if isinstance(self.implementation, SliceArray): @@ -1039,14 +1080,22 @@ return space.newtuple([reconstruct, parameters, state]) def descr_setstate(self, space, w_state): - from 
rpython.rtyper.lltypesystem import rffi - - shape = space.getitem(w_state, space.wrap(1)) - dtype = space.getitem(w_state, space.wrap(2)) - assert isinstance(dtype, interp_dtype.W_Dtype) - isfortran = space.getitem(w_state, space.wrap(3)) - storage = space.getitem(w_state, space.wrap(4)) - + lens = space.len_w(w_state) + # numpy compatability, see multiarray/methods.c + if lens == 5: + base_index = 1 + elif lens == 4: + base_index = 0 + else: + raise OperationError(space.w_ValueError, space.wrap( + "__setstate__ called with len(args[1])==%d, not 5 or 4" % lens)) + shape = space.getitem(w_state, space.wrap(base_index)) + dtype = space.getitem(w_state, space.wrap(base_index+1)) + isfortran = space.getitem(w_state, space.wrap(base_index+2)) + storage = space.getitem(w_state, space.wrap(base_index+3)) + if not isinstance(dtype, interp_dtype.W_Dtype): + raise OperationError(space.w_ValueError, space.wrap( + "__setstate__(self, (shape, dtype, .. called with improper dtype '%r'" % dtype)) self.implementation = W_NDimArray.from_shape_and_storage(space, [space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), @@ -1062,20 +1111,45 @@ return w_obj pass - at unwrap_spec(offset=int, order=str) + at unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, order='C'): + offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_buffer)): - raise OperationError(space.w_NotImplementedError, - space.wrap("unsupported param")) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - if not shape: - return W_NDimArray.new_scalar(space, dtype) + + if not space.is_none(w_buffer): + if (not space.is_none(w_strides)): + raise OperationError(space.w_NotImplementedError, + space.wrap("unsupported param")) + + buf = space.buffer_w(w_buffer) + try: + raw_ptr = buf.get_raw_address() + except ValueError: + raise OperationError(space.w_TypeError, space.wrap( + "Only raw buffers are supported")) + if not shape: + raise OperationError(space.w_TypeError, space.wrap( + "numpy scalars from buffers not supported yet")) + totalsize = support.product(shape) * dtype.get_size() + if totalsize+offset > buf.getlength(): + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) + storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) + storage = rffi.ptradd(storage, offset) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + w_subtype=w_subtype, + w_base=w_buffer, + writable=buf.is_writable()) + + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_CORDER: + order = 'C' + else: + order = 'F' if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) @@ -1093,8 +1167,6 @@ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
""" - from rpython.rtyper.lltypesystem import rffi - from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -1149,6 +1221,8 @@ __int__ = interp2app(W_NDimArray.descr_int), __long__ = interp2app(W_NDimArray.descr_long), __float__ = interp2app(W_NDimArray.descr_float), + __buffer__ = interp2app(W_NDimArray.descr_get_data), + __index__ = interp2app(W_NDimArray.descr_index), __pos__ = interp2app(W_NDimArray.descr_pos), __neg__ = interp2app(W_NDimArray.descr_neg), @@ -1328,36 +1402,34 @@ # arrays with correct dtype dtype = interp_dtype.decode_w_dtype(space, w_dtype) if isinstance(w_object, W_NDimArray) and \ - (space.is_none(w_dtype) or w_object.get_dtype() is dtype): + (space.is_none(w_dtype) or w_object.get_dtype() is dtype): shape = w_object.get_shape() if copy: w_ret = w_object.descr_copy(space) else: - if ndmin<= len(shape): + if ndmin <= len(shape): return w_object new_impl = w_object.implementation.set_shape(space, w_object, shape) w_ret = W_NDimArray(new_impl) if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_ret.implementation = w_ret.implementation.set_shape(space, - w_ret, shape) + w_ret, shape) return w_ret # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or ( - dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): for w_elem in elems_w: - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, - dtype) - #if dtype is interp_dtype.get_dtype_cache(space).w_float64dtype: - # break - + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if dtype.is_str_or_unicode() and dtype.get_size() < 1: - # promote S0 -> S1, U0 -> U1 - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + elif dtype.is_str_or_unicode() and dtype.get_size() < 1: + # promote S0 -> S1, U0 -> U1 + dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) @@ -1370,25 +1442,20 @@ @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - if not shape: - return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) + return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) - at unwrap_spec(order=str) -def ones(space, w_shape, w_dtype=None, order='C'): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) - shape = _find_shape(space, w_shape, dtype) - if not shape: - return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) - one = dtype.box(1) - w_arr.fill(one) - return space.wrap(w_arr) + at unwrap_spec(subok=bool) +def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): + w_a = 
convert_to_array(space, w_a) + if w_dtype is None: + dtype = w_a.get_dtype() + else: + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, + w_instance=w_a if subok else None) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -226,7 +226,7 @@ dtype = out.get_dtype() else: out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) - return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, + return loop.do_axis_reduce(space, shape, self.func, obj, dtype, axis, out, self.identity, cumulative, temp) if cumulative: if out: @@ -235,7 +235,7 @@ "out of incompatible size")) else: out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) - loop.compute_reduce_cumulative(obj, out, dtype, self.func, + loop.compute_reduce_cumulative(space, obj, out, dtype, self.func, self.identity) return out if out: @@ -244,7 +244,7 @@ "for reduction operation %s has too many" " dimensions",self.name) dtype = out.get_dtype() - res = loop.compute_reduce(obj, dtype, self.func, self.done_func, + res = loop.compute_reduce(space, obj, dtype, self.func, self.done_func, self.identity) if out: out.set_scalar_value(res) @@ -303,13 +303,13 @@ res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype if w_obj.is_scalar(): w_val = self.func(calc_dtype, - w_obj.get_scalar_value().convert_to(calc_dtype)) + w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: return w_val if out.is_scalar(): out.set_scalar_value(w_val) else: - out.fill(res_dtype.coerce(space, w_val)) + out.fill(space, res_dtype.coerce(space, w_val)) return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) @@ -395,14 +395,14 @@ res_dtype = calc_dtype if w_lhs.is_scalar() and w_rhs.is_scalar(): arr = self.func(calc_dtype, - w_lhs.get_scalar_value().convert_to(calc_dtype), - w_rhs.get_scalar_value().convert_to(calc_dtype) + w_lhs.get_scalar_value().convert_to(space, calc_dtype), + w_rhs.get_scalar_value().convert_to(space, calc_dtype) ) if isinstance(out, W_NDimArray): if out.is_scalar(): out.set_scalar_value(arr) else: - out.fill(arr) + out.fill(space, arr) else: out = arr return out @@ -496,6 +496,15 @@ @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): + if promote_to_largest: + if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: + return interp_dtype.get_dtype_cache(space).w_int64dtype + elif dt.kind == NPY_UNSIGNEDLTR: + return interp_dtype.get_dtype_cache(space).w_uint64dtype + elif dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: + return dt + else: + assert False From noreply at buildbot.pypy.org Thu Dec 19 02:54:53 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 02:54:53 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix -Ojit translation: kill stray/misplaced lines from a merge Message-ID: <20131219015453.9B81E1C314B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68480:46e0449759bb Date: 2013-12-18 17:53 -0800 http://bitbucket.org/pypy/pypy/changeset/46e0449759bb/ Log: fix -Ojit translation: kill stray/misplaced lines from a merge diff --git 
a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -576,8 +576,6 @@ """ Initialize cellvars from self.locals_stack_w. """ - if self.cells is None: - return args_to_copy = self.pycode._args_as_cellvars for i in range(len(args_to_copy)): argnum = args_to_copy[i] From noreply at buildbot.pypy.org Thu Dec 19 03:28:30 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 03:28:30 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix promote_to_largest wrt 32bit, though demonstrates another problem Message-ID: <20131219022830.D62B81C346E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68481:84c4bc069470 Date: 2013-12-18 21:27 -0500 http://bitbucket.org/pypy/pypy/changeset/84c4bc069470/ Log: test/fix promote_to_largest wrt 32bit, though demonstrates another problem diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -498,13 +498,14 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_int64dtype + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY_UNSIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_uint64dtype - elif dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: - return dt + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_ulongdtype else: - assert False + assert dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR + return dt if promote_bools and (dt.kind == NPY_GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1404,12 +1404,14 @@ a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 - a = array([True, False]) - assert a.prod() == 0 - assert type(a.prod()) is int_ - a = array([True, False], dtype='uint') - assert a.prod() == 0 - assert type(a.prod()) is dtype('uint').type + for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype is dtype('uint' if dt[0] == 'u' else 'int') + for dt in ['l', 'L', 'q', 'Q', 'e', 'f', 'd', 'F', 'D']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype is dtype(dt) def test_max(self): from numpypy import array, zeros From noreply at buildbot.pypy.org Thu Dec 19 04:31:24 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 04:31:24 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3ish Message-ID: <20131219033124.E96971C3391@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68482:231446df54bb Date: 2013-12-18 19:30 -0800 http://bitbucket.org/pypy/pypy/changeset/231446df54bb/ Log: 2to3ish diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -22,10 +22,10 @@ import numpy as np assert int(np.str_('12')) == 12 exc = raises(ValueError, "int(np.str_('abc'))") - assert exc.value.message.startswith('invalid 
literal for int()') - assert oct(np.int32(11)) == '013' - assert oct(np.float32(11.6)) == '013' - assert oct(np.complex64(11-12j)) == '013' + assert str(exc.value).startswith('invalid literal for int()') + assert oct(np.int32(11)) == '0o13' + assert oct(np.float32(11.6)) == '0o13' + assert oct(np.complex64(11-12j)) == '0o13' assert hex(np.int32(11)) == '0xb' assert hex(np.float32(11.6)) == '0xb' assert hex(np.complex64(11-12j)) == '0xb' @@ -43,7 +43,7 @@ except ImportError: # running on dummy module from numpy import scalar - from cPickle import loads, dumps + from pickle import loads, dumps i = dtype('int32').type(1337) f = dtype('float64').type(13.37) c = dtype('complex128').type(13 + 37.j) @@ -98,10 +98,10 @@ def test_buffer(self): import numpy as np a = np.int32(123) - b = buffer(a) - assert type(b) is buffer + b = memoryview(a) + assert type(b) is memoryview a = np.string_('abc') - b = buffer(a) + b = memoryview(a) assert str(b) == a def test_squeeze(self): @@ -137,7 +137,7 @@ import sys s = np.dtype('int64').type(12) exc = raises(ValueError, s.view, 'int8') - assert exc.value[0] == "new type not compatible with array." + assert str(exc.value) == "new type not compatible with array." t = s.view('double') assert type(t) is np.double assert t < 7e-323 @@ -146,7 +146,7 @@ assert 0 < t.real < 1 assert t.imag == 0 exc = raises(TypeError, s.view, 'string') - assert exc.value[0] == "data-type must not be 0-sized" + assert str(exc.value) == "data-type must not be 0-sized" t = s.view('S8') assert type(t) is np.string_ assert t == '\x0c' From noreply at buildbot.pypy.org Thu Dec 19 04:31:26 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 04:31:26 +0100 (CET) Subject: [pypy-commit] pypy py3k: workaround lack of space.hex/oct in py3k Message-ID: <20131219033126.39D001C346E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68483:31ac6a26b147 Date: 2013-12-18 19:30 -0800 http://bitbucket.org/pypy/pypy/changeset/31ac6a26b147/ Log: workaround lack of space.hex/oct in py3k diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -159,10 +159,10 @@ return space.wrap(box.value) def descr_oct(self, space): - return space.oct(self.descr_int(space)) + return space.call_method(space.builtin, 'oct', self.descr_int(space)) def descr_hex(self, space): - return space.hex(self.descr_int(space)) + return space.call_method(space.builtin, 'hex', self.descr_int(space)) def descr_nonzero(self, space): dtype = self.get_dtype(space) From noreply at buildbot.pypy.org Thu Dec 19 05:18:33 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 05:18:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20131219041833.7B4821C08A1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68484:5547c4821d09 Date: 2013-12-18 20:17 -0800 http://bitbucket.org/pypy/pypy/changeset/5547c4821d09/ Log: 2to3 diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -92,7 +92,7 @@ def __int__(self): return 42 exc = raises(TypeError, bin, D()) - assert "index" in exc.value.message + assert "index" in str(exc.value) def test_oct(self): class Foo: diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- 
a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -301,7 +301,7 @@ def test_long_before_string(self): class A(str): - def __long__(self): + def __int__(self): return 42 assert int(A('abc')) == 42 From noreply at buildbot.pypy.org Thu Dec 19 10:12:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 10:12:22 +0100 (CET) Subject: [pypy-commit] stmgc c5: Put all write_history_s from a thread in the same page, as far as it fits Message-ID: <20131219091222.926C31C08A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r559:fff713ac3eb7 Date: 2013-12-19 10:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/fff713ac3eb7/ Log: Put all write_history_s from a thread in the same page, as far as it fits diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -73,8 +73,8 @@ struct read_marker_s { /* We associate a single byte to every object, by simply dividing - the address of the object by 16. This is the last byte of the - last time we have read the object. See stm_read(). */ + the address of the object by 16. The number in this single byte + gives the last time we have read the object. See stm_read(). */ unsigned char c; }; @@ -482,14 +482,25 @@ __sync_fetch_and_add(&d->next_transaction_version, 1u); assert(stm_local.transaction_version <= 0xffff); - struct page_header_s *newpage = _stm_reserve_page(); - newpage->kind = PGKIND_WRITE_HISTORY; - - struct write_history_s *cur = (struct write_history_s *)(newpage + 1); + struct write_history_s *cur = NULL; + if (stm_local.writes_by_this_transaction != NULL) { + cur = stm_local.writes_by_this_transaction; + char *next, *page_limit = (char *)cur; + page_limit += 4096 - (((uintptr_t)page_limit) & 4095); + next = (char *)(cur + 1) + 8 * cur->nb_updates; + if (page_limit - next < sizeof(struct write_history_s) + 8) + cur = NULL; + else + cur = (struct write_history_s *)next; + } + if (cur == NULL) { + struct page_header_s *newpage = _stm_reserve_page(); + newpage->kind = PGKIND_WRITE_HISTORY; + cur = (struct write_history_s *)(newpage + 1); + } cur->previous_older_transaction = NULL; cur->transaction_version = stm_local.transaction_version; cur->nb_updates = 0; - assert(stm_local.writes_by_this_transaction == NULL); stm_local.writes_by_this_transaction = cur; struct write_history_s *hist = d->most_recent_committed_transaction; @@ -520,7 +531,6 @@ hist, cur)) break; } - stm_local.writes_by_this_transaction = NULL; if (stm_get_read_marker_number() < 0xff) { stm_local.current_read_markers++; From noreply at buildbot.pypy.org Thu Dec 19 12:04:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 12:04:13 +0100 (CET) Subject: [pypy-commit] stmgc c5: Next tests to pass Message-ID: <20131219110413.F09311C1358@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r560:40c429b525cf Date: 2013-12-19 10:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/40c429b525cf/ Log: Next tests to pass diff --git a/c5/test/test_basic.py b/c5/test/test_basic.py --- a/c5/test/test_basic.py +++ b/c5/test/test_basic.py @@ -151,3 +151,86 @@ stm_stop_transaction(False) assert p1[8] == 'b' assert p2[8] == 'C' + + def test_page_extra_malloc_unchanged_page(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'A' + p2[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'A' + p1[8] = 'B' + stm_stop_transaction(False) + # + 
self.switch("main") + stm_read(p2) + assert p2[8] == 'a' + p3 = stm_allocate(16) # goes into the same page, which is + p3[8] = ':' # not otherwise modified + stm_stop_transaction(False) + # + assert p1[8] == 'B' + assert p2[8] == 'a' + assert p3[8] == ':' + + def test_page_extra_malloc_changed_page_before(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'A' + p2[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'A' + p1[8] = 'B' + stm_stop_transaction(False) + # + self.switch("main") + stm_write(p2) + assert p2[8] == 'a' + p2[8] = 'b' + p3 = stm_allocate(16) # goes into the same page, which I already + p3[8] = ':' # modified just above + stm_stop_transaction(False) + # + assert p1[8] == 'B' + assert p2[8] == 'b' + assert p3[8] == ':' + + def test_page_extra_malloc_changed_page_after(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'A' + p2[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'A' + p1[8] = 'B' + stm_stop_transaction(False) + # + self.switch("main") + p3 = stm_allocate(16) # goes into the same page, which I will + p3[8] = ':' # modify just below + stm_write(p2) + assert p2[8] == 'a' + p2[8] = 'b' + stm_stop_transaction(False) + # + assert p1[8] == 'B' + assert p2[8] == 'b' + assert p3[8] == ':' From noreply at buildbot.pypy.org Thu Dec 19 12:04:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 12:04:15 +0100 (CET) Subject: [pypy-commit] stmgc c5: Fix the test. Message-ID: <20131219110415.1B5931C1473@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r561:2c3f26799678 Date: 2013-12-19 12:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/2c3f26799678/ Log: Fix the test. diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -98,7 +98,7 @@ char _pad1[CACHE_LINE_SIZE]; }; union { - unsigned int next_transaction_version; + unsigned int next_transaction_version; /* always EVEN */ char _pad2[CACHE_LINE_SIZE]; }; union { @@ -118,7 +118,7 @@ different forked processes. */ char *read_markers; struct read_marker_s *current_read_markers; - uint16_t transaction_version; + uint16_t transaction_version; /* always EVEN */ struct write_history_s *base_page_mapping; struct write_history_s *writes_by_this_transaction; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; @@ -335,7 +335,7 @@ /* the page at index 0 contains the '*stm_shared_descriptor' structure */ /* the page at index 1 is reserved for history_fast_forward() */ stm_shared_descriptor->index_page_never_used = 2; - stm_shared_descriptor->next_transaction_version = 1; + stm_shared_descriptor->next_transaction_version = 2; } void _stm_teardown(void) @@ -400,6 +400,22 @@ return (struct object_s *)(((char *)page) + offset); } +static _Bool must_merge_page(struct page_header_s *page) +{ + /* The remote page was modified. Look at the local page (at + 'page'). If 'page->version' is equal to: + + - stm_local.transaction_version: the local page was + also modified in this transaction. Then we need to merge. + + - stm_local.transaction_version - 1: the local page was + not, strictly speaking, modified, but *new* objects have + been written to it. In order not to loose them, ask for + a merge too. 
+ */ + return ((uint32_t)(stm_local.transaction_version - page->version)) <= 1; +} + static int history_fast_forward(struct write_history_s *new, int conflict) { /* XXX do a non-recursive version, which also should avoid repeated @@ -419,7 +435,7 @@ struct page_header_s *page = get_page_by_local_index(local_index); struct page_header_s *mypage = page; - if (!conflict && page->version == stm_local.transaction_version) { + if (!conflict && must_merge_page(page)) { /* If we have also modified this page, then we must merge our changes with the ones done at 'new_pgoff'. In this case we map 'new_pgoff' at the local index 1. */ @@ -479,8 +495,9 @@ { struct shared_descriptor_s *d = stm_shared_descriptor; stm_local.transaction_version = - __sync_fetch_and_add(&d->next_transaction_version, 1u); + __sync_fetch_and_add(&d->next_transaction_version, 2u); assert(stm_local.transaction_version <= 0xffff); + assert((stm_local.transaction_version & 1) == 0); /* EVEN number */ struct write_history_s *cur = NULL; if (stm_local.writes_by_this_transaction != NULL) { @@ -507,6 +524,16 @@ if (hist != stm_local.base_page_mapping) { history_fast_forward(hist, 1); } + + int i; + for (i = 2; i < LARGE_OBJECT_WORDS; i++) { + struct page_header_s *page; + char *ptr = stm_local.alloc[i].next; + if (ptr != NULL) { + page = (struct page_header_s *)(((uintptr_t)ptr) & ~4095); + page->version = stm_local.transaction_version - 1; + } + } } _Bool stm_stop_transaction(void) From noreply at buildbot.pypy.org Thu Dec 19 14:23:13 2013 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 19 Dec 2013 14:23:13 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: start converting squeakjs bitblt Message-ID: <20131219132313.2D6551D2314@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r542:582625456d68 Date: 2013-12-19 14:22 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/582625456d68/ Log: start converting squeakjs bitblt diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -643,32 +643,16 @@ space = interp.space s_bitblt = w_rcvr.as_bitblt_get_shadow(space) - # See BlueBook p.356ff - s_bitblt.clip_range() - if s_bitblt.w <= 0 or s_bitblt.h <= 0: - return w_rcvr # null range - s_bitblt.compute_masks() - s_bitblt.check_overlap() - s_bitblt.calculate_offsets() - # print s_bitblt.as_string() - s_bitblt.copy_loop() + s_bitblt.copyBits() w_dest_form = w_rcvr.fetch(space, 0) - if w_dest_form.is_same_object(space.objtable['w_display']): + if (combinationRule == 22 or combinationRule == 32): + s_frame.pop() # pops the next value under BitBlt + s_frame.push(s_bitblt.bitCount()) + else if w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = w_dest_form.fetch(space, 0) assert isinstance(w_bitmap, model.W_DisplayBitmap) w_bitmap.flush_to_screen() - - # try: - # s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) - # except Return: - # w_dest_form = w_rcvr.fetch(space, 0) - # if w_dest_form.is_same_object(space.objtable['w_display']): - # w_bitmap = w_dest_form.fetch(space, 0) - # assert isinstance(w_bitmap, model.W_DisplayBitmap) - # w_bitmap.flush_to_screen() - - # in case we return normally, we have to restore the removed w_rcvr return w_rcvr @expose_primitive(BE_CURSOR) @@ -893,8 +877,8 @@ raise PrimitiveFailedError signature = (w_modulename.as_string(), w_functionname.as_string()) - # if signature == ('BitBltPlugin', 'primitiveCopyBits'): - # return prim_holder.prim_table[BITBLT_COPY_BITS](interp, s_frame, 
argcount, s_method) + if signature == ('BitBltPlugin', 'primitiveCopyBits'): + return prim_holder.prim_table[BITBLT_COPY_BITS](interp, s_frame, argcount, s_method) if signature[0] == "SocketPlugin": from spyvm.plugins.socket import SocketPlugin return SocketPlugin.call(signature[1], interp, s_frame, argcount, s_method) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1128,111 +1128,117 @@ class BitBltShadow(AbstractCachingShadow): - _attrs_ = [# From BitBlt - "dest_form", "source_form", "halftone_form", - "combination_rule", "dest_x", "dest_y", "width", - "height", "source_x", "source_y", "clip_x", "clip_y", - "clip_width", "clip_height", "color_map", - # From BitBltSimulation - "w", "h", "sx", "sy", "dx", "dy", - "dest_bits", "dest_raster", "source_bits", "source_raster", - "halftone_bits", "skew", "mask1", "mask2", "skew_mask", - "n_words", "h_dir", "v_dir", "preload", "source_index", - "dest_index", "source_delta", "dest_delta"] - WordSize = 32 - RightMasks = [rarithmetic.r_uint(0)] + MaskTable = [rarithmetic.r_uint(0)] for i in xrange(WordSize): - RightMasks.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) + MaskTable.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) AllOnes = rarithmetic.r_uint((2 ** WordSize) - 1) def sync_cache(self): + self.loadBitBlt() + + def intOrIfNil(self, w_int, i): + if w_int is self.space.w_nil: + return i + else: + return self.space.unwrap_int(w_int) + + def loadForm(self, w_form): try: - w_form = self.fetch(0) - assert isinstance(w_form, model.W_PointersObject) + if not isinstance(w_form, model.W_PointersObject): + raise PrimitiveFailedError() s_form = w_form.as_form_get_shadow(self.space) - assert isinstance(s_form, FormShadow) - self.dest_form = s_form + if not isinstance(s_form, FormShadow): + raise PrimitiveFailedError() + return s_form except error.PrimitiveFailedError, e: w_self = self.w_self() assert isinstance(w_self, model.W_PointersObject) w_self._shadow = None raise e - w_source_form = self.fetch(1) - if w_source_form is self.space.w_nil: - self.source_form = None + + def loadHalftone(self, w_halftone_form): + if w_halftone_form is self.space.w_nil: + return None + elif isinstance(w_halftone_form, model.W_WordsObject): + # Already a bitmap + return w_halftone_form.words else: - try: - w_form = w_source_form - assert isinstance(w_form, model.W_PointersObject) - s_form = w_form.as_form_get_shadow(self.space) - assert isinstance(s_form, FormShadow) - self.source_form = s_form - except error.PrimitiveFailedError, e: - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise e - w_halftone_form = self.fetch(2) - if w_halftone_form is not self.space.w_nil: - if isinstance(w_halftone_form, model.W_WordsObject): - # Already a bitmap - self.halftone_bits = w_halftone_form.words + assert isinstance(w_halftone_form, model.W_PointersObject) + w_bits = w_halftone_form.as_form_get_shadow(self.space).w_bits + assert isinstance(w_bits, model.W_WordsObject) + return w_bits.words + + def loadColorMap(self, w_color_map): + if isinstance(w_color_map, model.W_WordsObject): + self.cmLookupTable = w_color_map.words + self.cmMask = len(self.cmLookupTable) - 1 + else: + self.cmLookupTable = None + + def loadBitBlt(self): + self.success = True + self.destForm = self.fetch(0) + self.dest = self.loadForm(self.destForm) + self.sourceForm = self.fetch(1) + if self.sourceForm is not self.space.w_nil: + self.source = self.loadForm(self.sourceForm) + else: + self.source = None + 
self.halftone = self.loadHalftone(self.fetch(2)) + self.combinationRule = self.space.unwrap_int(self.fetch(3)) + self.destX = self.intOrIfNil(self.fetch(4), 0) + self.destY = self.intOrIfNil(self.fetch(5), 0) + self.width = self.intOrIfNil(self.fetch(6), self.dest_form.width) + self.height = self.intOrIfNil(self.fetch(7), self.dest_form.height) + self.clipX = self.intOrIfNil(self.fetch(10), 0) + self.clipY = self.intOrIfNil(self.fetch(11), 0) + self.clipW = self.intOrIfNil(self.fetch(12), self.width) + self.clipH = self.intOrIfNil(self.fetch(13), self.height) + if not self.source: + self.sourceX = 0 + self.sourceY = 0 + else: + self.loadColorMap(self.fetch(14)) + self.sourceX = self.intOrIfNil(self.fetch(8), 0) + self.sourceY = self.intOrIfNil(self.fetch(9), 0) + + def copyBits(self): + self.bitCount = 0 + self.clipRange() + if (self.bbW <= 0 ir self.bbH <= 0): + return + self.destMaskAndPointerInit() + if not self.source: + self.copyLoopNoSource() + else: + self.checkSourceOverlap() + if self.source.depth !== self.dest.depth: + self.copyLoopPixMap() else: - assert isinstance(w_halftone_form, model.W_PointersObject) - w_bits = w_halftone_form.as_form_get_shadow(self.space).w_bits - assert isinstance(w_bits, model.W_WordsObject) - self.halftone_bits = w_bits.words - else: - self.halftone_bits = None - self.combination_rule = self.space.unwrap_int(self.fetch(3)) - self.dest_x = self.space.unwrap_int(self.fetch(4)) - self.dest_y = self.space.unwrap_int(self.fetch(5)) - self.width = self.space.unwrap_int(self.fetch(6)) - self.height = self.space.unwrap_int(self.fetch(7)) - self.source_x = self.space.unwrap_int(self.fetch(8)) - self.source_y = self.space.unwrap_int(self.fetch(9)) - self.clip_x = self.space.unwrap_int(self.fetch(10)) - self.clip_y = self.space.unwrap_int(self.fetch(11)) - self.clip_width = self.space.unwrap_int(self.fetch(12)) - self.clip_height = self.space.unwrap_int(self.fetch(13)) - self.color_map = self.fetch(14) + self.sourceSkewAndPointerInit() + self.copyLoop() - def clip_range(self): - if self.dest_x >= self.clip_x: - self.sx = self.source_x - self.dx = self.dest_x - self.w = self.width - else: - self.sx = self.source_x + (self.clip_x - self.dest_x) - self.w = self.width - (self.clip_x - self.dest_x) - self.dx = self.clip_x - if self.dx + self.w > self.clip_x + self.clip_width: - self.w = self.w - (self.dx + self.w - (self.clip_x + self.clip_width)) - if self.dest_y >= self.clip_y: - self.sy = self.source_y - self.dy = self.dest_y - self.h = self.height - else: - self.sy = self.source_y + self.clip_y - self.dest_y - self.h = self.height - self.clip_y - self.dest_y - self.dy = self.clip_y - if self.dy + self.h > self.clip_y + self.clip_height: - self.h = self.h - (self.dy + self.h - (self.clip_y + self.clip_height)) - if self.source_form is None: - return - if self.sx < 0: - self.dx = self.dx - self.sx - self.w = self.w + self.sx - self.sx = 0 - if self.sx + self.w > self.source_form.width: - self.w = self.w - (self.sx + self.w - self.source_form.width) - if self.sy < 0: - self.dy = self.dy - self.sy - self.h = self.h + self.sy - self.sy = 0 - if self.sy + self.h > self.source_form.height: - self.h = self.h - (self.sy + self.h - self.source_form.height) + def clipRange(self): + # intersect with destForm bounds + if self.clipX < 0: + self.clipW += self.clipX + self.clipX = 0 + if self.clipY < 0: + self.clipH += self.clipY + self.clipY = 0 + if self.clipX + self.clipW > self.dest.width: + self.clipW = self.dest.width - self.clipX + if self.clipY + self.clipH > 
self.dest.height: + self.clipH = self.dest.height - self.clipY + + # intersect with clipRect + leftOffset = max(self.clipY - self.destY, 0) + self.sx = self.sourceX + leftOffset + self.dx = self.destX + leftOffset + self.bbW = self.width - leftOffset + rightOffset = + def compute_masks(self): self.dest_bits = self.dest_form.w_bits From noreply at buildbot.pypy.org Thu Dec 19 14:37:29 2013 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 19 Dec 2013 14:37:29 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: missing emitting_an_operation_that_can_collect() for the inevitable-fallback Message-ID: <20131219133729.C448B1D2314@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68485:853e37ea12e1 Date: 2013-12-19 14:36 +0100 http://bitbucket.org/pypy/pypy/changeset/853e37ea12e1/ Log: missing emitting_an_operation_that_can_collect() for the inevitable- fallback diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -154,6 +154,9 @@ self.newops.append(op) continue # ---------- fall-back ---------- + # Check that none of the ops handled here can_collect + # or cause a transaction break. This is not done by + # the fallback here self.fallback_inevitable(op) debug_print("fallback for", op.repr()) # @@ -316,6 +319,7 @@ def fallback_inevitable(self, op): self.known_category.clear() if not self.always_inevitable: + self.emitting_an_operation_that_can_collect() self._do_stm_call('stm_try_inevitable', [], None) self.always_inevitable = True self.newops.append(op) From noreply at buildbot.pypy.org Thu Dec 19 15:22:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 15:22:10 +0100 (CET) Subject: [pypy-commit] stmgc c5: A completely new page doesn't need a version. Message-ID: <20131219142210.5AF011C1358@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r562:fec50fbe7690 Date: 2013-12-19 12:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/fec50fbe7690/ Log: A completely new page doesn't need a version. 
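A note on how this changeset fits the version scheme set up in the preceding c5 commits: transaction versions are always even (the shared counter is bumped by 2 and, as the diff below asserts, is never smaller than 2), and at the start of the next transaction any page still being allocated into is stamped with transaction_version - 1, an odd value, so that must_merge_page() treats it like a locally modified page. A freshly reserved page can therefore be left at version 0, which can never fall within 1 of a real transaction version. The standalone sketch below only illustrates that check; it is not code from the repository, and the version numbers used in main() are made-up example values.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the test in must_merge_page() in c5/core.c: a page needs merging
       if it was modified by the current (even) transaction, or if it carries the
       odd transaction_version - 1 stamp given to pages holding new objects. */
    static int must_merge(uint16_t transaction_version, uint32_t page_version)
    {
        return ((uint32_t)(transaction_version - page_version)) <= 1;
    }

    int main(void)
    {
        uint16_t tv = 6;                   /* hypothetical current transaction (even) */
        printf("%d\n", must_merge(tv, 6)); /* 1: page modified by this transaction */
        printf("%d\n", must_merge(tv, 5)); /* 1: page received new objects (tv - 1) */
        printf("%d\n", must_merge(tv, 4)); /* 0: last touched by an older transaction */
        printf("%d\n", must_merge(tv, 0)); /* 0: completely new page, version unneeded */
        return 0;
    }
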
diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -250,8 +250,8 @@ { struct page_header_s *newpage = _stm_reserve_page(); newpage->modif_head = 0xff; - newpage->kind = i; /* object size in words */ - newpage->version = stm_local.transaction_version; + newpage->kind = i; /* object size in words */ + newpage->version = 0; /* a completely new page doesn't need a version */ stm_local.alloc[i].next = ((char *)(newpage + 1)) + (i * 8); stm_local.alloc[i].end = ((char *)newpage) + 4096; assert(stm_local.alloc[i].next <= stm_local.alloc[i].end); @@ -498,6 +498,7 @@ __sync_fetch_and_add(&d->next_transaction_version, 2u); assert(stm_local.transaction_version <= 0xffff); assert((stm_local.transaction_version & 1) == 0); /* EVEN number */ + assert(stm_local.transaction_version >= 2); struct write_history_s *cur = NULL; if (stm_local.writes_by_this_transaction != NULL) { From noreply at buildbot.pypy.org Thu Dec 19 15:22:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 15:22:11 +0100 (CET) Subject: [pypy-commit] stmgc c5: Add three XXX'es Message-ID: <20131219142211.995FF1C1358@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r563:2ec0dda36009 Date: 2013-12-19 13:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/2ec0dda36009/ Log: Add three XXX'es diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -236,7 +236,7 @@ struct write_history_s *cur = stm_local.writes_by_this_transaction; uint64_t i = cur->nb_updates++; size_t history_size_max = 4096 - (((uintptr_t)cur) & 4095); - assert(sizeof(*cur) + cur->nb_updates * 8 <= history_size_max); + assert(sizeof(*cur) + cur->nb_updates * 8 <= history_size_max);//XXX cur->updates[i * 2 + 0] = get_local_index(page); cur->updates[i * 2 + 1] = new_pgoff; } @@ -262,7 +262,7 @@ { assert(size % 8 == 0); size_t i = size / 8; - assert(2 <= i && i < LARGE_OBJECT_WORDS); + assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX struct alloc_for_size_s *alloc = &stm_local.alloc[i]; char *p = alloc->next; @@ -356,8 +356,8 @@ abort(); } - stm_set_read_marker_number(42); - assert(stm_get_read_marker_number() == 42); + assert((stm_set_read_marker_number(42), + stm_get_read_marker_number() == 42)); stm_set_read_marker_number(1); } @@ -476,7 +476,7 @@ struct object_s *myobj = (struct object_s *) (((char *)obj) + diff_to_mypage); assert(obj->flags == 0x42); - assert(myobj->flags == 0x42); // || myobj->flags == 0); + assert(myobj->flags == 0x42); if (_stm_was_read(myobj)) { fprintf(stderr, "# conflict: %p\n", myobj); conflict = 1; @@ -496,7 +496,7 @@ struct shared_descriptor_s *d = stm_shared_descriptor; stm_local.transaction_version = __sync_fetch_and_add(&d->next_transaction_version, 2u); - assert(stm_local.transaction_version <= 0xffff); + assert(stm_local.transaction_version <= 0xffff);//XXX assert((stm_local.transaction_version & 1) == 0); /* EVEN number */ assert(stm_local.transaction_version >= 2); @@ -533,6 +533,8 @@ if (ptr != NULL) { page = (struct page_header_s *)(((uintptr_t)ptr) & ~4095); page->version = stm_local.transaction_version - 1; + /* ^^^ this is one of the only writes to shared memory; + usually it is read-only */ } } } From noreply at buildbot.pypy.org Thu Dec 19 15:22:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 15:22:12 +0100 (CET) Subject: [pypy-commit] stmgc c5: Move stm_read() and stm_write() to core.h, from where calls to them can Message-ID: <20131219142212.C5E631C1358@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: 
r564:a41dc13ba646 Date: 2013-12-19 14:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/a41dc13ba646/ Log: Move stm_read() and stm_write() to core.h, from where calls to them can be inlined. diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -71,13 +71,6 @@ uint32_t pgoff; /* the mm page offset */ }; -struct read_marker_s { - /* We associate a single byte to every object, by simply dividing - the address of the object by 16. The number in this single byte - gives the last time we have read the object. See stm_read(). */ - unsigned char c; -}; - struct write_history_s { struct write_history_s *previous_older_transaction; uint16_t transaction_version; @@ -116,47 +109,36 @@ /* This is just a bunch of global variables, but during testing, we save it all away and restore different ones to simulate different forked processes. */ - char *read_markers; - struct read_marker_s *current_read_markers; - uint16_t transaction_version; /* always EVEN */ struct write_history_s *base_page_mapping; struct write_history_s *writes_by_this_transaction; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; + char *read_markers; +#ifdef STM_TESTS + struct _read_marker_s *_current_read_markers; + uint16_t _transaction_version; +#endif }; struct shared_descriptor_s *stm_shared_descriptor; +struct _read_marker_s *stm_current_read_markers; struct local_data_s stm_local; +uint16_t stm_transaction_version; /* always EVEN */ -void stm_read(struct object_s *object) -{ - stm_local.current_read_markers[((uintptr_t)object) >> 4].c = - (unsigned char)(uintptr_t)stm_local.current_read_markers; -} - _Bool _stm_was_read(struct object_s *object) { - return (stm_local.current_read_markers[((uintptr_t)object) >> 4].c == - (unsigned char)(uintptr_t)stm_local.current_read_markers); + return (stm_current_read_markers[((uintptr_t)object) >> 4].c == + (unsigned char)(uintptr_t)stm_current_read_markers); } -static struct read_marker_s *get_current_read_marker(struct object_s *object) +static struct _read_marker_s *get_current_read_marker(struct object_s *object) { - return stm_local.current_read_markers + (((uintptr_t)object) >> 4); -} - -void _stm_write_slowpath(struct object_s *); - -void stm_write(struct object_s *object) -{ - if (__builtin_expect(object->modified != stm_local.transaction_version, - 0)) - _stm_write_slowpath(object); + return stm_current_read_markers + (((uintptr_t)object) >> 4); } _Bool _stm_was_written(struct object_s *object) { - return (object->modified == stm_local.transaction_version); + return (object->modified == stm_transaction_version); } @@ -216,21 +198,21 @@ page = (struct page_header_s *)(((uintptr_t)object) & ~4095); assert(2 <= page->kind && page->kind < LARGE_OBJECT_WORDS); - if (page->version != stm_local.transaction_version) { + if (page->version != stm_transaction_version) { struct page_header_s *newpage = _stm_reserve_page(); uint32_t old_pgoff = get_pgoff(page); uint32_t new_pgoff = get_pgoff(newpage); pagecopy(newpage, page); - newpage->version = stm_local.transaction_version; + newpage->version = stm_transaction_version; newpage->modif_head = 0xff; newpage->pgoff = new_pgoff; - assert(page->version != stm_local.transaction_version); + assert(page->version != stm_transaction_version); assert(page->pgoff == old_pgoff); remap_file_pages((void *)page, 4096, 0, new_pgoff, MAP_PAGES_FLAGS); - assert(page->version == stm_local.transaction_version); + assert(page->version == stm_transaction_version); assert(page->pgoff == new_pgoff); struct write_history_s *cur = 
stm_local.writes_by_this_transaction; @@ -240,7 +222,7 @@ cur->updates[i * 2 + 0] = get_local_index(page); cur->updates[i * 2 + 1] = new_pgoff; } - object->modified = stm_local.transaction_version; + object->modified = stm_transaction_version; object->modif_next = page->modif_head; page->modif_head = (uint8_t)(((uintptr_t)object) >> 4); assert(page->modif_head != 0xff); @@ -271,7 +253,7 @@ p = _stm_alloc_next_page(i); struct object_s *result = (struct object_s *)p; - result->modified = stm_local.transaction_version; + result->modified = stm_transaction_version; /*result->modif_next is uninitialized*/ result->flags = 0x42; /* for debugging */ return result; @@ -280,17 +262,17 @@ unsigned char stm_get_read_marker_number(void) { - return (unsigned char)(uintptr_t)stm_local.current_read_markers; + return (unsigned char)(uintptr_t)stm_current_read_markers; } void stm_set_read_marker_number(uint8_t num) { char *stm_pages = ((char *)stm_shared_descriptor) + 4096; uintptr_t delta = ((uintptr_t)stm_pages) >> 4; - struct read_marker_s *crm = (struct read_marker_s *)stm_local.read_markers; - stm_local.current_read_markers = crm - delta; + struct _read_marker_s *crm = (struct _read_marker_s *)stm_local.read_markers; + stm_current_read_markers = crm - delta; assert(stm_get_read_marker_number() == 0); - stm_local.current_read_markers += num; + stm_current_read_markers += num; } static void clear_all_read_markers(void) @@ -405,15 +387,14 @@ /* The remote page was modified. Look at the local page (at 'page'). If 'page->version' is equal to: - - stm_local.transaction_version: the local page was - also modified in this transaction. Then we need to merge. + - stm_transaction_version: the local page was also modified in + this transaction. Then we need to merge. - - stm_local.transaction_version - 1: the local page was - not, strictly speaking, modified, but *new* objects have - been written to it. In order not to loose them, ask for - a merge too. + - stm_transaction_version - 1: the local page was not, strictly + speaking, modified, but *new* objects have been written to it. + In order not to loose them, ask for a merge too. 
*/ - return ((uint32_t)(stm_local.transaction_version - page->version)) <= 1; + return ((uint32_t)(stm_transaction_version - page->version)) <= 1; } static int history_fast_forward(struct write_history_s *new, int conflict) @@ -494,11 +475,11 @@ void stm_start_transaction(void) { struct shared_descriptor_s *d = stm_shared_descriptor; - stm_local.transaction_version = + stm_transaction_version = __sync_fetch_and_add(&d->next_transaction_version, 2u); - assert(stm_local.transaction_version <= 0xffff);//XXX - assert((stm_local.transaction_version & 1) == 0); /* EVEN number */ - assert(stm_local.transaction_version >= 2); + assert(stm_transaction_version <= 0xffff);//XXX + assert((stm_transaction_version & 1) == 0); /* EVEN number */ + assert(stm_transaction_version >= 2); struct write_history_s *cur = NULL; if (stm_local.writes_by_this_transaction != NULL) { @@ -517,7 +498,7 @@ cur = (struct write_history_s *)(newpage + 1); } cur->previous_older_transaction = NULL; - cur->transaction_version = stm_local.transaction_version; + cur->transaction_version = stm_transaction_version; cur->nb_updates = 0; stm_local.writes_by_this_transaction = cur; @@ -532,7 +513,7 @@ char *ptr = stm_local.alloc[i].next; if (ptr != NULL) { page = (struct page_header_s *)(((uintptr_t)ptr) & ~4095); - page->version = stm_local.transaction_version - 1; + page->version = stm_transaction_version - 1; /* ^^^ this is one of the only writes to shared memory; usually it is read-only */ } @@ -563,7 +544,7 @@ } if (stm_get_read_marker_number() < 0xff) { - stm_local.current_read_markers++; + stm_current_read_markers++; } else { clear_all_read_markers(); @@ -580,6 +561,8 @@ page_count * sizeof(uint32_t)); assert(p != NULL); memcpy(p, &stm_local, sizeof(stm_local)); + p->_current_read_markers = stm_current_read_markers; + p->_transaction_version = stm_transaction_version; pgoffs = (uint32_t *)(p + 1); pgoffs[0] = page_count; @@ -607,6 +590,8 @@ } memcpy(&stm_local, p, sizeof(struct local_data_s)); + stm_current_read_markers = p->_current_read_markers; + stm_transaction_version = p->_transaction_version; free(p); } #endif diff --git a/c5/core.h b/c5/core.h --- a/c5/core.h +++ b/c5/core.h @@ -10,6 +10,19 @@ uint8_t flags; }; +struct _read_marker_s { + /* We associate a single byte to every object, by simply dividing + the address of the object by 16. The number in this single byte + gives the last time we have read the object. See stm_read(). 
*/ + unsigned char c; +}; + +extern struct _read_marker_s *stm_current_read_markers; +extern uint16_t stm_transaction_version; + + +/************************************************************/ + void stm_setup(void); void stm_setup_process(void); @@ -17,8 +30,20 @@ _Bool stm_stop_transaction(void); struct object_s *stm_allocate(size_t size); -void stm_read(struct object_s *object); -void stm_write(struct object_s *object); +static inline void stm_read(struct object_s *object) +{ + stm_current_read_markers[((uintptr_t)object) >> 4].c = + (unsigned char)(uintptr_t)stm_current_read_markers; +} + +void _stm_write_slowpath(struct object_s *); + +static inline void stm_write(struct object_s *object) +{ + if (__builtin_expect(object->modified != stm_transaction_version, 0)) + _stm_write_slowpath(object); +} + _Bool _stm_was_read(struct object_s *object); _Bool _stm_was_written(struct object_s *object); From noreply at buildbot.pypy.org Thu Dec 19 15:22:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 15:22:13 +0100 (CET) Subject: [pypy-commit] stmgc c5: The next test to pass Message-ID: <20131219142213.E7B351C1358@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r565:99e3546e6233 Date: 2013-12-19 14:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/99e3546e6233/ Log: The next test to pass diff --git a/c5/test/test_basic.py b/c5/test/test_basic.py --- a/c5/test/test_basic.py +++ b/c5/test/test_basic.py @@ -234,3 +234,14 @@ assert p1[8] == 'B' assert p2[8] == 'b' assert p3[8] == ':' + + def test_overflow_write_history(self): + stm_start_transaction() + plist = [stm_allocate(n) for n in range(16, 256, 8)] + stm_stop_transaction(False) + # + for i in range(20): + stm_start_transaction() + for p in plist: + stm_write(p) + stm_stop_transaction(False) From noreply at buildbot.pypy.org Thu Dec 19 15:22:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 15:22:15 +0100 (CET) Subject: [pypy-commit] stmgc c5: in-progress Message-ID: <20131219142215.131E21C1358@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r566:3c9724af3fb5 Date: 2013-12-19 14:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/3c9724af3fb5/ Log: in-progress diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -87,15 +87,15 @@ char _pad0[CACHE_LINE_SIZE]; }; union { - uint64_t index_page_never_used; + uint64_t volatile index_page_never_used; char _pad1[CACHE_LINE_SIZE]; }; union { - unsigned int next_transaction_version; /* always EVEN */ + unsigned int volatile next_transaction_version; /* always EVEN */ char _pad2[CACHE_LINE_SIZE]; }; union { - struct write_history_s *most_recent_committed_transaction; + struct write_history_s *volatile most_recent_committed_transaction; char _pad3[CACHE_LINE_SIZE]; }; }; @@ -166,6 +166,13 @@ return result; } +static struct write_history_s *_reserve_page_write_history(void) +{ + struct page_header_s *newpage = _stm_reserve_page(); + newpage->kind = PGKIND_WRITE_HISTORY; + return (struct write_history_s *)(newpage + 1); +} + static uint32_t get_pgoff(struct page_header_s *page) { @@ -216,9 +223,17 @@ assert(page->pgoff == new_pgoff); struct write_history_s *cur = stm_local.writes_by_this_transaction; + size_t history_size_max = 4096 - (((uintptr_t)cur) & 4095); + if (sizeof(*cur) + (cur->nb_updates + 1) * 8 > history_size_max) { + /* The buffer would overflow its page. Allocate a new one. 
*/ + cur = _reserve_page_write_history(); + cur->previous_older_transaction = + stm_local.writes_by_this_transaction; + cur->transaction_version = stm_transaction_version; + cur->nb_updates = 0; + stm_local.writes_by_this_transaction = cur; + } uint64_t i = cur->nb_updates++; - size_t history_size_max = 4096 - (((uintptr_t)cur) & 4095); - assert(sizeof(*cur) + cur->nb_updates * 8 <= history_size_max);//XXX cur->updates[i * 2 + 0] = get_local_index(page); cur->updates[i * 2 + 1] = new_pgoff; } @@ -487,15 +502,11 @@ char *next, *page_limit = (char *)cur; page_limit += 4096 - (((uintptr_t)page_limit) & 4095); next = (char *)(cur + 1) + 8 * cur->nb_updates; - if (page_limit - next < sizeof(struct write_history_s) + 8) - cur = NULL; - else + if (page_limit - next >= sizeof(struct write_history_s) + 8) cur = (struct write_history_s *)next; } if (cur == NULL) { - struct page_header_s *newpage = _stm_reserve_page(); - newpage->kind = PGKIND_WRITE_HISTORY; - cur = (struct write_history_s *)(newpage + 1); + cur = _reserve_page_write_history(); } cur->previous_older_transaction = NULL; cur->transaction_version = stm_transaction_version; @@ -527,6 +538,12 @@ int conflict = 0; //fprintf(stderr, "stm_stop_transaction\n"); + struct write_history_s *cur_head = stm_local.writes_by_this_transaction; + struct write_history_s *cur_tail = cur_head; + while (cur_tail->previous_older_transaction != NULL) { + cur_tail = cur_tail->previous_older_transaction; + } + while (1) { struct write_history_s *hist = d->most_recent_committed_transaction; if (hist != stm_local.base_page_mapping) { @@ -536,10 +553,9 @@ else continue; /* retry from the start of the loop */ } - struct write_history_s *cur = stm_local.writes_by_this_transaction; - cur->previous_older_transaction = hist; + cur_tail->previous_older_transaction = hist; if (__sync_bool_compare_and_swap(&d->most_recent_committed_transaction, - hist, cur)) + hist, cur_head)) break; } From noreply at buildbot.pypy.org Thu Dec 19 15:22:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 15:22:16 +0100 (CET) Subject: [pypy-commit] stmgc c5: Bah, fix Message-ID: <20131219142216.3A3D21C1358@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r567:57a9caca7c44 Date: 2013-12-19 15:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/57a9caca7c44/ Log: Bah, fix diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -502,12 +502,15 @@ char *next, *page_limit = (char *)cur; page_limit += 4096 - (((uintptr_t)page_limit) & 4095); next = (char *)(cur + 1) + 8 * cur->nb_updates; - if (page_limit - next >= sizeof(struct write_history_s) + 8) + if (page_limit - next < sizeof(struct write_history_s) + 8) + cur = NULL; + else cur = (struct write_history_s *)next; } if (cur == NULL) { cur = _reserve_page_write_history(); } + assert(cur != d->most_recent_committed_transaction); cur->previous_older_transaction = NULL; cur->transaction_version = stm_transaction_version; cur->nb_updates = 0; @@ -553,6 +556,7 @@ else continue; /* retry from the start of the loop */ } + assert(cur_head == stm_local.writes_by_this_transaction); cur_tail->previous_older_transaction = hist; if (__sync_bool_compare_and_swap(&d->most_recent_committed_transaction, hist, cur_head)) From noreply at buildbot.pypy.org Thu Dec 19 17:50:53 2013 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 19 Dec 2013 17:50:53 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: finish copying squeakjs bitblt Message-ID: 
<20131219165053.E2D081C1164@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r543:fb979403239a Date: 2013-12-19 16:26 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/fb979403239a/ Log: finish copying squeakjs bitblt diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -649,7 +649,7 @@ if (combinationRule == 22 or combinationRule == 32): s_frame.pop() # pops the next value under BitBlt s_frame.push(s_bitblt.bitCount()) - else if w_dest_form.is_same_object(space.objtable['w_display']): + elif w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = w_dest_form.fetch(space, 0) assert isinstance(w_bitmap, model.W_DisplayBitmap) w_bitmap.flush_to_screen() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1178,19 +1178,19 @@ def loadBitBlt(self): self.success = True - self.destForm = self.fetch(0) - self.dest = self.loadForm(self.destForm) - self.sourceForm = self.fetch(1) - if self.sourceForm is not self.space.w_nil: - self.source = self.loadForm(self.sourceForm) + self.w_destForm = self.fetch(0) + self.dest = self.loadForm(self.w_destForm) + self.w_sourceForm = self.fetch(1) + if self.w_sourceForm is not self.space.w_nil: + self.source = self.loadForm(self.w_sourceForm) else: self.source = None self.halftone = self.loadHalftone(self.fetch(2)) self.combinationRule = self.space.unwrap_int(self.fetch(3)) self.destX = self.intOrIfNil(self.fetch(4), 0) self.destY = self.intOrIfNil(self.fetch(5), 0) - self.width = self.intOrIfNil(self.fetch(6), self.dest_form.width) - self.height = self.intOrIfNil(self.fetch(7), self.dest_form.height) + self.width = self.intOrIfNil(self.fetch(6), self.dest.width) + self.height = self.intOrIfNil(self.fetch(7), self.dest.height) self.clipX = self.intOrIfNil(self.fetch(10), 0) self.clipY = self.intOrIfNil(self.fetch(11), 0) self.clipW = self.intOrIfNil(self.fetch(12), self.width) @@ -1206,19 +1206,72 @@ def copyBits(self): self.bitCount = 0 self.clipRange() - if (self.bbW <= 0 ir self.bbH <= 0): + if (self.bbW <= 0 or self.bbH <= 0): return self.destMaskAndPointerInit() if not self.source: self.copyLoopNoSource() else: self.checkSourceOverlap() - if self.source.depth !== self.dest.depth: + if self.source.depth != self.dest.depth: self.copyLoopPixMap() else: self.sourceSkewAndPointerInit() self.copyLoop() + def checkSourceOverlap(self): + if (self.w_sourceForm is self.w_destForm and self.dy >= self.sy): + if (self.dy > self.sy): + self.vDir = -1 + self.sy = (self.sy + self.bbH) - 1 + self.dy = (self.dy + self.bbH) - 1 + else: + if (self.dy == self.sy and self.dx > self.sx): + self.hDir = -1 + self.sx = (self.sx + self.bbW) - 1 # start at right + self.dx = (self.dx + self.bbW) - 1 + if (self.nWords > 1): + t = self.mask1 # and fix up masks + self.mask1 = self.mask2 + self.mask2 = t + self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) # recompute since dx, dy change + self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) + + def sourceSkewAndPointerInit(self): + pixPerM1 = self.dest.pixPerWord - 1 # Pix per word is power of two, so self makes a mask + sxLowBits = self.sx & pixPerM1 + dxLowBits = self.dx & pixPerM1 + # check if need to preload buffer + # (i.e., two words of source needed for first word of destination) + dWid = -1 + if (self.hDir > 0): + if self.bbW < (self.dest.pixPerWord - dxLowBits): + dWid = self.bbW + else: + dWid = self.dest.pixPerWord - dxLowBits 
+ self.preload = (sxLowBits + dWid) > pixPerM1 + else: + if self.bbW < (dxLowBits + 1): + dWid = self.bbW + else: + dWid = dxLowBits + 1 + self.preload = ((sxLowBits - dWid) + 1) < 0 + + if self.source.msb: + self.skew = (sxLowBits - dxLowBits) * self.dest.depth + else: + self.skew = (dxLowBits - sxLowBits) * self.dest.depth + if (self.preload): + if (self.skew < 0): + self.skew += 32 + else: + self.skew -= 32 + # calculate increments from end of one line to start of next + self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / (32 / self.source.depth) |0) + self.sourceDelta = (self.source.pitch * self.vDir) - (self.nWords * self.hDir) + if (self.preload): + self.sourceDelta -= self.hDir + def clipRange(self): # intersect with destForm bounds if self.clipX < 0: @@ -1231,174 +1284,399 @@ self.clipW = self.dest.width - self.clipX if self.clipY + self.clipH > self.dest.height: self.clipH = self.dest.height - self.clipY - # intersect with clipRect - leftOffset = max(self.clipY - self.destY, 0) + leftOffset = max(self.clipX - self.destX, 0) self.sx = self.sourceX + leftOffset self.dx = self.destX + leftOffset self.bbW = self.width - leftOffset - rightOffset = + rightOffset = (self.dx + self.bbW) - (self.clipX + self.clipW) + if rightOffset > 0: + self.bbW -= rightOffset + topOffset = max(self.clipY - self.destY, 0) + self.sy = self.sourceY + topOffset + self.dy = self.destY + topOffset + self.bbH = self.height - topOffset + bottomOffset = (self.dy + self.bbH) - (self.clipY + self.clipH) + if bottomOffset > 0: + self.bbH -= bottomOffset + # intersect with sourceForm bounds + if not self.source: + return + if self.sx < 0: + self.dx -= self.sx + self.bbW += self.sx + self.sx = 0 + if (self.sx + self.bbW) > self.source.width: + self.bbW -= (self.sx + self.bbW) - self.source.width + if self.sy < 0: + self.dy -= self.sy + self.bbH += self.sy + self.sy = 0 + if (self.sy + self.bbH) > self.source.height: + self.bbH -= (self.sy + self.bbH) - self.source.height + def rshift(self, val, n): + return rarithmetic.intmask(val >> n if val >= 0 else (val + 0x100000000) >> n) - def compute_masks(self): - self.dest_bits = self.dest_form.w_bits - self.dest_raster = (self.dest_form.width - 1) // BitBltShadow.WordSize + 1 - if self.source_form is not None: - self.source_bits = self.source_form.w_bits - self.source_raster = (self.source_form.width - 1) // BitBltShadow.WordSize + 1 + def destMaskAndPointerInit(self): + pixPerM1 = self.dest.pixPerWord - 1 # pixPerWord is power-of-two, so this makes a mask + startBits = self.dest.pixPerWord - (self.dx & pixPerM1) # how many px in 1st word + endBits = (((self.dx + self.bbW) - 1) & pixPerM1) + 1 + if self.dest.msb: + self.mask1 = self.rshift(0xFFFFFFFF, (32 - (startBits * self.dest.depth))) + self.mask2 = 0xFFFFFFFF << (32 - (endBits * self.dest.depth)) else: - self.source_bits = None - self.source_raster = 0 - # Halftone form is set during synchronization - self.skew = (self.sx - self.dx) & (BitBltShadow.WordSize - 1) - start_bits = BitBltShadow.WordSize - (self.dx & (BitBltShadow.WordSize - 1)) - self.mask1 = BitBltShadow.RightMasks[start_bits] - end_bits = (BitBltShadow.WordSize - 1) - ((self.dx + self.w - 1) & (BitBltShadow.WordSize - 1)) - self.mask2 = ~BitBltShadow.RightMasks[end_bits] - if self.skew == 0: - self.skew_mask = rarithmetic.r_uint(0) + self.mask1 = 0xFFFFFFFF << (32 - (startBits * self.dest.depth)) + self.mask2 = self.rshift(0xFFFFFFFF, (32 - (endBits * self.dest.depth))) + if self.bbW < startBits: + self.mask1 = self.mask1 & self.mask2 + 
self.mask2 = 0 + self.nWords = 1 else: - self.skew_mask = BitBltShadow.RightMasks[BitBltShadow.WordSize - self.skew] - if self.w < start_bits: - self.mask1 = self.mask1 & self.mask2 - self.mask2 = rarithmetic.r_uint(0) - self.n_words = 1 + self.nWords = (((self.bbW - startBits) + pixPerM1) / self.dest.pixPerWord | 0) + 1 + self.hDir = 1 + self.vDir = 1 + self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) + self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) + + def copyLoopNoSource(self): + halftoneWord = 0xFFFFFFFF + for i in range(self.bbH): + if self.halftone: + halftoneWord = self.halftone[(self.dy + i) % len(self.halftone)] + # first word in row is masked + destMask = self.mask1 + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += 1 + destMask = 0xFFFFFFFF + # the central horizontal loop requires no store masking + if self.combinationRule == 3: # store rule requires no dest merging + for word in range(2, self.nWords): + self.dest.w_bits.setword(self.destIndex, halftoneWord) + self.destIndex += 1 + else: + for word in range(2, self.nWords): + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + self.dest.w_bits.setword(self.destIndex, mergeWord) + self.destIndex += 1 + # last word in row is masked + if self.nWords > 1: + destMask = self.mask2 + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += 1 + self.destIndex += self.destDelta + + def copyLoopPixMap(self): + # This version of the inner loop maps source pixels + # to a destination form with different depth. Because it is already + # unweildy, the loop is not unrolled as in the other versions. + # Preload, skew and skewMask are all overlooked, since pickSourcePixels + # delivers its destination word already properly aligned. + # Note that pickSourcePixels could be copied in-line at the top of + # the horizontal loop, and some of its inits moved out of the loop. + # + # The loop has been rewritten to use only one pickSourcePixels call. + # The idea is that the call itself could be inlined. If we decide not + # to inline pickSourcePixels we could optimize the loop instead. + sourcePixMask = BitBltShadow.MaskTable[this.source.depth] + destPixMask = BitBltShadow.MaskTable[this.dest.depth] + self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / self.source.pixPerWord | 0) + scrStartBits = self.source.pixPerWord - (self.sx & (self.source.pixPerWord - 1)) + if self.bbW < scrStartBits: + nSourceIncs = 0 else: - self.n_words = (self.w - start_bits - 1) // BitBltShadow.WordSize + 2 + nSourceIncs = ((self.bbW - scrStartBits) / self.source.pixPerWord | 0) + 1 + # Note following two items were already calculated in destmask setup! + self.sourceDelta = self.source.pitch - nSourceIncs + startBits = self.dest.pixPerWord - (self.dx & (self.dest.pixPerWord - 1)) + endBits = (((self.dx + self.bbW) - 1) & (self.dest.pixPerWord - 1)) + 1 + if self.bbW < startBits: + startBits = self.bbW # ?! 
+ srcShift = (self.sx & (self.source.pixPerWord - 1)) * self.source.depth + dstShift = (self.dx & (self.dest.pixPerWord - 1)) * self.dest.depth + srcShiftInc = self.source.depth + dstShiftInc = self.dest.depth + dstShiftLeft = 0 + if (self.source.msb): + srcShift = (32 - self.source.depth) - srcShift + srcShiftInc = -srcShiftInc - def check_overlap(self): - self.h_dir = 1 - self.v_dir = 1 - if (self.source_form is not None and - self.source_form.w_self().is_same_object(self.dest_form.w_self()) and - self.dy >= self.sy): - if self.dy > self.sy: - self.v_dir = -1 - self.sy = self.sy + self.h - 1 - self.dy = self.dy + self.h - 1 - elif self.dx > self.sx: - self.h_dir = -1 - self.sx = self.sx + self.w - 1 - self.dx = self.dx + self.w - 1 - self.skew_mask = ~self.skew_mask - self.mask1, self.mask2 = self.mask2, self.mask1 + if (self.dest.msb): + dstShift = (32 - self.dest.depth) - dstShift + dstShiftInc = -dstShiftInc + dstShiftLeft = 32 - self.dest.depth - def calculate_offsets(self): - self.preload = (self.source_form is not None and ( - self.skew != 0 and - self.skew <= (self.sx & (BitBltShadow.WordSize - 1)))) - if self.h_dir < 0: - self.preload = not self.preload - self.source_index = self.sy * self.source_raster + (self.sx // BitBltShadow.WordSize) - self.dest_index = self.dy * self.dest_raster + (self.dx // BitBltShadow.WordSize) - self.source_delta = (self.source_raster * - self.v_dir - - ((self.n_words + (1 if self.preload else 0)) * - self.h_dir)) - self.dest_delta = self.dest_raster * self.v_dir - self.n_words * self.h_dir + for i in range(self.bbH): + if self.halftone: + halftoneWord = self.halftone[(self.dy + i) % self.halftone.length] + else: + halftoneWord = 0xFFFFFFFF + self.srcBitShift = srcShift + self.dstBitShift = dstShift + self.destMask = self.mask1 + nPix = startBits + words = self.nWords + # Here is the horizontal loop... + for word in range(words + 1): + skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) + # align next word to leftmost pixel + self.dstBitShift = dstShiftLeft + if self.destMask == 0xFFFFFFFF: # avoid read-modify-write + self.dest.w_bits.setword( + self.destIndex, + self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) + ) + else: # General version using dest masking + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord & self.destMask) + destWord = (self.destMask & mergeWord) | (destWord & (~self.destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) - def copy_loop(self): - space = self.space - no_skew_mask = ~self.skew_mask - for i in xrange(1, self.h + 1): - if self.halftone_bits: - halftone_word = self.halftone_bits[self.dy % len(self.halftone_bits)] - self.dy = self.dy + self.v_dir + self.destIndex += 1 + if (words == 2): # is the next word the last word? + self.destMask = self.mask2 + nPix = endBits + else: # use fullword mask for inner loop + self.destMask = 0xFFFFFFFF + nPix = self.dest.pixPerWord + self.sourceIndex += self.sourceDelta + self.destIndex += self.destDelta + + def pickSourcePixels(self, nPixels, srcMask, dstMask, srcShiftInc, dstShiftInc): + # Pick nPix pixels starting at srcBitIndex from the source, map by the + # color map, and justify them according to dstBitIndex in the resulting destWord. 
+ sourceWord = self.source.w_bits.getword(self.sourceIndex) + destWord = 0 + srcShift = self.srcBitShift # put into temp for speed + dstShift = self.dstBitShift + nPix = nPixels + # always > 0 so we can use do { } while(--nPix); + if (self.cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only + for px in range(nPix + 1): + sourcePix = self.rshift(sourceWord, srcShift) & srcMask + destPix = self.cmLookupTable[sourcePix & self.cmMask] + # adjust dest pix index + destWord = destWord | ((destPix & dstMask) << dstShift) + # adjust source pix index + dstShift += dstShiftInc + srcShift += srcShiftInc + if srcShift & 0xFFFFFFE0: + if (self.source.msb): + srcShift += 32 + else: + srcShift -= 32 + self.sourceIndex += 1 + sourceWord = self.source.w_bits.getword(self.sourceIndex) + else: + raise PrimitiveFailedError() + self.srcBitShift = srcShift # Store back + return destWord + + def rotate32bit(self, thisWord, prevWord, skewMask, notSkewMask, unskew): + if unskew < 0: + rotated = self.rshift(prevWord & notSkewMask, -unskew) + else: + rotated = (prevWord & notSkewMask) << unskew + if self.skew < 0: + rotated = rotated | self.rshift(thisWord & skewMask, -self.skew) + else: + rotated = rotated | (thisWord & skewMask) << self.skew + return rotated + + def copyLoop(self): + # self version of the inner loop assumes we do have a source + sourceLimit = self.source.w_bits.size() + hInc = self.hDir + # init skew (the difference in word alignment of source and dest) + unskew = 0 + skewMask = 0 + if (self.skew == -32): + self.skew = unskew = skewMask = 0 + else: + if (self.skew < 0): + unskew = self.skew + 32 + skewMask = 0xFFFFFFFF << -self.skew else: - halftone_word = BitBltShadow.AllOnes - skew_word = halftone_word - if self.preload: - prev_word = self.source_bits.getword(self.source_index) - self.source_index = self.source_index + self.h_dir + if (self.skew == 0): + unskew = 0 + skewMask = 0xFFFFFFFF + else: + unskew = self.skew - 32 + skewMask = self.rshift(0xFFFFFFFF, self.skew) + notSkewMask = ~skewMask + + # init halftones + halftoneWord = 0 + halftoneHeight = 0 + if (self.halftone): + halftoneWord = self.halftone[0] + halftoneHeight = len(self.halftone) + else: + halftoneWord = 0xFFFFFFFF + halftoneHeight = 0 + + # now loop over all lines + y = self.dy + for i in range(1, self.bbH + 1): + if (halftoneHeight > 1): + halftoneWord = self.halftone[y % halftoneHeight] + y += self.vDir + + if (self.preload): + prevWord = self.source.w_bits.getword(self.sourceIndex) + self.sourceIndex += hInc else: - prev_word = 0 - merge_mask = self.mask1 - for word in xrange(1, self.n_words + 1): - if self.source_form is not None: - prev_word = prev_word & self.skew_mask - if (self.source_index < 0 - or self.source_index >= self.source_bits.size()): - this_word = self.source_bits.getword(0) + prevWord = 0 + + destMask = self.mask1 + # pick up next word + thisWord = self.source.w_bits.getword(self.sourceIndex) + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + # The central horizontal loop requires no store masking + self.destIndex += hInc + destMask = 0xFFFFFFFF + if (self.combinationRule == 3): # Store mode avoids dest merge function + if ((self.skew == 0) and (halftoneWord 
== 0xFFFFFFFF)): + # Non-skewed with no halftone + if (self.hDir == -1): + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) + self.dest.w_bits.setword(self.destIndex, thisWord) + self.sourceIndex += hInc + self.destIndex += hInc else: - this_word = self.source_bits.getword(self.source_index) - skew_word = prev_word | (this_word & no_skew_mask) - prev_word = this_word - skew_word = (self.bit_shift(skew_word, self.skew) | - self.bit_shift(skew_word, self.skew - BitBltShadow.WordSize)) - merge_word = rarithmetic.r_uint(self.merge( - skew_word & halftone_word, - self.dest_bits.getword(self.dest_index) - )) - __new = ( - (merge_mask & merge_word) | - (~merge_mask & self.dest_bits.getword(self.dest_index)) - ) - self.dest_bits.setword(self.dest_index, __new) - self.source_index = self.source_index + self.h_dir - self.dest_index = self.dest_index + self.h_dir - if word == (self.n_words - 1): - merge_mask = self.mask2 + for word in range(2, self.nWords): + self.dest.w_bits.setword(self.destIndex, prevWord) + prevWord = self.source.w_bits.getword(self.sourceIndex) + self.destIndex += hInc + self.sourceIndex += hInc else: - merge_mask = BitBltShadow.AllOnes - self.source_index = self.source_index + self.source_delta - self.dest_index = self.dest_index + self.dest_delta + # skewed and/or halftoned + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + self.dest.w_bits.setword(self.destIndex, skewWord & halftoneWord) + self.destIndex += hInc + else: # Dest merging here... + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + mergeWord = self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) + self.dest.w_bits.setword(self.destIndex, mergeWord) + self.destIndex += hInc + # last word with masking and all + if (self.nWords > 1): + destMask = self.mask2 + if (self.sourceIndex >= 0 and self.sourceIndex < sourceLimit): + # NOTE: we are currently overrunning source bits in some cases + # self test makes up for it. 
+ thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += hInc + self.sourceIndex += self.sourceDelta + self.destIndex += self.destDelta - def bit_shift(self, target, amount): - if amount > 31 or amount < -31: - return 0 - elif amount > 0: - return (rarithmetic.r_uint(target) << amount) & BitBltShadow.AllOnes - elif amount == 0: - return target - else: - return (rarithmetic.r_uint(target) >> -amount) + def mergeFn(self, src, dest): + return rarithmetic.r_uint(self.merge( + rarithmetic.r_uint(src), + rarithmetic.r_uint(dest) + )) def merge(self, source_word, dest_word): assert isinstance(source_word, rarithmetic.r_uint) and isinstance(dest_word, rarithmetic.r_uint) - if self.combination_rule == 0: + if self.combinationRule == 0: return 0 - elif self.combination_rule == 1: + elif self.combinationRule == 1: return source_word & dest_word - elif self.combination_rule == 2: + elif self.combinationRule == 2: return source_word & ~dest_word - elif self.combination_rule == 3: + elif self.combinationRule == 3: return source_word - elif self.combination_rule == 4: + elif self.combinationRule == 4: return ~source_word & dest_word - elif self.combination_rule == 5: + elif self.combinationRule == 5: return dest_word - elif self.combination_rule == 6: + elif self.combinationRule == 6: return source_word ^ dest_word - elif self.combination_rule == 7: + elif self.combinationRule == 7: return source_word | dest_word - elif self.combination_rule == 8: + elif self.combinationRule == 8: return ~source_word & ~dest_word - elif self.combination_rule == 9: + elif self.combinationRule == 9: return ~source_word ^ dest_word - elif self.combination_rule == 10: + elif self.combinationRule == 10: return ~dest_word - elif self.combination_rule == 11: + elif self.combinationRule == 11: return source_word | ~dest_word - elif self.combination_rule == 12: + elif self.combinationRule == 12: return ~source_word - elif self.combination_rule == 13: + elif self.combinationRule == 13: return ~source_word | dest_word - elif self.combination_rule == 14: + elif self.combinationRule == 14: return ~source_word | ~dest_word - elif self.combination_rule == 15: - return dest_word & BitBltShadow.AllOnes - elif self.combination_rule >= 16 and self.combination_rule <= 24: + elif self.combinationRule >= 15 and self.combinationRule <= 17: return dest_word - elif self.combination_rule == 25: + elif self.combinationRule == 18: + return source_word + dest_word + elif self.combinationRule == 19: + return source_word - dest_word + elif self.combinationRule >= 20 and self.combinationRule <= 24: + return source_word + elif self.combinationRule == 25: if source_word == 0: return dest_word else: - return source_word | (dest_word & ~source_word) - elif 26 <= self.combination_rule <= 41: + return self.partitionedANDtonBitsnPartitions( + ~source_word, + dest_word, + self.dest.depth, + self.dest.pixPerWord + ) + elif self.combinationRule == 26: + return self.partitionedANDtonBitsnPartitions( + ~source_word, + dest_word, + self.dest.depth, + self.dest.pixPerWord + ) + elif 26 < self.combinationRule <= 41: return dest_word else: raise error.PrimitiveFailedError() + def 
partitionedANDtonBitsnPartitions(self, word1, word2, nBits, nParts): + # partition mask starts at the right + mask = BitBltShadow.MaskTable[nBits] + result = 0 + for i in range(1, nParts + 1): + if ((word1 & mask) == mask): + result = result | (word2 & mask) + # slide left to next partition + mask = mask << nBits + return result + def as_string(bb): return 'aBitBlt (destX: %d, destY: %d, sx: %d, sy: %d, dx: %d, dy: %d, w: %d, h: %d, hDir: %d, vDir: %d, sDelta: %d, dDelta: %d, skew: %d, sI: %d, dI: %d)' % ( bb.dest_x, bb.dest_y, bb.sx, bb.sy, bb.dx, bb.dy, bb.w, bb.h, bb.h_dir, bb.v_dir, bb.source_delta, bb.dest_delta, bb.skew, bb.source_index, bb.dest_index) @@ -1424,11 +1702,22 @@ self.width = self.space.unwrap_int(self.fetch(1)) self.height = self.space.unwrap_int(self.fetch(2)) self.depth = self.space.unwrap_int(self.fetch(3)) + if self.width < 0 or self.height < 0: + raise PrimitiveFailedError() + self.msb = self.depth > 0 + if self.depth < 0: + self.depth = -self.depth + if self.depth == 0: + raise PrimitiveFailedError() w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: - self.offset_x = self.space.unwrap_int(w_offset._fetch(0)) - self.offset_y = self.space.unwrap_int(w_offset._fetch(1)) + self.offsetX = self.space.unwrap_int(w_offset._fetch(0)) + self.offsetY = self.space.unwrap_int(w_offset._fetch(1)) + self.pixPerWord = 32 / self.depth + self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 + if self.w_bits.size() != (self.pitch * self.height): + raise PrimitiveFailedError() # def replace_bits(self): # w_bits = self.w_bits From noreply at buildbot.pypy.org Thu Dec 19 17:50:55 2013 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 19 Dec 2013 17:50:55 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix translation, displays 4.5 image, but crashes in various places along the copyLoopPixMap path Message-ID: <20131219165055.1CCC81C1164@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r544:d4bb7a494adf Date: 2013-12-19 17:50 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d4bb7a494adf/ Log: fix translation, displays 4.5 image, but crashes in various places along the copyLoopPixMap path diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -648,7 +648,7 @@ w_dest_form = w_rcvr.fetch(space, 0) if (combinationRule == 22 or combinationRule == 32): s_frame.pop() # pops the next value under BitBlt - s_frame.push(s_bitblt.bitCount()) + s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) elif w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = w_dest_form.fetch(space, 0) assert isinstance(w_bitmap, model.W_DisplayBitmap) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1132,7 +1132,7 @@ MaskTable = [rarithmetic.r_uint(0)] for i in xrange(WordSize): MaskTable.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) - AllOnes = rarithmetic.r_uint((2 ** WordSize) - 1) + AllOnes = rarithmetic.r_uint(0xFFFFFFFF) def sync_cache(self): self.loadBitBlt() @@ -1146,10 +1146,10 @@ def loadForm(self, w_form): try: if not isinstance(w_form, model.W_PointersObject): - raise PrimitiveFailedError() + raise error.PrimitiveFailedError() s_form = w_form.as_form_get_shadow(self.space) if not isinstance(s_form, FormShadow): - raise PrimitiveFailedError() + raise error.PrimitiveFailedError() return s_form except error.PrimitiveFailedError, e: w_self = self.w_self() @@ 
-1171,10 +1171,10 @@ def loadColorMap(self, w_color_map): if isinstance(w_color_map, model.W_WordsObject): - self.cmLookupTable = w_color_map.words - self.cmMask = len(self.cmLookupTable) - 1 + self.w_cmLookupTable = w_color_map + self.cmMask = self.w_cmLookupTable.size() - 1 else: - self.cmLookupTable = None + self.w_cmLookupTable = None def loadBitBlt(self): self.success = True @@ -1316,18 +1316,19 @@ self.bbH -= (self.sy + self.bbH) - self.source.height def rshift(self, val, n): - return rarithmetic.intmask(val >> n if val >= 0 else (val + 0x100000000) >> n) + # return rarithmetic.r_uint(val >> n if val >= 0 else (val + 0x100000000) >> n) + return rarithmetic.r_uint(rarithmetic.r_uint(val) >> n & BitBltShadow.AllOnes) def destMaskAndPointerInit(self): pixPerM1 = self.dest.pixPerWord - 1 # pixPerWord is power-of-two, so this makes a mask startBits = self.dest.pixPerWord - (self.dx & pixPerM1) # how many px in 1st word endBits = (((self.dx + self.bbW) - 1) & pixPerM1) + 1 if self.dest.msb: - self.mask1 = self.rshift(0xFFFFFFFF, (32 - (startBits * self.dest.depth))) - self.mask2 = 0xFFFFFFFF << (32 - (endBits * self.dest.depth)) + self.mask1 = self.rshift(BitBltShadow.AllOnes, (32 - (startBits * self.dest.depth))) + self.mask2 = BitBltShadow.AllOnes << (32 - (endBits * self.dest.depth)) else: - self.mask1 = 0xFFFFFFFF << (32 - (startBits * self.dest.depth)) - self.mask2 = self.rshift(0xFFFFFFFF, (32 - (endBits * self.dest.depth))) + self.mask1 = BitBltShadow.AllOnes << (32 - (startBits * self.dest.depth)) + self.mask2 = self.rshift(BitBltShadow.AllOnes, (32 - (endBits * self.dest.depth))) if self.bbW < startBits: self.mask1 = self.mask1 & self.mask2 self.mask2 = 0 @@ -1340,10 +1341,10 @@ self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) def copyLoopNoSource(self): - halftoneWord = 0xFFFFFFFF + halftoneWord = BitBltShadow.AllOnes for i in range(self.bbH): if self.halftone: - halftoneWord = self.halftone[(self.dy + i) % len(self.halftone)] + halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) # first word in row is masked destMask = self.mask1 destWord = self.dest.w_bits.getword(self.destIndex) @@ -1351,7 +1352,7 @@ destWord = (destMask & mergeWord) | (destWord & (~destMask)) self.dest.w_bits.setword(self.destIndex, destWord) self.destIndex += 1 - destMask = 0xFFFFFFFF + destMask = BitBltShadow.AllOnes # the central horizontal loop requires no store masking if self.combinationRule == 3: # store rule requires no dest merging for word in range(2, self.nWords): @@ -1385,8 +1386,8 @@ # The loop has been rewritten to use only one pickSourcePixels call. # The idea is that the call itself could be inlined. If we decide not # to inline pickSourcePixels we could optimize the loop instead. 
- sourcePixMask = BitBltShadow.MaskTable[this.source.depth] - destPixMask = BitBltShadow.MaskTable[this.dest.depth] + sourcePixMask = BitBltShadow.MaskTable[self.source.depth] + destPixMask = BitBltShadow.MaskTable[self.dest.depth] self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / self.source.pixPerWord | 0) scrStartBits = self.source.pixPerWord - (self.sx & (self.source.pixPerWord - 1)) if self.bbW < scrStartBits: @@ -1415,9 +1416,9 @@ for i in range(self.bbH): if self.halftone: - halftoneWord = self.halftone[(self.dy + i) % self.halftone.length] + halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) else: - halftoneWord = 0xFFFFFFFF + halftoneWord = BitBltShadow.AllOnes self.srcBitShift = srcShift self.dstBitShift = dstShift self.destMask = self.mask1 @@ -1428,7 +1429,7 @@ skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) # align next word to leftmost pixel self.dstBitShift = dstShiftLeft - if self.destMask == 0xFFFFFFFF: # avoid read-modify-write + if self.destMask == BitBltShadow.AllOnes: # avoid read-modify-write self.dest.w_bits.setword( self.destIndex, self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) @@ -1444,7 +1445,7 @@ self.destMask = self.mask2 nPix = endBits else: # use fullword mask for inner loop - self.destMask = 0xFFFFFFFF + self.destMask = BitBltShadow.AllOnes nPix = self.dest.pixPerWord self.sourceIndex += self.sourceDelta self.destIndex += self.destDelta @@ -1452,22 +1453,22 @@ def pickSourcePixels(self, nPixels, srcMask, dstMask, srcShiftInc, dstShiftInc): # Pick nPix pixels starting at srcBitIndex from the source, map by the # color map, and justify them according to dstBitIndex in the resulting destWord. - sourceWord = self.source.w_bits.getword(self.sourceIndex) + sourceWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) destWord = 0 srcShift = self.srcBitShift # put into temp for speed dstShift = self.dstBitShift nPix = nPixels # always > 0 so we can use do { } while(--nPix); - if (self.cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only + if (self.w_cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only for px in range(nPix + 1): - sourcePix = self.rshift(sourceWord, srcShift) & srcMask - destPix = self.cmLookupTable[sourcePix & self.cmMask] + sourcePix = self.rshift(rarithmetic.r_uint(sourceWord), srcShift) & srcMask + destPix = self.w_cmLookupTable.getword(rarithmetic.intmask(sourcePix & self.cmMask)) # adjust dest pix index destWord = destWord | ((destPix & dstMask) << dstShift) # adjust source pix index dstShift += dstShiftInc srcShift += srcShiftInc - if srcShift & 0xFFFFFFE0: + if srcShift & rarithmetic.r_uint(0xFFFFFFE0): if (self.source.msb): srcShift += 32 else: @@ -1475,17 +1476,17 @@ self.sourceIndex += 1 sourceWord = self.source.w_bits.getword(self.sourceIndex) else: - raise PrimitiveFailedError() + raise error.PrimitiveFailedError() self.srcBitShift = srcShift # Store back return destWord def rotate32bit(self, thisWord, prevWord, skewMask, notSkewMask, unskew): if unskew < 0: - rotated = self.rshift(prevWord & notSkewMask, -unskew) + rotated = self.rshift(rarithmetic.r_uint(prevWord & notSkewMask), -unskew) else: rotated = (prevWord & notSkewMask) << unskew if self.skew < 0: - rotated = rotated | self.rshift(thisWord & skewMask, -self.skew) + rotated = rotated | self.rshift(rarithmetic.r_uint(thisWord & skewMask), -self.skew) else: 
rotated = rotated | (thisWord & skewMask) << self.skew return rotated @@ -1495,49 +1496,46 @@ sourceLimit = self.source.w_bits.size() hInc = self.hDir # init skew (the difference in word alignment of source and dest) - unskew = 0 - skewMask = 0 if (self.skew == -32): - self.skew = unskew = skewMask = 0 + self.skew = unskew = 0 + skewMask = rarithmetic.r_uint(0) else: if (self.skew < 0): unskew = self.skew + 32 - skewMask = 0xFFFFFFFF << -self.skew + skewMask = rarithmetic.r_uint(BitBltShadow.AllOnes << -self.skew) else: if (self.skew == 0): unskew = 0 - skewMask = 0xFFFFFFFF + skewMask = BitBltShadow.AllOnes else: unskew = self.skew - 32 - skewMask = self.rshift(0xFFFFFFFF, self.skew) - notSkewMask = ~skewMask + skewMask = self.rshift(BitBltShadow.AllOnes, self.skew) + notSkewMask = rarithmetic.r_uint(~skewMask) # init halftones - halftoneWord = 0 - halftoneHeight = 0 if (self.halftone): - halftoneWord = self.halftone[0] + halftoneWord = rarithmetic.r_uint(self.halftone[0]) halftoneHeight = len(self.halftone) else: - halftoneWord = 0xFFFFFFFF + halftoneWord = BitBltShadow.AllOnes halftoneHeight = 0 # now loop over all lines y = self.dy for i in range(1, self.bbH + 1): if (halftoneHeight > 1): - halftoneWord = self.halftone[y % halftoneHeight] + halftoneWord = rarithmetic.r_uint(self.halftone[y % halftoneHeight]) y += self.vDir if (self.preload): - prevWord = self.source.w_bits.getword(self.sourceIndex) + prevWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) self.sourceIndex += hInc else: - prevWord = 0 + prevWord = rarithmetic.r_uint(0) destMask = self.mask1 # pick up next word - thisWord = self.source.w_bits.getword(self.sourceIndex) + thisWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) self.sourceIndex += hInc skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) prevWord = thisWord @@ -1547,9 +1545,9 @@ self.dest.w_bits.setword(self.destIndex, destWord) # The central horizontal loop requires no store masking self.destIndex += hInc - destMask = 0xFFFFFFFF + destMask = BitBltShadow.AllOnes if (self.combinationRule == 3): # Store mode avoids dest merge function - if ((self.skew == 0) and (halftoneWord == 0xFFFFFFFF)): + if ((self.skew == 0) and (halftoneWord == BitBltShadow.AllOnes)): # Non-skewed with no halftone if (self.hDir == -1): for word in range(2, self.nWords): @@ -1685,7 +1683,8 @@ # "n_words", "preload" class FormShadow(AbstractCachingShadow): - _attrs_ = ["w_bits", "width", "height", "depth", "offset_x", "offset_y"] + _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", + "offsetY", "msb", "pixPerWord", "pitch"] def sync_cache(self): if self.size() < 5: @@ -1703,12 +1702,12 @@ self.height = self.space.unwrap_int(self.fetch(2)) self.depth = self.space.unwrap_int(self.fetch(3)) if self.width < 0 or self.height < 0: - raise PrimitiveFailedError() + raise error.PrimitiveFailedError() self.msb = self.depth > 0 if self.depth < 0: self.depth = -self.depth if self.depth == 0: - raise PrimitiveFailedError() + raise error.PrimitiveFailedError() w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: @@ -1717,7 +1716,7 @@ self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() != (self.pitch * self.height): - raise PrimitiveFailedError() + raise error.PrimitiveFailedError() # def replace_bits(self): # w_bits = self.w_bits From noreply at buildbot.pypy.org Thu Dec 19 20:03:52 2013 
From: noreply at buildbot.pypy.org (mattip) Date: Thu, 19 Dec 2013 20:03:52 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup, make the prod test pass for numpypy and numpy ( -A ) Message-ID: <20131219190352.A50771C1164@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68486:862f40eff396 Date: 2013-12-19 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/862f40eff396/ Log: cleanup, make the prod test pass for numpypy and numpy ( -A ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,4 +1,3 @@ -import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1400,18 +1400,18 @@ assert (array([[1,2],[3,4]]).prod(1) == [2, 12]).all() def test_prod(self): - from numpypy import array, int_, dtype + from numpypy import array, dtype a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: a = array([True, False], dtype=dt) assert a.prod() == 0 - assert a.prod().dtype is dtype('uint' if dt[0] == 'u' else 'int') + assert a.prod().dtype == dtype('uint' if dt[0] == 'u' else 'int') for dt in ['l', 'L', 'q', 'Q', 'e', 'f', 'd', 'F', 'D']: a = array([True, False], dtype=dt) assert a.prod() == 0 - assert a.prod().dtype is dtype(dt) + assert a.prod().dtype == dtype(dt) def test_max(self): from numpypy import array, zeros From noreply at buildbot.pypy.org Thu Dec 19 22:13:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 19 Dec 2013 22:13:38 +0100 (CET) Subject: [pypy-commit] stmgc c5: in-progress Message-ID: <20131219211338.1CDFA1C1473@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r568:7171e5c9d978 Date: 2013-12-19 22:13 +0100 http://bitbucket.org/pypy/stmgc/changeset/7171e5c9d978/ Log: in-progress diff --git a/c5/largemalloc.c b/c5/largemalloc.c new file mode 100644 --- /dev/null +++ b/c5/largemalloc.c @@ -0,0 +1,190 @@ +/* This contains a lot of inspiration from malloc() in the GNU C Library. + More precisely, this is (a subset of) the part that handles large + blocks, which in our case means at least 288 bytes. +*/ + + +#define MMAP_LIMIT (1280*1024) + +#define largebin_index(sz) \ + ((((sz) >> 6) <= 47) ? ((sz) >> 6): /* 0 - 47 */ \ + (((sz) >> 9) <= 23) ? 42 + ((sz) >> 9): /* 48 - 65 */ \ + (((sz) >> 12) <= 11) ? 63 + ((sz) >> 12): /* 66 - 74 */ \ + (((sz) >> 15) <= 5) ? 74 + ((sz) >> 15): /* 75 - 79 */ \ + (((sz) >> 18) <= 2) ? 80 + ((sz) >> 18): /* 80 - 82 */ \ + 83) +#define N_BINS 84 + +typedef struct malloc_chunk { + size_t prev_size; /* - if the previous chunk is free: its size + - otherwise, if this chunk is free: 1 + - otherwise, 0. */ + size_t size; /* size of this chunk */ + + union { + char data[1]; /* if not free: start of the user data */ + struct malloc_chunk *next; /* if free: a doubly-linked list */ + }; + struct malloc_chunk *prev; + + /* The chunk has a total size of 'size'. It is immediately followed + in memory by another chunk. This list ends with the last "chunk" + being actually only one word long, 'size_t prev_size'. 
Both this + last chunk and the theoretical chunk before the first one are + considered "not free". */ +} *mchunk_t; + +#define THIS_CHUNK_FREE 1 +#define BOTH_CHUNKS_USED 0 +#define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, data) +#define MINSIZE sizeof(struct malloc_chunk) + +#define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + ofs)) +#define next_chunk(p) chunk_at_offset(p, (p)->size) + + +/* The free chunks are stored in "bins". Each bin is a doubly-linked + list of chunks. There are 84 bins, with largebin_index() giving the + correspondence between sizes are bin indices. + + Each free chunk is preceeded in memory by a non-free chunk (or no + chunk at all). Each free chunk is followed in memory by a non-free + chunk (or no chunk at all). Chunks are consolidated with their + neighbors to ensure this. + + In each bin's doubly-linked list, chunks are sorted by their size in + decreasing order . Among chunks of equal size, they are ordered with + the most recently freed first, and we take them from the back. This + results in FIFO order, which is better to give each block a while to + rest in the list and be consolidated into potentially larger blocks. +*/ + +static struct { mchunk_t *head, mchunk_t *tail; } largebins[N_BINS]; + + +static char *allocate_more(size_t request_size); + +static void insert_sort(mchunk_t *new) +{ + size_t index = largebin_index(new->size); + mchunk_t *head = largebins[index]->head; + + if (head == NULL) { + assert(largebins[index]->tail == NULL); + new->prev = NULL; + new->next = NULL; + largebins[index]->tail = new; + largebins[index]->head = new; + return; + } + assert(largebins[index]->tail != NULL); + + size_t new_size = new->size; + if (new_size >= head->size) { + new->prev = NULL; + new->next = head; + assert(head->prev == NULL); + head->prev = new; + largebins[index]->head = new; + return; + } + mchunk_t *search; + for (search = head; search != NULL; search = search->next) { + if (new_size >= search->size) { + new->prev = search->prev; + new->prev->next = new; + new->next = search; + search->prev = new; + return; + } + } + new->prev = largebins[index]->tail; + new->prev->next = new; + new->next = NULL; + largebins[index]->tail = new; +} + +char *stm_large_malloc(size_t request_size) +{ + /* 'request_size' should already be a multiple of the word size here */ + assert((request_size & (sizeof(char *)-1)) == 0); + + size_t chunk_size = request_size + CHUNK_HEADER_SIZE; + if (chunk_size < request_size) { + /* 'request_size' is so large that the addition wrapped around */ + fprintf(stderr, "allocation request too large\n"); + abort(); + } + + size_t index = largebin_index(chunk_size); + + /* scan through the chunks of current bin in reverse order + to find the smallest that fits. */ + mchunk_t *scan = largebins[index]->tail; + mchunk_t *head = largebins[index]->head; + while (scan != head) { + assert(scan->prev_size == THIS_CHUNK_FREE); + assert(next_chunk(scan)->prev_size == scan->size); + + if (scan->size >= chunk_size) { + /* found! 
*/ + found: + if (scan == largebins[index]->tail) { + largebins[index]->tail = scan->prev; + } + else { + scan->next->prev = scan->prev; + } + + size_t remaining_size = scan->size - chunk_size; + if (remaining_size < MINSIZE) { + next_chunk(scan)->prev_size = BOTH_CHUNKS_USED; + } + else { + /* only part of the chunk is being used; reduce the size + of 'scan' down to 'chunk_size', and create a new chunk + of the 'remaining_size' afterwards */ + mchunk_t *new = chunk_at_offset(scan, chunk_size); + new->prev_size = THIS_CHUNK_FREE; + new->size = remaining_size; + next_chunk(new)->prev_size = remaining_size; + insert_sort(new); + scan->size = chunk_size; + } + scan->prev_size = BOTH_CHUNKS_USED; + return scan->data; + } + scan = scan->prev; + } + + /* search now through all higher bins. We only need to take the + smallest item of the first non-empty bin, as it will be large + enough. xxx use a bitmap to speed this up */ + while (++index < N_BINS) { + scan = largebins[index]->tail; + if (scan != NULL) + goto found; + } + + /* not enough free memory. We need to allocate more. */ + return allocate_more(request_size); +} + +static char *allocate_more(size_t request_size) +{ + assert(request_size < MMAP_LIMIT);//XXX + + size_t big_size = MMAP_LIMIT * 8 - 48; + mchunk_t *big_chunk = (mchunk_t *)malloc(big_size); + if (!big_chunk) { + fprintf(stderr, "out of memory!\n"); + abort(); + } + + big_chunk->prev_size = THIS_CHUNK_FREE; + big_chunk->size = big_size - sizeof(size_t); + next_chunk(big_chunk)->prev_size = big_chunk->size; + insert_sort(big_chunk); + + return stm_large_malloc(request_size); +} From noreply at buildbot.pypy.org Thu Dec 19 22:22:41 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 22:22:41 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131219212241.A702F1C1473@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68487:6ae2e7a522af Date: 2013-12-19 13:20 -0800 http://bitbucket.org/pypy/pypy/changeset/6ae2e7a522af/ Log: cleanup diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): From noreply at buildbot.pypy.org Thu Dec 19 22:22:43 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 22:22:43 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131219212243.0CD811C1473@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68488:a2ebaff30cc5 Date: 2013-12-19 13:20 -0800 http://bitbucket.org/pypy/pypy/changeset/a2ebaff30cc5/ Log: merge default diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git 
a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,4 +1,3 @@ -import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -498,13 +498,14 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_int64dtype + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY_UNSIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_uint64dtype - elif dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: - return dt + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_ulongdtype else: - assert False + assert dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR + return dt if promote_bools and (dt.kind == NPY_GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1400,16 +1400,18 @@ assert (array([[1,2],[3,4]]).prod(1) == [2, 12]).all() def test_prod(self): - from numpypy import array, int_, dtype + from numpypy import array, dtype a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 - a = array([True, False]) - assert a.prod() == 0 - assert type(a.prod()) is int_ - a = array([True, False], dtype='uint') - assert a.prod() == 0 - assert type(a.prod()) is dtype('uint').type + for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype == dtype('uint' if dt[0] == 'u' else 'int') + for dt in ['l', 'L', 'q', 'Q', 'e', 'f', 'd', 'F', 'D']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype == dtype(dt) def test_max(self): from numpypy import array, zeros From noreply at buildbot.pypy.org Thu Dec 19 22:22:44 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 22:22:44 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix __repr__ Message-ID: <20131219212244.389E01C1473@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68489:ea48ea97deda Date: 2013-12-19 13:21 -0800 http://bitbucket.org/pypy/pypy/changeset/ea48ea97deda/ Log: fix __repr__ diff --git a/lib-python/3/ctypes/__init__.py b/lib-python/3/ctypes/__init__.py --- a/lib-python/3/ctypes/__init__.py +++ b/lib-python/3/ctypes/__init__.py @@ -359,10 +359,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %x at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle & (_sys.maxsize*2 + 1)), - id(self) & (_sys.maxsize*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxsize * 2 + 1)) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): From noreply at buildbot.pypy.org Thu Dec 19 22:22:45 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 19 Dec 2013 22:22:45 
+0100 (CET) Subject: [pypy-commit] pypy py3k: bring over @xfail from default Message-ID: <20131219212245.593521C1473@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68490:2cba9ecdb773 Date: 2013-12-19 13:21 -0800 http://bitbucket.org/pypy/pypy/changeset/2cba9ecdb773/ Log: bring over @xfail from default diff --git a/lib-python/3/ctypes/test/test_python_api.py b/lib-python/3/ctypes/test/test_python_api.py --- a/lib-python/3/ctypes/test/test_python_api.py +++ b/lib-python/3/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p From noreply at buildbot.pypy.org Thu Dec 19 22:39:15 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 22:39:15 +0100 (CET) Subject: [pypy-commit] pypy default: fix confusion between numpy int types by making LongBox a real box so we can differentiate it Message-ID: <20131219213915.6889B1C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68491:3a4a7e1a41a1 Date: 2013-12-19 16:28 -0500 http://bitbucket.org/pypy/pypy/changeset/3a4a7e1a41a1/ Log: fix confusion between numpy int types by making LongBox a real box so we can differentiate it diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -350,28 +350,22 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("long") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("ulong") - -class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int64") - -class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('longlong') - -class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint64") - -class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('ulonglong') + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") class W_InexactBox(W_NumberBox): pass @@ -663,13 +657,6 @@ __reduce__ = interp2app(W_Int64Box.descr_reduce), ) -if LONG_BIT == 32: - W_LongBox = W_Int32Box - W_ULongBox = W_UInt32Box -elif LONG_BIT == 64: - W_LongBox = W_Int64Box - W_ULongBox = W_UInt64Box - W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), @@ -677,6 +664,22 @@ __reduce__ = 
interp2app(W_UInt64Box.descr_reduce), ) +W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, + (W_SignedIntegerBox.typedef, int_typedef), + __module__ = "numpy", + __new__ = interp2app(W_LongBox.descr__new__.im_func), + __index__ = interp2app(W_LongBox.descr_index), + __reduce__ = interp2app(W_LongBox.descr_reduce), +) + +W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, + (W_UnsignedIntegerBox.typedef, int_typedef), + __module__ = "numpy", + __new__ = interp2app(W_ULongBox.descr__new__.im_func), + __index__ = interp2app(W_ULongBox.descr_index), + __reduce__ = interp2app(W_ULongBox.descr_reduce), +) + W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, __module__ = "numpy", ) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -4,7 +4,7 @@ spaceconfig = dict(usemodules=["micronumpy", "binascii", "struct"]) def test_init(self): - import numpypy as np + import numpy as np import math assert np.intp() == np.intp(0) assert np.intp('123') == np.intp(123) @@ -17,6 +17,8 @@ assert np.complex_() == np.complex_(0) #raises(TypeError, np.complex_, '1+2j') assert math.isnan(np.complex_(None)) + for c in ['i', 'I', 'l', 'L', 'q', 'Q']: + assert np.dtype(c).type().dtype.char == c def test_builtin(self): import numpy as np @@ -37,7 +39,7 @@ assert len(np.string_('123')) == 3 def test_pickle(self): - from numpypy import dtype, zeros + from numpy import dtype, zeros try: from numpy.core.multiarray import scalar except ImportError: @@ -112,7 +114,7 @@ raises(TypeError, a.squeeze, 2) def test_attributes(self): - import numpypy as np + import numpy as np value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') assert value.size == 1 From noreply at buildbot.pypy.org Thu Dec 19 22:39:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 22:39:16 +0100 (CET) Subject: [pypy-commit] pypy default: the is comparison is important in this test, put it back Message-ID: <20131219213916.A89C11C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68492:cee13fb60662 Date: 2013-12-19 16:38 -0500 http://bitbucket.org/pypy/pypy/changeset/cee13fb60662/ Log: the is comparison is important in this test, put it back diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1407,11 +1407,11 @@ for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: a = array([True, False], dtype=dt) assert a.prod() == 0 - assert a.prod().dtype == dtype('uint' if dt[0] == 'u' else 'int') + assert a.prod().dtype is dtype('uint' if dt[0] == 'u' else 'int') for dt in ['l', 'L', 'q', 'Q', 'e', 'f', 'd', 'F', 'D']: a = array([True, False], dtype=dt) assert a.prod() == 0 - assert a.prod().dtype == dtype(dt) + assert a.prod().dtype is dtype(dt) def test_max(self): from numpypy import array, zeros From noreply at buildbot.pypy.org Thu Dec 19 22:55:34 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 22:55:34 +0100 (CET) Subject: [pypy-commit] pypy default: fix uint64 mro Message-ID: <20131219215534.17EBB1C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68493:4d06bf382d94 Date: 2013-12-19 16:54 -0500 http://bitbucket.org/pypy/pypy/changeset/4d06bf382d94/ Log: fix uint64 mro diff --git a/pypy/module/micronumpy/interp_boxes.py 
b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -672,8 +672,7 @@ __reduce__ = interp2app(W_LongBox.descr_reduce), ) -W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, - (W_UnsignedIntegerBox.typedef, int_typedef), +W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, __module__ = "numpy", __new__ = interp2app(W_ULongBox.descr__new__.im_func), __index__ = interp2app(W_ULongBox.descr_index), From noreply at buildbot.pypy.org Thu Dec 19 23:19:04 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 23:19:04 +0100 (CET) Subject: [pypy-commit] pypy default: simplify/fix some intp attributes Message-ID: <20131219221904.51CB41C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68494:5076b09e7c83 Date: 2013-12-19 17:15 -0500 http://bitbucket.org/pypy/pypy/changeset/5076b09e7c83/ Log: simplify/fix some intp attributes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -735,38 +735,21 @@ char=NPY_HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) - ptr_size = rffi.sizeof(rffi.CCHARP) - if ptr_size == 4: - intp_box = interp_boxes.W_Int32Box - intp_type = types.Int32() - intp_num = NPY_INT - uintp_box = interp_boxes.W_UInt32Box - uintp_type = types.UInt32() - uintp_num = NPY_UINT - elif ptr_size == 8: - intp_box = interp_boxes.W_Int64Box - intp_type = types.Int64() - intp_num = NPY_LONG - uintp_box = interp_boxes.W_UInt64Box - uintp_type = types.UInt64() - uintp_num = NPY_ULONG - else: - raise ValueError('unknown point size %d' % ptr_size) self.w_intpdtype = W_Dtype( - intp_type, - num=intp_num, - kind=NPY_INTPLTR, + types.Long(), + num=NPY_LONG, + kind=NPY_SIGNEDLTR, name='intp', char=NPY_INTPLTR, - w_box_type = space.gettypefor(intp_box), + w_box_type = space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( - uintp_type, - num=uintp_num, - kind=NPY_UINTPLTR, + types.ULong(), + num=NPY_ULONG, + kind=NPY_UNSIGNEDLTR, name='uintp', char=NPY_UINTPLTR, - w_box_type = space.gettypefor(uintp_box), + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), ) float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -715,10 +715,14 @@ assert numpy.int16 is numpy.short assert numpy.int8 is numpy.byte assert numpy.bool_ is numpy.bool8 + assert numpy.intp().dtype.num == 7 + assert numpy.intp().dtype.char == 'l' if self.ptr_size == 4: + assert numpy.intp().dtype.name == 'int32' assert numpy.intp is numpy.int32 assert numpy.uintp is numpy.uint32 elif self.ptr_size == 8: + assert numpy.intp().dtype.name == 'int64' assert numpy.intp is numpy.int64 assert numpy.uintp is numpy.uint64 @@ -787,8 +791,22 @@ def test_intp(self): from numpypy import dtype - assert dtype('p') == dtype('intp') - assert dtype('P') == dtype('uintp') + assert dtype('p') is dtype('intp') + assert dtype('P') is dtype('uintp') + #assert dtype('p') is dtype('int') + #assert dtype('P') is dtype('uint') + assert dtype('p').num == 7 + assert dtype('P').num == 8 + #assert dtype('p').char == 'l' + #assert dtype('P').char == 'L' + assert dtype('p').kind == 'i' + assert dtype('P').kind == 'u' + 
#if self.ptr_size == 4: + # assert dtype('p').name == 'int32' + # assert dtype('P').name == 'uint32' + #else: + # assert dtype('p').name == 'int64' + # assert dtype('P').name == 'uint64' def test_alignment(self): from numpypy import dtype From noreply at buildbot.pypy.org Thu Dec 19 23:23:11 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 23:23:11 +0100 (CET) Subject: [pypy-commit] pypy default: test typeinfo of intp also Message-ID: <20131219222311.6871D1C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68495:b91626777307 Date: 2013-12-19 17:22 -0500 http://bitbucket.org/pypy/pypy/changeset/b91626777307/ Log: test typeinfo of intp also diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -33,6 +33,10 @@ assert typeinfo['CFLOAT'] == ('F', 14, 64, 8, np.complex64) assert typeinfo['CDOUBLE'] == ('D', 15, 128, 16, np.complex128) assert typeinfo['HALF'] == ('e', 23, 16, 2, np.float16) + assert typeinfo['INTP'] == ('p', 7, 64, 8, + 2**(self.ptr_size*8 - 1) - 1, + -2**(self.ptr_size*8 - 1), + np.int64) def test_dtype_basic(self): from numpypy import dtype From noreply at buildbot.pypy.org Thu Dec 19 23:25:31 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 23:25:31 +0100 (CET) Subject: [pypy-commit] pypy default: for 32bit also Message-ID: <20131219222531.1C0071C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68496:808bd00c35ee Date: 2013-12-19 17:24 -0500 http://bitbucket.org/pypy/pypy/changeset/808bd00c35ee/ Log: for 32bit also diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -33,10 +33,11 @@ assert typeinfo['CFLOAT'] == ('F', 14, 64, 8, np.complex64) assert typeinfo['CDOUBLE'] == ('D', 15, 128, 16, np.complex128) assert typeinfo['HALF'] == ('e', 23, 16, 2, np.float16) - assert typeinfo['INTP'] == ('p', 7, 64, 8, + assert typeinfo['INTP'] == ('p', np.dtype('int').num, + self.ptr_size*8, self.ptr_size, 2**(self.ptr_size*8 - 1) - 1, -2**(self.ptr_size*8 - 1), - np.int64) + np.dtype('int').type) def test_dtype_basic(self): from numpypy import dtype From noreply at buildbot.pypy.org Thu Dec 19 23:37:30 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 19 Dec 2013 23:37:30 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131219223730.651E51C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68497:df40c2b6928d Date: 2013-12-19 17:36 -0500 http://bitbucket.org/pypy/pypy/changeset/df40c2b6928d/ Log: cleanup diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -478,8 +478,7 @@ assert numpy.int16('32768') == -32768 def test_uint16(self): - import numpypy as numpy - + import numpy assert numpy.uint16(65535) == 65535 assert numpy.uint16(65536) == 0 assert numpy.uint16('65535') == 65535 @@ -487,8 +486,7 @@ def test_int32(self): import sys - import numpypy as numpy - + import numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 @@ -503,10 +501,8 @@ def test_uint32(self): import sys - import numpypy as numpy - + import numpy assert numpy.uint32(10) == 10 - if sys.maxint > 2 
** 31 - 1: assert numpy.uint32(4294967295) == 4294967295 assert numpy.uint32(4294967296) == 0 @@ -523,8 +519,7 @@ def test_int64(self): import sys - import numpypy as numpy - + import numpy if sys.maxint == 2 ** 63 -1: assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, @@ -539,30 +534,30 @@ assert numpy.int64(9223372036854775807) == 9223372036854775807 assert numpy.int64(9223372036854775807) == 9223372036854775807 - raises(OverflowError, numpy.int64, 9223372036854775808) raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): - import sys - import numpypy as numpy - + import numpy + assert numpy.dtype(numpy.uint64).type is numpy.uint64 assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] - - assert numpy.dtype(numpy.uint64).type is numpy.uint64 - skip("see comment") - # These tests pass "by chance" on numpy, things that are larger than - # platform long (i.e. a python int), don't get put in a normal box, - # instead they become an object array containing a long, we don't have - # yet, so these can't pass. - assert numpy.uint64(9223372036854775808) == 9223372036854775808 - assert numpy.uint64(18446744073709551615) == 18446744073709551615 - raises(OverflowError, numpy.uint64(18446744073709551616)) + import sys + if '__pypy__' not in sys.builtin_module_names: + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. + assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + else: + raises(OverflowError, numpy.int64, 9223372036854775808) + raises(OverflowError, numpy.int64, 18446744073709551615) + raises(OverflowError, numpy.uint64, 18446744073709551616) def test_float16(self): - import numpypy as numpy + import numpy assert numpy.float16.mro() == [numpy.float16, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -573,8 +568,7 @@ def test_float32(self): - import numpypy as numpy - + import numpy assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -584,8 +578,7 @@ raises(ValueError, numpy.float32, '23.2df') def test_float64(self): - import numpypy as numpy - + import numpy assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] @@ -601,14 +594,14 @@ raises(ValueError, numpy.float64, '23.2df') def test_float_None(self): - import numpypy as numpy + import numpy from math import isnan assert isnan(numpy.float32(None)) assert isnan(numpy.float64(None)) assert isnan(numpy.longdouble(None)) def test_longfloat(self): - import numpypy as numpy + import numpy # it can be float96 or float128 if numpy.longfloat != numpy.float64: assert numpy.longfloat.mro()[1:] == [numpy.floating, @@ -621,8 +614,7 @@ raises(ValueError, numpy.longfloat, '23.2df') def test_complex_floating(self): - import numpypy as numpy - + import numpy assert numpy.complexfloating.__mro__ == (numpy.complexfloating, numpy.inexact, numpy.number, numpy.generic, object) @@ -955,10 +947,14 @@ raises(KeyError, 'd.fields["xyz"]') def test_create_from_dict(self): - skip("not yet") - from numpypy import dtype - d = dtype({'names': ['a', 'b', 'c'], - }) + import numpy as np + import sys + d = {'names': 
['r','g','b','a'], + 'formats': [np.uint8, np.uint8, np.uint8, np.uint8]} + if '__pypy__' not in sys.builtin_module_names: + dt = np.dtype(d) + else: + raises(NotImplementedError, np.dtype, d) def test_create_subarrays(self): from numpypy import dtype From noreply at buildbot.pypy.org Fri Dec 20 00:24:27 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 00:24:27 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131219232427.4382A1C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68498:4e4d5daef8fe Date: 2013-12-19 18:17 -0500 http://bitbucket.org/pypy/pypy/changeset/4e4d5daef8fe/ Log: cleanup diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2978,17 +2978,18 @@ assert j[0] == 12 k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) - dt = array([5],dtype='longfloat').dtype - if dt.itemsize == 12: + dt = array([5], dtype='longfloat').dtype + if dt.itemsize == 8: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') + elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') elif dt.itemsize == 16: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00' \ '\x00\x00\x00\x00', dtype='float128') - elif dt.itemsize == 8: - skip('longfloat is float64') else: - skip('unknown itemsize for longfloat') + assert False, 'unknown itemsize for longfloat' assert m[0] == dtype('longfloat').type(5.) def test_fromstring_invalid(self): @@ -3311,14 +3312,16 @@ a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], dtype=dt) - s = str(a) i = a.item() assert isinstance(i, tuple) assert len(i) == 4 - skip('incorrect formatting via dump_data') - assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " - "[[7, 8, 9], [10, 11, 12]]])]") - + import sys + if '__pypy__' not in sys.builtin_module_names: + assert str(a) == "[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " \ + "[[7, 8, 9], [10, 11, 12]]])]" + else: + assert str(a) == "array([('aaaa', 1.0, 8.0, [1, 2, 3, 4, 5, 6, " \ + "7, 8, 9, 10, 11, 12])])" def test_issue_1589(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -390,23 +390,17 @@ assert (a == ref).all() def test_signbit(self): - from numpypy import signbit, add - + from numpy import signbit, add + assert signbit(add.identity) == False assert (signbit([0, 0.0, 1, 1.0, float('inf')]) == - [False, False, False, False, False]).all() + [False, False, False, False, False]).all() assert (signbit([-0, -0.0, -1, -1.0, float('-inf')]) == - [False, True, True, True, True]).all() - - a = add.identity - assert signbit(a) == False - - skip('sign of nan is non-determinant') + [False, True, True, True, True]).all() assert (signbit([float('nan'), float('-nan'), -float('nan')]) == - [False, True, True]).all() + [False, True, True]).all() def test_reciprocal(self): - from numpypy import array, reciprocal - + from numpy import array, reciprocal inf = float('inf') nan = float('nan') reference = [-0.2, inf, -inf, 2.0, nan] From noreply at buildbot.pypy.org Fri Dec 20 00:44:09 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 00:44:09 +0100 (CET) Subject: 
[pypy-commit] pypy default: remove bogus test (testing uninitialized memory) Message-ID: <20131219234409.41EE91D22C5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68499:11541c9f799a Date: 2013-12-19 18:42 -0500 http://bitbucket.org/pypy/pypy/changeset/11541c9f799a/ Log: remove bogus test (testing uninitialized memory) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -217,6 +217,7 @@ assert get(1, 0) == 2 assert get(1, 1) == 3 + class AppTestNumArray(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) @@ -2185,12 +2186,6 @@ a[b] = 1. assert (a == [[1., 1., 1.]]).all() - @py.test.mark.xfail - def test_boolean_array(self): - import numpypy as np - a = np.ndarray([1], dtype=bool) - assert a[0] == True - class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) @@ -2253,7 +2248,6 @@ f.close() - class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -3334,6 +3328,7 @@ a = np.array([1,2,3], dtype='int16') assert (a * 2).dtype == np.dtype('int16') + class AppTestPyPy(BaseNumpyAppTest): def setup_class(cls): if option.runappdirect and '__pypy__' not in sys.builtin_module_names: From noreply at buildbot.pypy.org Fri Dec 20 00:44:10 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 00:44:10 +0100 (CET) Subject: [pypy-commit] pypy default: fix ndarray.take with axis argument Message-ID: <20131219234410.733101D22C5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68500:aca7d2177494 Date: 2013-12-19 18:37 -0500 http://bitbucket.org/pypy/pypy/changeset/aca7d2177494/ Log: fix ndarray.take with axis argument diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1187,10 +1187,11 @@ if axis is None: res = a.ravel()[indices] else: + from operator import mul if axis < 0: axis += len(a.shape) s0, s1 = a.shape[:axis], a.shape[axis+1:] - l0 = prod(s0) if s0 else 1 - l1 = prod(s1) if s1 else 1 + l0 = reduce(mul, s0) if s0 else 1 + l1 = reduce(mul, s1) if s1 else 1 res = a.reshape((l0, -1, l1))[:,indices,:].reshape(s0 + (-1,) + s1) if out is not None: out[:] = res diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2722,6 +2722,8 @@ raises(IndexError, "arange(3).take([15])") a = arange(6).reshape(2, 3) assert (a.take([1, 0, 3]) == [1, 0, 3]).all() + assert (a.take([1], axis=0) == [[3, 4, 5]]).all() + assert (a.take([1], axis=1) == [[1], [4]]).all() assert ((a + a).take([3]) == [6]).all() a = arange(12).reshape(2, 6) assert (a[:,::2].take([3, 2, 1]) == [6, 4, 2]).all() From noreply at buildbot.pypy.org Fri Dec 20 01:11:50 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 01:11:50 +0100 (CET) Subject: [pypy-commit] pypy default: this test works now Message-ID: <20131220001151.0732B1C1164@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68501:13ac2969a2eb Date: 2013-12-19 19:11 -0500 http://bitbucket.org/pypy/pypy/changeset/13ac2969a2eb/ Log: this test works now diff --git a/pypy/module/micronumpy/test/test_sorting.py 
b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -306,9 +306,8 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): - skip('not implemented yet') - from numpypy import array, dtype - a = array(range(11),dtype='float64') + from numpy import array, dtype + a = array(range(11), dtype='float64') c = a.astype(dtype(' Author: Brian Kearns Branch: Changeset: r68502:1fa0a9d16200 Date: 2013-12-19 19:29 -0500 http://bitbucket.org/pypy/pypy/changeset/1fa0a9d16200/ Log: fix test on 32bit diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -114,15 +114,11 @@ assert dtype(bool).num == 0 if self.ptr_size == 4: - assert dtype('intp').num == 5 - assert dtype('uintp').num == 6 assert dtype('int32').num == 7 assert dtype('uint32').num == 8 assert dtype('int64').num == 9 assert dtype('uint64').num == 10 else: - assert dtype('intp').num == 7 - assert dtype('uintp').num == 8 assert dtype('int32').num == 5 assert dtype('uint32').num == 6 assert dtype('int64').num == 7 @@ -130,6 +126,8 @@ assert dtype(int).num == 7 assert dtype('int').num == 7 assert dtype('uint').num == 8 + assert dtype('intp').num == 7 + assert dtype('uintp').num == 8 assert dtype(long).num == 9 assert dtype(float).num == 12 assert dtype('float').num == 12 From noreply at buildbot.pypy.org Fri Dec 20 02:04:12 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 02:04:12 +0100 (CET) Subject: [pypy-commit] pypy default: add ufunc.__name__ property Message-ID: <20131220010412.80A6D1C1172@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68503:91105b7ff82b Date: 2013-12-19 20:03 -0500 http://bitbucket.org/pypy/pypy/changeset/91105b7ff82b/ Log: add ufunc.__name__ property diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -33,6 +33,9 @@ self.allow_complex = allow_complex self.complex_to_float = complex_to_float + def descr_get_name(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("" % self.name) @@ -417,6 +420,7 @@ __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), + __name__ = GetSetProperty(W_Ufunc.descr_get_name), identity = GetSetProperty(W_Ufunc.descr_get_identity), accumulate = interp2app(W_Ufunc.descr_accumulate), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -82,6 +82,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" assert repr(ufunc) == "" + assert add.__name__ == 'add' def test_ufunc_attrs(self): from numpypy import add, multiply, sin From noreply at buildbot.pypy.org Fri Dec 20 04:34:50 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 04:34:50 +0100 (CET) Subject: [pypy-commit] pypy default: implement dtype.descr for record types Message-ID: <20131220033450.714F21C1172@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68504:7a6f7ca7c635 Date: 2013-12-19 22:23 -0500 http://bitbucket.org/pypy/pypy/changeset/7a6f7ca7c635/ Log: implement dtype.descr for record types diff --git 
a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -157,8 +157,20 @@ return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: - raise OperationError(space.w_NotImplementedError, space.wrap( - "descr not implemented for record types")) + descr = [] + for name in self.fieldnames: + subdtype = self.fields[name][1] + subdescr = [space.wrap(name)] + if subdtype.is_record_type(): + subdescr.append(subdtype.descr_get_descr(space)) + elif subdtype.subdtype is not None: + subdescr.append(subdtype.subdtype.descr_get_str(space)) + else: + subdescr.append(subdtype.descr_get_str(space)) + if subdtype.shape != []: + subdescr.append(subdtype.descr_get_shape(space)) + descr.append(space.newtuple(subdescr)) + return space.newlist(descr) def descr_get_base(self, space): return space.wrap(self.base) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -849,12 +849,12 @@ import numpy as np assert np.dtype(' Author: Brian Kearns Branch: Changeset: r68505:70c1284ebcc9 Date: 2013-12-19 22:57 -0500 http://bitbucket.org/pypy/pypy/changeset/70c1284ebcc9/ Log: fix translation (odd?) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -169,7 +169,7 @@ subdescr.append(subdtype.descr_get_str(space)) if subdtype.shape != []: subdescr.append(subdtype.descr_get_shape(space)) - descr.append(space.newtuple(subdescr)) + descr.append(space.newtuple(subdescr[:])) return space.newlist(descr) def descr_get_base(self, space): From noreply at buildbot.pypy.org Fri Dec 20 05:50:38 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 05:50:38 +0100 (CET) Subject: [pypy-commit] pypy default: fix dtype creation from some abstract numpy types Message-ID: <20131220045038.34B201C1172@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68506:7126a9a7a21e Date: 2013-12-19 23:41 -0500 http://bitbucket.org/pypy/pypy/changeset/7126a9a7a21e/ Log: fix dtype creation from some abstract numpy types diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -662,6 +662,7 @@ w_box_type = space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox), ], aliases=["float", "double"], ) @@ -691,7 +692,8 @@ name="complex128", char=NPY_CDOUBLELTR, w_box_type = space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex], + alternate_constructors=[space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], float_type = self.w_float64dtype, ) @@ -713,7 +715,8 @@ name='string', char=NPY_STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], + alternate_constructors=[space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( diff --git a/pypy/module/micronumpy/test/test_dtypes.py 
b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -369,16 +369,22 @@ # numpy allows abstract types in array creation a_n = numpy.array([4,4], numpy.number) + a_f = numpy.array([4,4], numpy.floating) + a_c = numpy.array([4,4], numpy.complexfloating) a_i = numpy.array([4,4], numpy.integer) a_s = numpy.array([4,4], numpy.signedinteger) a_u = numpy.array([4,4], numpy.unsignedinteger) assert a_n.dtype.num == 12 + assert a_f.dtype.num == 12 + assert a_c.dtype.num == 15 assert a_i.dtype.num == 7 assert a_s.dtype.num == 7 assert a_u.dtype.num == 8 assert a_n.dtype is numpy.dtype('float64') + assert a_f.dtype is numpy.dtype('float64') + assert a_c.dtype is numpy.dtype('complex128') if self.ptr_size == 4: assert a_i.dtype is numpy.dtype('int32') assert a_s.dtype is numpy.dtype('int32') From noreply at buildbot.pypy.org Fri Dec 20 09:08:39 2013 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 20 Dec 2013 09:08:39 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: Mark some more llops as canmallocgc. It may have been working before but this Message-ID: <20131220080839.10A431C01F6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68507:a49c63960e2f Date: 2013-12-20 09:07 +0100 http://bitbucket.org/pypy/pypy/changeset/a49c63960e2f/ Log: Mark some more llops as canmallocgc. It may have been working before but this is more correct. diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -348,14 +348,12 @@ next.prev = prev @staticmethod - @rgc.no_collect def _release_gil_shadowstack(): before = rffi.aroundstate.before if before: before() @staticmethod - @rgc.no_collect def _reacquire_gil_shadowstack(): after = rffi.aroundstate.after if after: diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -415,8 +415,8 @@ # possible GC safe-points! 
(also sync with stmframework.py) # (some ops like stm_commit_transaction don't need it because there # must be no gc-var access afterwards anyway) - 'stm_initialize': LLOp(), - 'stm_finalize': LLOp(), + 'stm_initialize': LLOp(canmallocgc=True), + 'stm_finalize': LLOp(canmallocgc=True), 'stm_barrier': LLOp(sideeffects=False), 'stm_allocate': LLOp(sideeffects=False, canmallocgc=True), 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), @@ -431,16 +431,16 @@ 'stm_hash': LLOp(sideeffects=False), 'stm_push_root': LLOp(), 'stm_pop_root_into': LLOp(), - 'stm_commit_transaction': LLOp(), - 'stm_begin_inevitable_transaction': LLOp(), + 'stm_commit_transaction': LLOp(canmallocgc=True), + 'stm_begin_inevitable_transaction': LLOp(canmallocgc=True), 'stm_should_break_transaction': LLOp(sideeffects=False), 'stm_set_transaction_length': LLOp(canmallocgc=True), 'stm_change_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), 'stm_perform_transaction':LLOp(canmallocgc=True), - 'stm_enter_callback_call':LLOp(), - 'stm_leave_callback_call':LLOp(), - 'stm_abort_and_retry': LLOp(), + 'stm_enter_callback_call':LLOp(canmallocgc=True), + 'stm_leave_callback_call':LLOp(canmallocgc=True), + 'stm_abort_and_retry': LLOp(canmallocgc=True), 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), @@ -524,7 +524,7 @@ 'jit_assembler_call': LLOp(canrun=True, # similar to an 'indirect_call' canraise=(Exception,), canmallocgc=True), - 'jit_stm_transaction_break_point' : LLOp(), + 'jit_stm_transaction_break_point' : LLOp(canmallocgc=True), # __________ GC operations __________ diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ b/rpython/translator/stm/breakfinder.py @@ -7,6 +7,7 @@ 'stm_perform_transaction', 'stm_partial_commit_and_resume_other_threads', # new priv_revision 'jit_assembler_call', + 'jit_stm_transaction_break_point', ]) From noreply at buildbot.pypy.org Fri Dec 20 10:57:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 10:57:15 +0100 (CET) Subject: [pypy-commit] stmgc c5: more in-progress Message-ID: <20131220095715.DA2D91C0219@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r569:96ac76cc0a32 Date: 2013-12-20 10:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/96ac76cc0a32/ Log: more in-progress diff --git a/c5/demo2.c b/c5/demo2.c new file mode 100644 --- /dev/null +++ b/c5/demo2.c @@ -0,0 +1,14 @@ +#include +#include + + +char *stm_large_malloc(size_t request_size); + + +int main() +{ + printf("%p\n", stm_large_malloc(10000)); + printf("%p\n", stm_large_malloc(10000)); + printf("%p\n", stm_large_malloc(10000)); + return 0; +} diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -3,6 +3,11 @@ blocks, which in our case means at least 288 bytes. */ +#include +#include +#include +#include + #define MMAP_LIMIT (1280*1024) @@ -15,32 +20,34 @@ 83) #define N_BINS 84 +typedef struct dlist_s { + struct dlist_s *next; /* a doubly-linked list */ + struct dlist_s *prev; +} dlist_t; + typedef struct malloc_chunk { - size_t prev_size; /* - if the previous chunk is free: its size + size_t prev_size; /* - if the previous chunk is free: size of its data - otherwise, if this chunk is free: 1 - otherwise, 0. 
*/ - size_t size; /* size of this chunk */ + size_t size; /* size of the data in this chunk */ - union { - char data[1]; /* if not free: start of the user data */ - struct malloc_chunk *next; /* if free: a doubly-linked list */ - }; - struct malloc_chunk *prev; + dlist_t d; /* if free: a doubly-linked list */ + /* if not free: the user data starts here */ /* The chunk has a total size of 'size'. It is immediately followed in memory by another chunk. This list ends with the last "chunk" being actually only one word long, 'size_t prev_size'. Both this last chunk and the theoretical chunk before the first one are considered "not free". */ -} *mchunk_t; +} mchunk_t; #define THIS_CHUNK_FREE 1 #define BOTH_CHUNKS_USED 0 -#define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, data) -#define MINSIZE sizeof(struct malloc_chunk) +#define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) -#define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + ofs)) -#define next_chunk(p) chunk_at_offset(p, (p)->size) +#define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) +#define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) +#define next_chunk(p) chunk_at_offset(p, CHUNK_HEADER_SIZE + (p)->size) /* The free chunks are stored in "bins". Each bin is a doubly-linked @@ -53,55 +60,100 @@ neighbors to ensure this. In each bin's doubly-linked list, chunks are sorted by their size in - decreasing order . Among chunks of equal size, they are ordered with - the most recently freed first, and we take them from the back. This - results in FIFO order, which is better to give each block a while to - rest in the list and be consolidated into potentially larger blocks. + decreasing order (if you start from 'd.next'). */ -static struct { mchunk_t *head, mchunk_t *tail; } largebins[N_BINS]; +static struct { dlist_t d; dlist_t *unsorted; } largebins[N_BINS] = { + +#define INIT(num) { { &largebins[num].d, &largebins[num].d }, NULL } + INIT(0), INIT(1), INIT(2), INIT(3), INIT(4), + INIT(5), INIT(6), INIT(7), INIT(8), INIT(9), + INIT(10), INIT(11), INIT(12), INIT(13), INIT(14), + INIT(15), INIT(16), INIT(17), INIT(18), INIT(19), + INIT(20), INIT(21), INIT(22), INIT(23), INIT(24), + INIT(25), INIT(26), INIT(27), INIT(28), INIT(29), + INIT(30), INIT(31), INIT(32), INIT(33), INIT(34), + INIT(35), INIT(36), INIT(37), INIT(38), INIT(39), + INIT(40), INIT(41), INIT(42), INIT(43), INIT(44), + INIT(45), INIT(46), INIT(47), INIT(48), INIT(49), + INIT(50), INIT(51), INIT(52), INIT(53), INIT(54), + INIT(55), INIT(56), INIT(57), INIT(58), INIT(59), + INIT(60), INIT(61), INIT(62), INIT(63), INIT(64), + INIT(65), INIT(66), INIT(67), INIT(68), INIT(69), + INIT(70), INIT(71), INIT(72), INIT(73), INIT(74), + INIT(75), INIT(76), INIT(77), INIT(78), INIT(79), + INIT(80), INIT(81), INIT(82), INIT(83) }; +#undef INIT static char *allocate_more(size_t request_size); -static void insert_sort(mchunk_t *new) +static void insert_unsorted(mchunk_t *new) { size_t index = largebin_index(new->size); - mchunk_t *head = largebins[index]->head; + new->d.next = largebins[index].unsorted; + largebins[index].unsorted = &new->d; +} - if (head == NULL) { - assert(largebins[index]->tail == NULL); - new->prev = NULL; - new->next = NULL; - largebins[index]->tail = new; - largebins[index]->head = new; - return; +static int compare_chunks(const void *vchunk1, const void *vchunk2) +{ + /* sort by decreasing size */ + const mchunk_t *chunk1 = (const mchunk_t *)vchunk1; + const mchunk_t *chunk2 = (const mchunk_t *)vchunk2; + if (chunk1->size < 
chunk2->size) + return 1; + if (chunk1->size == chunk2->size) + return 0; + else + return -1; +} + +static void really_sort_bin(size_t index) +{ + dlist_t *unsorted = largebins[index].unsorted; + largebins[index].unsorted = NULL; + + dlist_t *scan = unsorted->next; + size_t count = 1; + while (scan != NULL) { + scan = scan->next; + ++count; } - assert(largebins[index]->tail != NULL); - size_t new_size = new->size; - if (new_size >= head->size) { - new->prev = NULL; - new->next = head; - assert(head->prev == NULL); - head->prev = new; - largebins[index]->head = new; - return; + mchunk_t *chunks[count]; + size_t i; + for (i = 0; i < count; i++) { + chunks[i] = data2chunk(unsorted); + unsorted = unsorted->next; } - mchunk_t *search; - for (search = head; search != NULL; search = search->next) { - if (new_size >= search->size) { - new->prev = search->prev; - new->prev->next = new; - new->next = search; - search->prev = new; - return; + qsort(chunks, count, sizeof(mchunk_t *), compare_chunks); + + dlist_t *head = largebins[index].d.next; + dlist_t *end = &largebins[index].d; + size_t search_size = chunks[0]->size; + i = 0; + + while (1) { + if (head == end || search_size >= data2chunk(head)->size) { + /* insert 'chunks[i]' here, before the current head */ + head->prev->next = &chunks[i]->d; + chunks[i]->d.prev = head->prev; + head->prev = &chunks[i]->d; + chunks[i]->d.next = head; + if (++i == count) + break; /* all done */ + search_size = chunks[i]->size; + } + else { + head = head->next; } } - new->prev = largebins[index]->tail; - new->prev->next = new; - new->next = NULL; - largebins[index]->tail = new; +} + +static void sort_bin(size_t index) +{ + if (largebins[index].unsorted != NULL) + really_sort_bin(index); } char *stm_large_malloc(size_t request_size) @@ -109,61 +161,59 @@ /* 'request_size' should already be a multiple of the word size here */ assert((request_size & (sizeof(char *)-1)) == 0); - size_t chunk_size = request_size + CHUNK_HEADER_SIZE; - if (chunk_size < request_size) { - /* 'request_size' is so large that the addition wrapped around */ - fprintf(stderr, "allocation request too large\n"); - abort(); - } - - size_t index = largebin_index(chunk_size); + size_t index = largebin_index(request_size); + sort_bin(index); /* scan through the chunks of current bin in reverse order to find the smallest that fits. */ - mchunk_t *scan = largebins[index]->tail; - mchunk_t *head = largebins[index]->head; + dlist_t *scan = largebins[index].d.prev; + dlist_t *head = largebins[index].d.next; + mchunk_t *mscan; while (scan != head) { - assert(scan->prev_size == THIS_CHUNK_FREE); - assert(next_chunk(scan)->prev_size == scan->size); + mscan = data2chunk(scan); + assert(mscan->prev_size == THIS_CHUNK_FREE); + assert(next_chunk(mscan)->prev_size == mscan->size); - if (scan->size >= chunk_size) { + if (mscan->size >= request_size) { /* found! 
*/ found: - if (scan == largebins[index]->tail) { - largebins[index]->tail = scan->prev; - } - else { - scan->next->prev = scan->prev; - } + /* unlink mscan from the doubly-linked list */ + mscan->d.next->prev = mscan->d.prev; + mscan->d.prev->next = mscan->d.next; - size_t remaining_size = scan->size - chunk_size; - if (remaining_size < MINSIZE) { - next_chunk(scan)->prev_size = BOTH_CHUNKS_USED; + size_t remaining_size = mscan->size - request_size; + if (remaining_size < sizeof(struct malloc_chunk)) { + next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; } else { /* only part of the chunk is being used; reduce the size - of 'scan' down to 'chunk_size', and create a new chunk - of the 'remaining_size' afterwards */ - mchunk_t *new = chunk_at_offset(scan, chunk_size); + of 'mscan' down to 'request_size', and create a new + chunk of the 'remaining_size' afterwards */ + mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + + request_size); new->prev_size = THIS_CHUNK_FREE; - new->size = remaining_size; + new->size = remaining_size - CHUNK_HEADER_SIZE; next_chunk(new)->prev_size = remaining_size; - insert_sort(new); - scan->size = chunk_size; + insert_unsorted(new); + mscan->size = request_size; } - scan->prev_size = BOTH_CHUNKS_USED; - return scan->data; + mscan->prev_size = BOTH_CHUNKS_USED; + return (char *)&mscan->d; } - scan = scan->prev; + scan = mscan->d.prev; } /* search now through all higher bins. We only need to take the smallest item of the first non-empty bin, as it will be large enough. xxx use a bitmap to speed this up */ while (++index < N_BINS) { - scan = largebins[index]->tail; - if (scan != NULL) + sort_bin(index); + scan = largebins[index].d.prev; + if (scan != &largebins[index].d) { + mscan = data2chunk(scan); + assert(mscan->size >= request_size); goto found; + } } /* not enough free memory. We need to allocate more. 
*/ @@ -182,9 +232,9 @@ } big_chunk->prev_size = THIS_CHUNK_FREE; - big_chunk->size = big_size - sizeof(size_t); + big_chunk->size = big_size - CHUNK_HEADER_SIZE - sizeof(size_t); next_chunk(big_chunk)->prev_size = big_chunk->size; - insert_sort(big_chunk); + insert_unsorted(big_chunk); return stm_large_malloc(request_size); } From noreply at buildbot.pypy.org Fri Dec 20 11:14:48 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 11:14:48 +0100 (CET) Subject: [pypy-commit] pypy default: pass order on zeros for now Message-ID: <20131220101449.014EF1C01F6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68508:cd3ca3ce6fc9 Date: 2013-12-20 03:42 -0500 http://bitbucket.org/pypy/pypy/changeset/cd3ca3ce6fc9/ Log: pass order on zeros for now diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1440,12 +1440,11 @@ arr_iter.next() return w_arr - at unwrap_spec(order=str) -def zeros(space, w_shape, w_dtype=None, order='C'): +def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) + return W_NDimArray.from_shape(space, shape, dtype=dtype) @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): From noreply at buildbot.pypy.org Fri Dec 20 11:55:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 11:55:15 +0100 (CET) Subject: [pypy-commit] stmgc c5: fix fix fix Message-ID: <20131220105515.17DB71C0219@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r570:87e71aeae74c Date: 2013-12-20 11:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/87e71aeae74c/ Log: fix fix fix diff --git a/c5/demo2.c b/c5/demo2.c --- a/c5/demo2.c +++ b/c5/demo2.c @@ -5,10 +5,24 @@ char *stm_large_malloc(size_t request_size); +static void dump(char *data) +{ + fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + fprintf(stderr, " %p: %zu\n", data - 8, *(size_t*)(data - 8)); + size_t n = (*(size_t*)(data - 8)) & ~1; + fprintf(stderr, " %p: %zu ]\n", data + n, *(size_t*)(data + n)); +} + int main() { - printf("%p\n", stm_large_malloc(10000)); - printf("%p\n", stm_large_malloc(10000)); - printf("%p\n", stm_large_malloc(10000)); + char *d1 = stm_large_malloc(10000); + char *d2 = stm_large_malloc(10000); + char *d3 = stm_large_malloc(10000); + + dump(d1); + dump(d2); + dump(d3); + dump(d3 + 10016); + return 0; } diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -11,14 +11,15 @@ #define MMAP_LIMIT (1280*1024) -#define largebin_index(sz) \ - ((((sz) >> 6) <= 47) ? ((sz) >> 6): /* 0 - 47 */ \ - (((sz) >> 9) <= 23) ? 42 + ((sz) >> 9): /* 48 - 65 */ \ - (((sz) >> 12) <= 11) ? 63 + ((sz) >> 12): /* 66 - 74 */ \ - (((sz) >> 15) <= 5) ? 74 + ((sz) >> 15): /* 75 - 79 */ \ - (((sz) >> 18) <= 2) ? 80 + ((sz) >> 18): /* 80 - 82 */ \ - 83) -#define N_BINS 84 +#define largebin_index(sz) \ + (((sz) < (48 << 6)) ? ((sz) >> 6): /* 0 - 47 */ \ + ((sz) < (24 << 9)) ? 42 + ((sz) >> 9): /* 48 - 65 */ \ + ((sz) < (12 << 12)) ? 63 + ((sz) >> 12): /* 66 - 74 */ \ + ((sz) < (6 << 15)) ? 74 + ((sz) >> 15): /* 75 - 79 */ \ + ((sz) < (3 << 18)) ? 
80 + ((sz) >> 18): /* 80 - 82 */ \ + 83) +#define N_BINS 84 +#define LAST_BIN_INDEX(sz) ((sz) >= (3 << 18)) typedef struct dlist_s { struct dlist_s *next; /* a doubly-linked list */ @@ -29,7 +30,8 @@ size_t prev_size; /* - if the previous chunk is free: size of its data - otherwise, if this chunk is free: 1 - otherwise, 0. */ - size_t size; /* size of the data in this chunk */ + size_t size; /* size of the data in this chunk, + plus optionally the FLAG_UNSORTED */ dlist_t d; /* if free: a doubly-linked list */ /* if not free: the user data starts here */ @@ -41,6 +43,7 @@ considered "not free". */ } mchunk_t; +#define FLAG_UNSORTED 1 #define THIS_CHUNK_FREE 1 #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) @@ -60,12 +63,14 @@ neighbors to ensure this. In each bin's doubly-linked list, chunks are sorted by their size in - decreasing order (if you start from 'd.next'). + decreasing order (if you start from 'd.next'). At the end of this + list are some unsorted chunks (with FLAG_UNSORTED). All unsorted + chunks are after all sorted chunks. */ -static struct { dlist_t d; dlist_t *unsorted; } largebins[N_BINS] = { +static dlist_t largebins[N_BINS] = { -#define INIT(num) { { &largebins[num].d, &largebins[num].d }, NULL } +#define INIT(num) { largebins + num, largebins + num } INIT(0), INIT(1), INIT(2), INIT(3), INIT(4), INIT(5), INIT(6), INIT(7), INIT(8), INIT(9), INIT(10), INIT(11), INIT(12), INIT(13), INIT(14), @@ -90,59 +95,67 @@ static void insert_unsorted(mchunk_t *new) { - size_t index = largebin_index(new->size); - new->d.next = largebins[index].unsorted; - largebins[index].unsorted = &new->d; + size_t index = LAST_BIN_INDEX(new->size) ? N_BINS - 1 + : largebin_index(new->size); + new->d.next = &largebins[index]; + new->d.prev = largebins[index].prev; + new->d.prev->next = &new->d; + largebins[index].prev = &new->d; + new->size |= FLAG_UNSORTED; } static int compare_chunks(const void *vchunk1, const void *vchunk2) { - /* sort by decreasing size */ + /* sort by size */ const mchunk_t *chunk1 = (const mchunk_t *)vchunk1; const mchunk_t *chunk2 = (const mchunk_t *)vchunk2; if (chunk1->size < chunk2->size) - return 1; + return -1; if (chunk1->size == chunk2->size) return 0; else - return -1; + return +1; } static void really_sort_bin(size_t index) { - dlist_t *unsorted = largebins[index].unsorted; - largebins[index].unsorted = NULL; - - dlist_t *scan = unsorted->next; + dlist_t *unsorted = largebins[index].prev; + dlist_t *end = &largebins[index]; + dlist_t *scan = unsorted->prev; size_t count = 1; - while (scan != NULL) { - scan = scan->next; + while (scan != end && (data2chunk(scan)->size & FLAG_UNSORTED)) { + scan = scan->prev; ++count; } + end->prev = scan; + scan->next = end; mchunk_t *chunks[count]; size_t i; for (i = 0; i < count; i++) { chunks[i] = data2chunk(unsorted); - unsorted = unsorted->next; + unsorted = unsorted->prev; } + assert(unsorted == scan); qsort(chunks, count, sizeof(mchunk_t *), compare_chunks); - dlist_t *head = largebins[index].d.next; - dlist_t *end = &largebins[index].d; - size_t search_size = chunks[0]->size; - i = 0; + --count; + chunks[count]->size &= ~FLAG_UNSORTED; + size_t search_size = chunks[count]->size; + dlist_t *head = largebins[index].next; while (1) { if (head == end || search_size >= data2chunk(head)->size) { - /* insert 'chunks[i]' here, before the current head */ - head->prev->next = &chunks[i]->d; - chunks[i]->d.prev = head->prev; - head->prev = &chunks[i]->d; - chunks[i]->d.next = head; - if (++i == count) 
+ /* insert 'chunks[count]' here, before the current head */ + head->prev->next = &chunks[count]->d; + chunks[count]->d.prev = head->prev; + head->prev = &chunks[count]->d; + chunks[count]->d.next = head; + if (count == 0) break; /* all done */ - search_size = chunks[i]->size; + --count; + chunks[count]->size &= ~FLAG_UNSORTED; + search_size = chunks[count]->size; } else { head = head->next; @@ -152,7 +165,8 @@ static void sort_bin(size_t index) { - if (largebins[index].unsorted != NULL) + dlist_t *last = largebins[index].prev; + if (last != &largebins[index] && (data2chunk(last)->size & FLAG_UNSORTED)) really_sort_bin(index); } @@ -166,40 +180,16 @@ /* scan through the chunks of current bin in reverse order to find the smallest that fits. */ - dlist_t *scan = largebins[index].d.prev; - dlist_t *head = largebins[index].d.next; + dlist_t *scan = largebins[index].prev; + dlist_t *end = &largebins[index]; mchunk_t *mscan; - while (scan != head) { + while (scan != end) { mscan = data2chunk(scan); assert(mscan->prev_size == THIS_CHUNK_FREE); assert(next_chunk(mscan)->prev_size == mscan->size); - if (mscan->size >= request_size) { - /* found! */ - found: - /* unlink mscan from the doubly-linked list */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; - - size_t remaining_size = mscan->size - request_size; - if (remaining_size < sizeof(struct malloc_chunk)) { - next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; - } - else { - /* only part of the chunk is being used; reduce the size - of 'mscan' down to 'request_size', and create a new - chunk of the 'remaining_size' afterwards */ - mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + - request_size); - new->prev_size = THIS_CHUNK_FREE; - new->size = remaining_size - CHUNK_HEADER_SIZE; - next_chunk(new)->prev_size = remaining_size; - insert_unsorted(new); - mscan->size = request_size; - } - mscan->prev_size = BOTH_CHUNKS_USED; - return (char *)&mscan->d; - } + if (mscan->size >= request_size) + goto found; scan = mscan->d.prev; } @@ -208,8 +198,9 @@ enough. xxx use a bitmap to speed this up */ while (++index < N_BINS) { sort_bin(index); - scan = largebins[index].d.prev; - if (scan != &largebins[index].d) { + scan = largebins[index].prev; + end = &largebins[index]; + if (scan != end) { mscan = data2chunk(scan); assert(mscan->size >= request_size); goto found; @@ -218,6 +209,31 @@ /* not enough free memory. We need to allocate more. 
*/ return allocate_more(request_size); + + found: + /* unlink mscan from the doubly-linked list */ + mscan->d.next->prev = mscan->d.prev; + mscan->d.prev->next = mscan->d.next; + + size_t remaining_size = mscan->size - request_size; + if (remaining_size < sizeof(struct malloc_chunk)) { + next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; + } + else { + /* only part of the chunk is being used; reduce the size + of 'mscan' down to 'request_size', and create a new + chunk of the 'remaining_size' afterwards */ + mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + + request_size); + new->prev_size = THIS_CHUNK_FREE; + remaining_size -= CHUNK_HEADER_SIZE; + new->size = remaining_size; + next_chunk(new)->prev_size = remaining_size; + insert_unsorted(new); + mscan->size = request_size; + } + mscan->prev_size = BOTH_CHUNKS_USED; + return (char *)&mscan->d; } static char *allocate_more(size_t request_size) @@ -233,8 +249,30 @@ big_chunk->prev_size = THIS_CHUNK_FREE; big_chunk->size = big_size - CHUNK_HEADER_SIZE - sizeof(size_t); + + assert((char *)&next_chunk(big_chunk)->prev_size == + ((char *)big_chunk) + big_size - sizeof(size_t)); next_chunk(big_chunk)->prev_size = big_chunk->size; + insert_unsorted(big_chunk); return stm_large_malloc(request_size); } + +void stm_large_free(char *data) +{ +#if 0 + mchunk_t *chunk = data2chunk(data); + assert(chunk->prev_size != THIS_CHUNK_FREE); + + if (chunk->prev_size == BOTH_CHUNKS_USED) { + chunk->prev_size = THIS_CHUNK_FREE; + } + else { + assert((chunk->prev_size & (sizeof(char *) - 1)) == 0); + + /* merge with the previous chunk */ + ... + } +#endif +} From noreply at buildbot.pypy.org Fri Dec 20 12:31:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 12:31:36 +0100 (CET) Subject: [pypy-commit] stmgc c5: stm_large_free() Message-ID: <20131220113136.6D5D51C01F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r571:9f1e3f703bf8 Date: 2013-12-20 12:31 +0100 http://bitbucket.org/pypy/stmgc/changeset/9f1e3f703bf8/ Log: stm_large_free() diff --git a/c5/demo2.c b/c5/demo2.c --- a/c5/demo2.c +++ b/c5/demo2.c @@ -1,28 +1,50 @@ #include #include +#include char *stm_large_malloc(size_t request_size); +void stm_large_free(char *data); -static void dump(char *data) +static void dump(char *start) { - fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); - fprintf(stderr, " %p: %zu\n", data - 8, *(size_t*)(data - 8)); - size_t n = (*(size_t*)(data - 8)) & ~1; - fprintf(stderr, " %p: %zu ]\n", data + n, *(size_t*)(data + n)); + char *data = start; + char *stop = start + 999999; + + while (data < stop) { + fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + fprintf(stderr, " %p: %zu ]\n", data - 8, *(size_t*)(data - 8)); + data += (*(size_t*)(data - 8)) & ~1; + data += 16; + } + fprintf(stderr, ". 
%p: %zu\n\n", data - 16, *(size_t*)(data - 16)); } int main() { - char *d1 = stm_large_malloc(10000); - char *d2 = stm_large_malloc(10000); - char *d3 = stm_large_malloc(10000); + char *d1 = stm_large_malloc(7000); + char *start = d1; + char *d2 = stm_large_malloc(8000); + char *d3 = stm_large_malloc(9000); - dump(d1); - dump(d2); - dump(d3); - dump(d3 + 10016); + dump(start); + + stm_large_free(d1); + stm_large_free(d2); + + dump(start); + + char *d4 = stm_large_malloc(600); + assert(d4 == d1); + char *d5 = stm_large_malloc(600); + assert(d5 == d4 + 616); + + dump(start); + + stm_large_free(d5); + + dump(start); return 0; } diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -261,18 +261,61 @@ void stm_large_free(char *data) { -#if 0 mchunk_t *chunk = data2chunk(data); + assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); + /* try to merge with the following chunk in memory */ + size_t msize = chunk->size + CHUNK_HEADER_SIZE; + mchunk_t *mscan = chunk_at_offset(chunk, msize); + + if (mscan->prev_size == BOTH_CHUNKS_USED) { + assert((mscan->size & (sizeof(char *) - 1)) == 0); + mscan->prev_size = chunk->size; + } + else { + mscan->size &= ~FLAG_UNSORTED; + size_t fsize = mscan->size; + mchunk_t *fscan = chunk_at_offset(mscan, fsize + CHUNK_HEADER_SIZE); + + /* unlink the following chunk */ + mscan->d.next->prev = mscan->d.prev; + mscan->d.prev->next = mscan->d.next; + assert(mscan->prev_size = (size_t)-1); + assert(mscan->size = (size_t)-1); + + /* merge the two chunks */ + assert(fsize == fscan->prev_size); + fsize += msize; + fscan->prev_size = fsize; + chunk->size = fsize; + } + + /* try to merge with the previous chunk in memory */ if (chunk->prev_size == BOTH_CHUNKS_USED) { chunk->prev_size = THIS_CHUNK_FREE; } else { assert((chunk->prev_size & (sizeof(char *) - 1)) == 0); - /* merge with the previous chunk */ - ... + /* get at the previous chunk */ + msize = chunk->prev_size + CHUNK_HEADER_SIZE; + mscan = chunk_at_offset(chunk, -msize); + assert(mscan->prev_size == THIS_CHUNK_FREE); + assert((mscan->size & ~FLAG_UNSORTED) == chunk->prev_size); + + /* unlink the previous chunk */ + mscan->d.next->prev = mscan->d.prev; + mscan->d.prev->next = mscan->d.next; + + /* merge the two chunks */ + mscan->size = msize + chunk->size; + next_chunk(mscan)->prev_size = mscan->size; + + assert(chunk->prev_size = (size_t)-1); + assert(chunk->size = (size_t)-1); + chunk = mscan; } -#endif + + insert_unsorted(chunk); } From noreply at buildbot.pypy.org Fri Dec 20 13:39:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 13:39:24 +0100 (CET) Subject: [pypy-commit] stmgc c5: Fixes. Invert the flag from FLAG_UNSORTED to FLAG_SORTED. Message-ID: <20131220123924.21C141C01F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r572:7446839d5142 Date: 2013-12-20 13:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/7446839d5142/ Log: Fixes. Invert the flag from FLAG_UNSORTED to FLAG_SORTED. 
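(Aside, for illustration only and not part of any changeset in this thread: the bins whose sort flag is inverted below are the ones selected by the largebin_index() formula introduced in the "fix fix fix" changeset r570 above. A minimal standalone sketch, reusing that formula verbatim, of how an aligned block size maps to one of the N_BINS = 84 bins; the bin spacing grows by a factor of 8 from group to group (64, 512, 4096, 32768, 262144 bytes), so the index is computed in constant time.)

    #include <stdio.h>
    #include <stddef.h>

    /* same formula as in c5/largemalloc.c after changeset r570 */
    #define largebin_index(sz)                                        \
        (((sz) < (48 << 6))  ?      ((sz) >> 6)  :  /*  0 - 47 */     \
         ((sz) < (24 << 9))  ? 42 + ((sz) >> 9)  :  /* 48 - 65 */     \
         ((sz) < (12 << 12)) ? 63 + ((sz) >> 12) :  /* 66 - 74 */     \
         ((sz) < (6 << 15))  ? 74 + ((sz) >> 15) :  /* 75 - 79 */     \
         ((sz) < (3 << 18))  ? 80 + ((sz) >> 18) :  /* 80 - 82 */     \
                               83)

    int main(void)
    {
        size_t sizes[] = {288, 1000, 10000, 100000, 1000000};
        size_t i;
        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("size %7zu -> bin %2d\n",
                   sizes[i], (int)largebin_index(sizes[i]));
        return 0;   /* prints bins 4, 15, 61, 77 and 83 */
    }
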
diff --git a/c5/demo2.c b/c5/demo2.c --- a/c5/demo2.c +++ b/c5/demo2.c @@ -3,22 +3,23 @@ #include +#define END_MARKER 0xDEADBEEF + char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); -static void dump(char *start) +static void dump(char *data) { - char *data = start; - char *stop = start + 999999; - - while (data < stop) { + while (1) { fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + if (*(size_t*)(data - 8) == END_MARKER) + break; fprintf(stderr, " %p: %zu ]\n", data - 8, *(size_t*)(data - 8)); data += (*(size_t*)(data - 8)) & ~1; data += 16; } - fprintf(stderr, ". %p: %zu\n\n", data - 16, *(size_t*)(data - 16)); + fprintf(stderr, " %p: end. ]\n\n", data - 8); } int main() @@ -46,5 +47,14 @@ dump(start); + stm_large_malloc(600); + stm_large_free(d4); + + dump(start); + + stm_large_malloc(608); + + dump(start); + return 0; } diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -31,7 +31,7 @@ - otherwise, if this chunk is free: 1 - otherwise, 0. */ size_t size; /* size of the data in this chunk, - plus optionally the FLAG_UNSORTED */ + plus optionally the FLAG_SORTED */ dlist_t d; /* if free: a doubly-linked list */ /* if not free: the user data starts here */ @@ -43,14 +43,25 @@ considered "not free". */ } mchunk_t; -#define FLAG_UNSORTED 1 +#define FLAG_SORTED 1 #define THIS_CHUNK_FREE 1 #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) +#define END_MARKER 0xDEADBEEF #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) -#define next_chunk(p) chunk_at_offset(p, CHUNK_HEADER_SIZE + (p)->size) + +static mchunk_t *next_chunk_s(mchunk_t *p) +{ + assert(p->size & FLAG_SORTED); + return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size - FLAG_SORTED); +} +static mchunk_t *next_chunk_u(mchunk_t *p) +{ + assert(!(p->size & FLAG_SORTED)); + return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size); +} /* The free chunks are stored in "bins". Each bin is a doubly-linked @@ -64,8 +75,8 @@ In each bin's doubly-linked list, chunks are sorted by their size in decreasing order (if you start from 'd.next'). At the end of this - list are some unsorted chunks (with FLAG_UNSORTED). All unsorted - chunks are after all sorted chunks. + list are some unsorted chunks. All unsorted chunks are after all + sorted chunks. The flag 'FLAG_SORTED' distinguishes them. 
*/ static dlist_t largebins[N_BINS] = { @@ -101,7 +112,7 @@ new->d.prev = largebins[index].prev; new->d.prev->next = &new->d; largebins[index].prev = &new->d; - new->size |= FLAG_UNSORTED; + assert(!(new->size & FLAG_SORTED)); } static int compare_chunks(const void *vchunk1, const void *vchunk2) @@ -123,7 +134,7 @@ dlist_t *end = &largebins[index]; dlist_t *scan = unsorted->prev; size_t count = 1; - while (scan != end && (data2chunk(scan)->size & FLAG_UNSORTED)) { + while (scan != end && !(data2chunk(scan)->size & FLAG_SORTED)) { scan = scan->prev; ++count; } @@ -140,7 +151,7 @@ qsort(chunks, count, sizeof(mchunk_t *), compare_chunks); --count; - chunks[count]->size &= ~FLAG_UNSORTED; + chunks[count]->size |= FLAG_SORTED; size_t search_size = chunks[count]->size; dlist_t *head = largebins[index].next; @@ -154,7 +165,7 @@ if (count == 0) break; /* all done */ --count; - chunks[count]->size &= ~FLAG_UNSORTED; + chunks[count]->size |= FLAG_SORTED; search_size = chunks[count]->size; } else { @@ -166,7 +177,7 @@ static void sort_bin(size_t index) { dlist_t *last = largebins[index].prev; - if (last != &largebins[index] && (data2chunk(last)->size & FLAG_UNSORTED)) + if (last != &largebins[index] && !(data2chunk(last)->size & FLAG_SORTED)) really_sort_bin(index); } @@ -186,9 +197,9 @@ while (scan != end) { mscan = data2chunk(scan); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert(next_chunk(mscan)->prev_size == mscan->size); + assert(next_chunk_s(mscan)->prev_size == mscan->size - FLAG_SORTED); - if (mscan->size >= request_size) + if (mscan->size > request_size) goto found; scan = mscan->d.prev; } @@ -202,7 +213,6 @@ end = &largebins[index]; if (scan != end) { mscan = data2chunk(scan); - assert(mscan->size >= request_size); goto found; } } @@ -211,13 +221,16 @@ return allocate_more(request_size); found: + assert(mscan->size & FLAG_SORTED); + assert(mscan->size > request_size); + /* unlink mscan from the doubly-linked list */ mscan->d.next->prev = mscan->d.prev; mscan->d.prev->next = mscan->d.next; - size_t remaining_size = mscan->size - request_size; - if (remaining_size < sizeof(struct malloc_chunk)) { - next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; + size_t remaining_size_plus_1 = mscan->size - request_size; + if (remaining_size_plus_1 <= sizeof(struct malloc_chunk)) { + next_chunk_s(mscan)->prev_size = BOTH_CHUNKS_USED; } else { /* only part of the chunk is being used; reduce the size @@ -226,9 +239,9 @@ mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + request_size); new->prev_size = THIS_CHUNK_FREE; - remaining_size -= CHUNK_HEADER_SIZE; + size_t remaining_size = remaining_size_plus_1 - 1 - CHUNK_HEADER_SIZE; new->size = remaining_size; - next_chunk(new)->prev_size = remaining_size; + next_chunk_u(new)->prev_size = remaining_size; insert_unsorted(new); mscan->size = request_size; } @@ -248,11 +261,12 @@ } big_chunk->prev_size = THIS_CHUNK_FREE; - big_chunk->size = big_size - CHUNK_HEADER_SIZE - sizeof(size_t); + big_chunk->size = big_size - CHUNK_HEADER_SIZE * 2; - assert((char *)&next_chunk(big_chunk)->prev_size == - ((char *)big_chunk) + big_size - sizeof(size_t)); - next_chunk(big_chunk)->prev_size = big_chunk->size; + assert((char *)&next_chunk_u(big_chunk)->prev_size == + ((char *)big_chunk) + big_size - CHUNK_HEADER_SIZE); + next_chunk_u(big_chunk)->prev_size = big_chunk->size; + next_chunk_u(big_chunk)->size = END_MARKER; insert_unsorted(big_chunk); @@ -274,7 +288,7 @@ mscan->prev_size = chunk->size; } else { - mscan->size &= ~FLAG_UNSORTED; + mscan->size &= 
~FLAG_SORTED; size_t fsize = mscan->size; mchunk_t *fscan = chunk_at_offset(mscan, fsize + CHUNK_HEADER_SIZE); @@ -302,7 +316,7 @@ msize = chunk->prev_size + CHUNK_HEADER_SIZE; mscan = chunk_at_offset(chunk, -msize); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert((mscan->size & ~FLAG_UNSORTED) == chunk->prev_size); + assert((mscan->size & ~FLAG_SORTED) == chunk->prev_size); /* unlink the previous chunk */ mscan->d.next->prev = mscan->d.prev; @@ -310,7 +324,7 @@ /* merge the two chunks */ mscan->size = msize + chunk->size; - next_chunk(mscan)->prev_size = mscan->size; + next_chunk_u(mscan)->prev_size = mscan->size; assert(chunk->prev_size = (size_t)-1); assert(chunk->size = (size_t)-1); From noreply at buildbot.pypy.org Fri Dec 20 14:12:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 14:12:56 +0100 (CET) Subject: [pypy-commit] stmgc c5: Make a proper test file. Message-ID: <20131220131256.47FED1C11A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r573:cfd6bed8501b Date: 2013-12-20 13:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/cfd6bed8501b/ Log: Make a proper test file. diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -3,10 +3,10 @@ blocks, which in our case means at least 288 bytes. */ -#include #include #include #include +#include "largemalloc.h" #define MMAP_LIMIT (1280*1024) diff --git a/c5/largemalloc.h b/c5/largemalloc.h new file mode 100644 --- /dev/null +++ b/c5/largemalloc.h @@ -0,0 +1,4 @@ +#include + +char *stm_large_malloc(size_t request_size); +void stm_large_free(char *data); diff --git a/c5/test/support.py b/c5/test/support.py --- a/c5/test/support.py +++ b/c5/test/support.py @@ -6,9 +6,9 @@ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) header_files = [os.path.join(parent_dir, _n) for _n in - "core.h pagecopy.h".split()] + "core.h pagecopy.h largemalloc.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in - "core.c pagecopy.c".split()] + "core.c pagecopy.c largemalloc.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -41,15 +41,19 @@ void _stm_restore_local_state(struct local_data_s *p); void _stm_teardown(void); void _stm_teardown_process(void); + +char *stm_large_malloc(size_t request_size); +void stm_large_free(char *data); """) lib = ffi.verify(''' #include "core.h" +#include "largemalloc.h" ''', sources=source_files, define_macros=[('STM_TESTS', '1')], undef_macros=['NDEBUG'], include_dirs=[parent_dir], - extra_compile_args=['-g', '-O0']) + extra_compile_args=['-g', '-O0', '-Werror']) def intptr(p): return int(ffi.cast("intptr_t", p)) diff --git a/c5/test/test_largemalloc.py b/c5/test/test_largemalloc.py new file mode 100644 --- /dev/null +++ b/c5/test/test_largemalloc.py @@ -0,0 +1,31 @@ +from support import * + + +class TestLargeMalloc(object): + + def test_simple(self): + d1 = lib.stm_large_malloc(7000) + d2 = lib.stm_large_malloc(8000) + assert d2 - d1 == 7016 + d3 = lib.stm_large_malloc(9000) + assert d3 - d2 == 8016 + # + lib.stm_large_free(d1) + lib.stm_large_free(d2) + # + d4 = lib.stm_large_malloc(600) + assert d4 == d1 + d5 = lib.stm_large_malloc(600) + assert d5 == d4 + 616 + # + lib.stm_large_free(d5) + # + d6 = lib.stm_large_malloc(600) + assert d6 == d5 + # + lib.stm_large_free(d4) + # + d7 = lib.stm_large_malloc(608) + assert d7 == d6 + 616 + d8 = lib.stm_large_malloc(600) + assert d8 == d4 From noreply at buildbot.pypy.org Fri Dec 20 14:12:57 2013 
From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 14:12:57 +0100 (CET) Subject: [pypy-commit] stmgc c5: Test and fix Message-ID: <20131220131257.849751C11A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r574:d90013065da3 Date: 2013-12-20 14:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/d90013065da3/ Log: Test and fix diff --git a/c5/demo2.c b/c5/demo2.c --- a/c5/demo2.c +++ b/c5/demo2.c @@ -3,24 +3,12 @@ #include -#define END_MARKER 0xDEADBEEF - char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); +void _stm_large_dump(char *data); +#define dump _stm_large_dump -static void dump(char *data) -{ - while (1) { - fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); - if (*(size_t*)(data - 8) == END_MARKER) - break; - fprintf(stderr, " %p: %zu ]\n", data - 8, *(size_t*)(data - 8)); - data += (*(size_t*)(data - 8)) & ~1; - data += 16; - } - fprintf(stderr, " %p: end. ]\n\n", data - 8); -} int main() { diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -231,6 +231,7 @@ size_t remaining_size_plus_1 = mscan->size - request_size; if (remaining_size_plus_1 <= sizeof(struct malloc_chunk)) { next_chunk_s(mscan)->prev_size = BOTH_CHUNKS_USED; + request_size = mscan->size & ~FLAG_SORTED; } else { /* only part of the chunk is being used; reduce the size @@ -243,8 +244,8 @@ new->size = remaining_size; next_chunk_u(new)->prev_size = remaining_size; insert_unsorted(new); - mscan->size = request_size; } + mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; return (char *)&mscan->d; } @@ -259,6 +260,7 @@ fprintf(stderr, "out of memory!\n"); abort(); } + fprintf(stderr, "allocate_more: %p\n", &big_chunk->d); big_chunk->prev_size = THIS_CHUNK_FREE; big_chunk->size = big_size - CHUNK_HEADER_SIZE * 2; @@ -284,7 +286,7 @@ mchunk_t *mscan = chunk_at_offset(chunk, msize); if (mscan->prev_size == BOTH_CHUNKS_USED) { - assert((mscan->size & (sizeof(char *) - 1)) == 0); + assert((mscan->size & ((sizeof(char *) - 1) & ~FLAG_SORTED)) == 0); mscan->prev_size = chunk->size; } else { @@ -333,3 +335,31 @@ insert_unsorted(chunk); } + + +void _stm_large_dump(char *data) +{ + size_t prev_size_if_free = 0; + while (1) { + fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + if (prev_size_if_free == 0) { + assert(*(size_t*)(data - 16) == THIS_CHUNK_FREE || + *(size_t*)(data - 16) == BOTH_CHUNKS_USED); + if (*(size_t*)(data - 16) == THIS_CHUNK_FREE) + prev_size_if_free = (*(size_t*)(data - 8)) & ~FLAG_SORTED; + } + else { + assert(*(size_t*)(data - 16) == prev_size_if_free); + prev_size_if_free = 0; + } + if (*(size_t*)(data - 8) == END_MARKER) + break; + fprintf(stderr, " %p: %zu]%s\n", data - 8, *(size_t*)(data - 8), + prev_size_if_free ? " (free)" : ""); + if (!prev_size_if_free) + assert(!((*(size_t*)(data - 8)) & FLAG_SORTED)); + data += (*(size_t*)(data - 8)) & ~FLAG_SORTED; + data += 16; + } + fprintf(stderr, " %p: end. 
]\n\n", data - 8); +} diff --git a/c5/largemalloc.h b/c5/largemalloc.h --- a/c5/largemalloc.h +++ b/c5/largemalloc.h @@ -2,3 +2,4 @@ char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); +void _stm_large_dump(char *data); diff --git a/c5/test/support.py b/c5/test/support.py --- a/c5/test/support.py +++ b/c5/test/support.py @@ -44,9 +44,13 @@ char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); +void _stm_large_dump(char *data); + +void *memset(void *s, int c, size_t n); """) lib = ffi.verify(''' +#include #include "core.h" #include "largemalloc.h" ''', sources=source_files, diff --git a/c5/test/test_largemalloc.py b/c5/test/test_largemalloc.py --- a/c5/test/test_largemalloc.py +++ b/c5/test/test_largemalloc.py @@ -1,4 +1,5 @@ from support import * +import random class TestLargeMalloc(object): @@ -29,3 +30,25 @@ assert d7 == d6 + 616 d8 = lib.stm_large_malloc(600) assert d8 == d4 + + def test_random(self): + r = random.Random(1005) + p = [] + for i in range(100000): + if len(p) != 0 and (len(p) > 100 or r.randrange(0, 5) < 2): + index = r.randrange(0, len(p)) + d, length, content1, content2 = p.pop(index) + print ' free %5d (%s)' % (length, d) + assert d[0] == content1 + assert d[length - 1] == content2 + lib.stm_large_free(d) + else: + sz = r.randrange(8, 160) * 8 + d = lib.stm_large_malloc(sz) + print 'alloc %5d (%s)' % (sz, d) + lib.memset(d, 0xdd, sz) + content1 = chr(r.randrange(0, 256)) + content2 = chr(r.randrange(0, 256)) + d[0] = content1 + d[sz - 1] = content2 + p.append((d, sz, content1, content2)) From noreply at buildbot.pypy.org Fri Dec 20 14:20:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 14:20:57 +0100 (CET) Subject: [pypy-commit] stmgc c5: Tweak the tests Message-ID: <20131220132057.DEA5A1C11A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r575:aa88fa9f2d50 Date: 2013-12-20 14:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/aa88fa9f2d50/ Log: Tweak the tests diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -77,6 +77,11 @@ decreasing order (if you start from 'd.next'). At the end of this list are some unsorted chunks. All unsorted chunks are after all sorted chunks. The flag 'FLAG_SORTED' distinguishes them. + + Note that if the user always calls stm_large_malloc() with a large + enough argument, then the few bins corresponding to smaller values + will never be sorted at all. They are still populated with the + fragments of space between bigger allocations. 
*/ static dlist_t largebins[N_BINS] = { @@ -101,6 +106,13 @@ INIT(80), INIT(81), INIT(82), INIT(83) }; #undef INIT +void _stm_large_reset(void) +{ + int i; + for (i = 0; i < N_BINS; i++) + largebins[i].prev = largebins[i].next = &largebins[i]; +} + static char *allocate_more(size_t request_size); diff --git a/c5/largemalloc.h b/c5/largemalloc.h --- a/c5/largemalloc.h +++ b/c5/largemalloc.h @@ -3,3 +3,4 @@ char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); void _stm_large_dump(char *data); +void _stm_large_reset(void); diff --git a/c5/test/support.py b/c5/test/support.py --- a/c5/test/support.py +++ b/c5/test/support.py @@ -45,6 +45,7 @@ char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); void _stm_large_dump(char *data); +void _stm_large_reset(void); void *memset(void *s, int c, size_t n); """) diff --git a/c5/test/test_largemalloc.py b/c5/test/test_largemalloc.py --- a/c5/test/test_largemalloc.py +++ b/c5/test/test_largemalloc.py @@ -4,6 +4,9 @@ class TestLargeMalloc(object): + def setup_method(self, meth): + lib._stm_large_reset() + def test_simple(self): d1 = lib.stm_large_malloc(7000) d2 = lib.stm_large_malloc(8000) @@ -32,7 +35,8 @@ assert d8 == d4 def test_random(self): - r = random.Random(1005) + r = random.Random(1007) + first = None p = [] for i in range(100000): if len(p) != 0 and (len(p) > 100 or r.randrange(0, 5) < 2): @@ -46,9 +50,12 @@ sz = r.randrange(8, 160) * 8 d = lib.stm_large_malloc(sz) print 'alloc %5d (%s)' % (sz, d) + if first is None: + first = d lib.memset(d, 0xdd, sz) content1 = chr(r.randrange(0, 256)) content2 = chr(r.randrange(0, 256)) d[0] = content1 d[sz - 1] = content2 p.append((d, sz, content1, content2)) + lib._stm_large_dump(first) From noreply at buildbot.pypy.org Fri Dec 20 16:58:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 16:58:44 +0100 (CET) Subject: [pypy-commit] stmgc c5: Finally, make largemalloc.c work on an externally-provided arena of Message-ID: <20131220155844.4F60B1C356B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r576:2925f0ed8eed Date: 2013-12-20 16:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/2925f0ed8eed/ Log: Finally, make largemalloc.c work on an externally-provided arena of memory, which can grow or shrink at the end. diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -84,37 +84,9 @@ fragments of space between bigger allocations. 
*/ -static dlist_t largebins[N_BINS] = { +static dlist_t largebins[N_BINS]; +static mchunk_t *first_chunk, *last_chunk; -#define INIT(num) { largebins + num, largebins + num } - INIT(0), INIT(1), INIT(2), INIT(3), INIT(4), - INIT(5), INIT(6), INIT(7), INIT(8), INIT(9), - INIT(10), INIT(11), INIT(12), INIT(13), INIT(14), - INIT(15), INIT(16), INIT(17), INIT(18), INIT(19), - INIT(20), INIT(21), INIT(22), INIT(23), INIT(24), - INIT(25), INIT(26), INIT(27), INIT(28), INIT(29), - INIT(30), INIT(31), INIT(32), INIT(33), INIT(34), - INIT(35), INIT(36), INIT(37), INIT(38), INIT(39), - INIT(40), INIT(41), INIT(42), INIT(43), INIT(44), - INIT(45), INIT(46), INIT(47), INIT(48), INIT(49), - INIT(50), INIT(51), INIT(52), INIT(53), INIT(54), - INIT(55), INIT(56), INIT(57), INIT(58), INIT(59), - INIT(60), INIT(61), INIT(62), INIT(63), INIT(64), - INIT(65), INIT(66), INIT(67), INIT(68), INIT(69), - INIT(70), INIT(71), INIT(72), INIT(73), INIT(74), - INIT(75), INIT(76), INIT(77), INIT(78), INIT(79), - INIT(80), INIT(81), INIT(82), INIT(83) }; -#undef INIT - -void _stm_large_reset(void) -{ - int i; - for (i = 0; i < N_BINS; i++) - largebins[i].prev = largebins[i].next = &largebins[i]; -} - - -static char *allocate_more(size_t request_size); static void insert_unsorted(mchunk_t *new) { @@ -229,8 +201,8 @@ } } - /* not enough free memory. We need to allocate more. */ - return allocate_more(request_size); + /* not enough memory. */ + return NULL; found: assert(mscan->size & FLAG_SORTED); @@ -262,31 +234,6 @@ return (char *)&mscan->d; } -static char *allocate_more(size_t request_size) -{ - assert(request_size < MMAP_LIMIT);//XXX - - size_t big_size = MMAP_LIMIT * 8 - 48; - mchunk_t *big_chunk = (mchunk_t *)malloc(big_size); - if (!big_chunk) { - fprintf(stderr, "out of memory!\n"); - abort(); - } - fprintf(stderr, "allocate_more: %p\n", &big_chunk->d); - - big_chunk->prev_size = THIS_CHUNK_FREE; - big_chunk->size = big_size - CHUNK_HEADER_SIZE * 2; - - assert((char *)&next_chunk_u(big_chunk)->prev_size == - ((char *)big_chunk) + big_size - CHUNK_HEADER_SIZE); - next_chunk_u(big_chunk)->prev_size = big_chunk->size; - next_chunk_u(big_chunk)->size = END_MARKER; - - insert_unsorted(big_chunk); - - return stm_large_malloc(request_size); -} - void stm_large_free(char *data) { mchunk_t *chunk = data2chunk(data); @@ -349,8 +296,9 @@ } -void _stm_large_dump(char *data) +void _stm_large_dump(void) { + char *data = ((char *)first_chunk) + 16; size_t prev_size_if_free = 0; while (1) { fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); @@ -366,12 +314,88 @@ } if (*(size_t*)(data - 8) == END_MARKER) break; - fprintf(stderr, " %p: %zu]%s\n", data - 8, *(size_t*)(data - 8), + fprintf(stderr, " %p: %zu ]%s\n", data - 8, *(size_t*)(data - 8), prev_size_if_free ? " (free)" : ""); if (!prev_size_if_free) assert(!((*(size_t*)(data - 8)) & FLAG_SORTED)); + assert(*(ssize_t*)(data - 8) > 0); data += (*(size_t*)(data - 8)) & ~FLAG_SORTED; data += 16; } fprintf(stderr, " %p: end. 
]\n\n", data - 8); + assert(data - 16 == (char *)last_chunk); } + +void stm_largemalloc_init(char *data_start, size_t data_size) +{ + int i; + for (i = 0; i < N_BINS; i++) + largebins[i].prev = largebins[i].next = &largebins[i]; + + assert(data_size >= 2 * sizeof(struct malloc_chunk)); + assert((data_size & 31) == 0); + first_chunk = (mchunk_t *)data_start; + first_chunk->prev_size = THIS_CHUNK_FREE; + first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; + last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE); + last_chunk->prev_size = first_chunk->size; + last_chunk->size = END_MARKER; + assert(last_chunk == next_chunk_u(first_chunk)); + + insert_unsorted(first_chunk); +} + +int stm_largemalloc_resize_arena(size_t new_size) +{ + assert(new_size >= 2 * sizeof(struct malloc_chunk)); + assert((new_size & 31) == 0); + + new_size -= CHUNK_HEADER_SIZE; + mchunk_t *new_last_chunk = chunk_at_offset(first_chunk, new_size); + mchunk_t *old_last_chunk = last_chunk; + size_t old_size = ((char *)old_last_chunk) - (char *)first_chunk; + + if (new_size < old_size) { + /* check if there is enough free space at the end to allow + such a reduction */ + size_t lsize = last_chunk->prev_size; + assert(lsize != THIS_CHUNK_FREE); + if (lsize == BOTH_CHUNKS_USED) + return 0; + lsize += CHUNK_HEADER_SIZE; + mchunk_t *prev_chunk = chunk_at_offset(last_chunk, -lsize); + if (((char *)new_last_chunk) < ((char *)prev_chunk) + + sizeof(struct malloc_chunk)) + return 0; + + /* unlink the prev_chunk from the doubly-linked list */ + prev_chunk->d.next->prev = prev_chunk->d.prev; + prev_chunk->d.prev->next = prev_chunk->d.next; + + /* reduce the prev_chunk */ + assert((prev_chunk->size & ~FLAG_SORTED) == last_chunk->prev_size); + prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk + - CHUNK_HEADER_SIZE; + + /* make a fresh-new last chunk */ + new_last_chunk->prev_size = prev_chunk->size; + new_last_chunk->size = END_MARKER; + last_chunk = new_last_chunk; + assert(last_chunk == next_chunk_u(prev_chunk)); + + insert_unsorted(prev_chunk); + } + else if (new_size > old_size) { + /* make the new last chunk first, with only the extra size */ + mchunk_t *old_last_chunk = last_chunk; + old_last_chunk->size = (new_size - old_size) - CHUNK_HEADER_SIZE; + new_last_chunk->prev_size = BOTH_CHUNKS_USED; + new_last_chunk->size = END_MARKER; + last_chunk = new_last_chunk; + assert(last_chunk == next_chunk_u(old_last_chunk)); + + /* then free the last_chunk (turn it from "used" to "free) */ + stm_large_free((char *)&old_last_chunk->d); + } + return 1; +} diff --git a/c5/largemalloc.h b/c5/largemalloc.h --- a/c5/largemalloc.h +++ b/c5/largemalloc.h @@ -1,6 +1,9 @@ #include +void stm_largemalloc_init(char *data_start, size_t data_size); +int stm_largemalloc_resize_arena(size_t new_size); + char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); -void _stm_large_dump(char *data); -void _stm_large_reset(void); + +void _stm_large_dump(void); diff --git a/c5/test/support.py b/c5/test/support.py --- a/c5/test/support.py +++ b/c5/test/support.py @@ -42,10 +42,11 @@ void _stm_teardown(void); void _stm_teardown_process(void); +void stm_largemalloc_init(char *data_start, size_t data_size); +int stm_largemalloc_resize_arena(size_t new_size); char *stm_large_malloc(size_t request_size); void stm_large_free(char *data); -void _stm_large_dump(char *data); -void _stm_large_reset(void); +void _stm_large_dump(void); void *memset(void *s, int c, size_t n); """) diff --git a/c5/test/test_largemalloc.py 
b/c5/test/test_largemalloc.py --- a/c5/test/test_largemalloc.py +++ b/c5/test/test_largemalloc.py @@ -1,11 +1,15 @@ from support import * -import random +import sys, random class TestLargeMalloc(object): def setup_method(self, meth): - lib._stm_large_reset() + size = 1024 * 1024 # 1MB + self.rawmem = ffi.new("char[]", size) + self.size = size + lib.memset(self.rawmem, 0xcd, size) + lib.stm_largemalloc_init(self.rawmem, size) def test_simple(self): d1 = lib.stm_large_malloc(7000) @@ -33,10 +37,51 @@ assert d7 == d6 + 616 d8 = lib.stm_large_malloc(600) assert d8 == d4 + # + lib._stm_large_dump() + + def test_overflow_1(self): + d = lib.stm_large_malloc(self.size - 32) + assert d == self.rawmem + 16 + lib._stm_large_dump() + + def test_overflow_2(self): + d = lib.stm_large_malloc(self.size - 16) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_overflow_3(self): + d = lib.stm_large_malloc(sys.maxint & ~7) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_resize_arena_reduce_1(self): + r = lib.stm_largemalloc_resize_arena(self.size - 32) + assert r == 1 + d = lib.stm_large_malloc(self.size - 32) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_resize_arena_reduce_2(self): + lib.stm_large_malloc(self.size // 2 - 64) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 1 + lib._stm_large_dump() + + def test_resize_arena_cannot_reduce_1(self): + lib.stm_large_malloc(self.size // 2) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 0 + lib._stm_large_dump() + + def test_resize_arena_cannot_reduce_2(self): + lib.stm_large_malloc(self.size // 2 - 56) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 0 + lib._stm_large_dump() def test_random(self): r = random.Random(1007) - first = None p = [] for i in range(100000): if len(p) != 0 and (len(p) > 100 or r.randrange(0, 5) < 2): @@ -50,12 +95,11 @@ sz = r.randrange(8, 160) * 8 d = lib.stm_large_malloc(sz) print 'alloc %5d (%s)' % (sz, d) - if first is None: - first = d + assert d != ffi.NULL lib.memset(d, 0xdd, sz) content1 = chr(r.randrange(0, 256)) content2 = chr(r.randrange(0, 256)) d[0] = content1 d[sz - 1] = content2 p.append((d, sz, content1, content2)) - lib._stm_large_dump(first) + lib._stm_large_dump() From noreply at buildbot.pypy.org Fri Dec 20 17:59:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 17:59:39 +0100 (CET) Subject: [pypy-commit] stmgc c5: Update demo2. Message-ID: <20131220165939.7979E1C35DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r578:d48250c2bf8a Date: 2013-12-20 17:31 +0100 http://bitbucket.org/pypy/stmgc/changeset/d48250c2bf8a/ Log: Update demo2. 
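(Aside, for illustration only and not part of any changeset in this thread: a minimal sketch of the public API from c5/largemalloc.h as it stands after changeset r576, which the updated demo2.c below exercises in more detail. The allocator manages a caller-provided arena and returns NULL when the arena is exhausted; the request size is assumed to be a multiple of the word size and the arena size a multiple of 32, matching the asserts visible in the diffs above.)

    #include <assert.h>
    #include <string.h>
    #include "largemalloc.h"

    static char arena[65536];               /* caller-provided memory */

    int main(void)
    {
        stm_largemalloc_init(arena, sizeof(arena));

        char *p = stm_large_malloc(7000);   /* NULL if out of room */
        assert(p != NULL);
        memset(p, 0, 7000);                 /* use the block */

        stm_large_free(p);
        _stm_large_dump();                  /* debug dump of the chunk chain */
        return 0;
    }
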
diff --git a/c5/Makefile b/c5/Makefile --- a/c5/Makefile +++ b/c5/Makefile @@ -6,5 +6,8 @@ demo1: demo1.c $(C_FILES) $(H_FILES) gcc -o $@ -O2 -g demo1.c $(C_FILES) -Wall +demo2: demo2.c largemalloc.c largemalloc.h + gcc -o $@ -g demo2.c largemalloc.c -Wall + clean: - rm -f demo1 + rm -f demo1 demo2 diff --git a/c5/demo2.c b/c5/demo2.c --- a/c5/demo2.c +++ b/c5/demo2.c @@ -3,46 +3,48 @@ #include -char *stm_large_malloc(size_t request_size); -void stm_large_free(char *data); -void _stm_large_dump(char *data); +#include "largemalloc.h" -#define dump _stm_large_dump +#define dump() _stm_large_dump() + + +char buffer[65536]; int main() { + stm_largemalloc_init(buffer, sizeof(buffer)); + char *d1 = stm_large_malloc(7000); - char *start = d1; char *d2 = stm_large_malloc(8000); - char *d3 = stm_large_malloc(9000); + /*char *d3 = */ stm_large_malloc(9000); - dump(start); + dump(); stm_large_free(d1); stm_large_free(d2); - dump(start); + dump(); char *d4 = stm_large_malloc(600); assert(d4 == d1); char *d5 = stm_large_malloc(600); assert(d5 == d4 + 616); - dump(start); + dump(); stm_large_free(d5); - dump(start); + dump(); stm_large_malloc(600); stm_large_free(d4); - dump(start); + dump(); stm_large_malloc(608); - dump(start); + dump(); return 0; } From noreply at buildbot.pypy.org Fri Dec 20 17:59:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 17:59:40 +0100 (CET) Subject: [pypy-commit] stmgc c5: A test for insert_unsorted() which I initially forgot in Message-ID: <20131220165940.96F8F1C35DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r579:6994d62df796 Date: 2013-12-20 17:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/6994d62df796/ Log: A test for insert_unsorted() which I initially forgot in stm_largemalloc_resize_arena(). diff --git a/c5/test/test_largemalloc.py b/c5/test/test_largemalloc.py --- a/c5/test/test_largemalloc.py +++ b/c5/test/test_largemalloc.py @@ -68,6 +68,15 @@ assert r == 1 lib._stm_large_dump() + def test_resize_arena_reduce_3(self): + d1 = lib.stm_large_malloc(128) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 1 + d2 = lib.stm_large_malloc(128) + assert d1 == self.rawmem + 16 + assert d2 == d1 + 128 + 16 + lib._stm_large_dump() + def test_resize_arena_cannot_reduce_1(self): lib.stm_large_malloc(self.size // 2) r = lib.stm_largemalloc_resize_arena(self.size // 2) From noreply at buildbot.pypy.org Fri Dec 20 17:59:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 17:59:38 +0100 (CET) Subject: [pypy-commit] stmgc c5: Comments and minor speed-ups. Message-ID: <20131220165938.676EC1C35D2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r577:f95f25b6f41d Date: 2013-12-20 17:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/f95f25b6f41d/ Log: Comments and minor speed-ups. diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -1,6 +1,8 @@ /* This contains a lot of inspiration from malloc() in the GNU C Library. More precisely, this is (a subset of) the part that handles large - blocks, which in our case means at least 288 bytes. + blocks, which in our case means at least 288 bytes. It is actually + a general allocator, although it doesn't contain any of the small- + or medium-block support that are also present in the GNU C Library. */ #include @@ -190,12 +192,13 @@ /* search now through all higher bins. We only need to take the smallest item of the first non-empty bin, as it will be large - enough. 
xxx use a bitmap to speed this up */ + enough. */ while (++index < N_BINS) { - sort_bin(index); - scan = largebins[index].prev; - end = &largebins[index]; - if (scan != end) { + if (largebins[index].prev != &largebins[index]) { + /* non-empty bin. */ + sort_bin(index); + scan = largebins[index].prev; + end = &largebins[index]; mscan = data2chunk(scan); goto found; } From noreply at buildbot.pypy.org Fri Dec 20 17:59:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 17:59:41 +0100 (CET) Subject: [pypy-commit] stmgc c5: Tweaks Message-ID: <20131220165941.A82161C35D2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r580:f33785501e2c Date: 2013-12-20 17:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/f33785501e2c/ Log: Tweaks diff --git a/c5/largemalloc.c b/c5/largemalloc.c --- a/c5/largemalloc.c +++ b/c5/largemalloc.c @@ -127,32 +127,39 @@ end->prev = scan; scan->next = end; - mchunk_t *chunks[count]; - size_t i; - for (i = 0; i < count; i++) { - chunks[i] = data2chunk(unsorted); - unsorted = unsorted->prev; + mchunk_t *chunk1; + mchunk_t *chunks[count]; /* dynamically-sized */ + if (count == 1) { + chunk1 = data2chunk(unsorted); /* common case */ + count = 0; } - assert(unsorted == scan); - qsort(chunks, count, sizeof(mchunk_t *), compare_chunks); + else { + size_t i; + for (i = 0; i < count; i++) { + chunks[i] = data2chunk(unsorted); + unsorted = unsorted->prev; + } + assert(unsorted == scan); + qsort(chunks, count, sizeof(mchunk_t *), compare_chunks); - --count; - chunks[count]->size |= FLAG_SORTED; - size_t search_size = chunks[count]->size; + chunk1 = chunks[--count]; + } + chunk1->size |= FLAG_SORTED; + size_t search_size = chunk1->size; dlist_t *head = largebins[index].next; while (1) { if (head == end || search_size >= data2chunk(head)->size) { - /* insert 'chunks[count]' here, before the current head */ - head->prev->next = &chunks[count]->d; - chunks[count]->d.prev = head->prev; - head->prev = &chunks[count]->d; - chunks[count]->d.next = head; + /* insert 'chunk1' here, before the current head */ + head->prev->next = &chunk1->d; + chunk1->d.prev = head->prev; + head->prev = &chunk1->d; + chunk1->d.next = head; if (count == 0) break; /* all done */ - --count; - chunks[count]->size |= FLAG_SORTED; - search_size = chunks[count]->size; + chunk1 = chunks[--count]; + chunk1->size |= FLAG_SORTED; + search_size = chunk1->size; } else { head = head->next; @@ -317,11 +324,17 @@ } if (*(size_t*)(data - 8) == END_MARKER) break; - fprintf(stderr, " %p: %zu ]%s\n", data - 8, *(size_t*)(data - 8), - prev_size_if_free ? 
" (free)" : ""); + fprintf(stderr, " %p: %zu ]", data - 8, *(size_t*)(data - 8)); + if (prev_size_if_free) { + fprintf(stderr, " (free %p / %p)\n", + *(void **)data, *(void **)(data + 8)); + } + else { + fprintf(stderr, "\n"); + } if (!prev_size_if_free) assert(!((*(size_t*)(data - 8)) & FLAG_SORTED)); - assert(*(ssize_t*)(data - 8) > 0); + assert(*(ssize_t*)(data - 8) >= 16); data += (*(size_t*)(data - 8)) & ~FLAG_SORTED; data += 16; } From noreply at buildbot.pypy.org Fri Dec 20 18:09:46 2013 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 20 Dec 2013 18:09:46 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: refactor bitblt code into plugin Message-ID: <20131220170946.60DE41C35D2@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r545:a8b07d33f26a Date: 2013-12-20 17:29 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a8b07d33f26a/ Log: refactor bitblt code into plugin diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -599,14 +599,6 @@ from spyvm.shadow import ObserveeShadow return self.as_special_get_shadow(space, ObserveeShadow) - def as_bitblt_get_shadow(self, space): - from spyvm.shadow import BitBltShadow - return self.as_special_get_shadow(space, BitBltShadow) - - def as_form_get_shadow(self, space): - from spyvm.shadow import FormShadow - return self.as_special_get_shadow(space, FormShadow) - def has_shadow(self): return self._shadow is not None diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py new file mode 100644 --- /dev/null +++ b/spyvm/plugins/bitblt.py @@ -0,0 +1,629 @@ +from spyvm import model, error +from spyvm.shadow import AbstractCachingShadow +from spyvm.plugins.plugin import Plugin + +from rpython.rlib import rarithmetic, jit + + +BitBltPlugin = Plugin() + + at BitBltPlugin.expose_primitive(unwrap_spec=[object], clean_stack=False) +def primitiveCopyBits(interp, s_frame, w_rcvr): + from spyvm.interpreter import Return + if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 15: + raise PrimitiveFailedError + + # only allow combinationRules 0-41 + combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) + if combinationRule > 41: + raise PrimitiveFailedError + + space = interp.space + s_bitblt = w_rcvr.as_special_get_shadow(space, BitBltShadow) + s_bitblt.copyBits() + + w_dest_form = w_rcvr.fetch(space, 0) + if (combinationRule == 22 or combinationRule == 32): + s_frame.pop() # pops the next value under BitBlt + s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) + elif w_dest_form.is_same_object(space.objtable['w_display']): + w_bitmap = w_dest_form.fetch(space, 0) + assert isinstance(w_bitmap, model.W_DisplayBitmap) + w_bitmap.flush_to_screen() + return w_rcvr + + def as_bitblt_get_shadow(self, space): + return + + +class BitBltShadow(AbstractCachingShadow): + WordSize = 32 + MaskTable = [rarithmetic.r_uint(0)] + for i in xrange(WordSize): + MaskTable.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) + AllOnes = rarithmetic.r_uint(0xFFFFFFFF) + + def sync_cache(self): + self.loadBitBlt() + + def intOrIfNil(self, w_int, i): + if w_int is self.space.w_nil: + return i + else: + return self.space.unwrap_int(w_int) + + def loadForm(self, w_form): + try: + if not isinstance(w_form, model.W_PointersObject): + raise error.PrimitiveFailedError() + s_form = w_form.as_special_get_shadow(self.space, FormShadow) + if not isinstance(s_form, FormShadow): + raise error.PrimitiveFailedError() + return s_form + except 
error.PrimitiveFailedError, e: + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise e + + def loadHalftone(self, w_halftone_form): + if w_halftone_form is self.space.w_nil: + return None + elif isinstance(w_halftone_form, model.W_WordsObject): + # Already a bitmap + return w_halftone_form.words + else: + assert isinstance(w_halftone_form, model.W_PointersObject) + w_bits = w_halftone_form.as_special_get_shadow(self.space, FormShadow).w_bits + assert isinstance(w_bits, model.W_WordsObject) + return w_bits.words + + def loadColorMap(self, w_color_map): + if isinstance(w_color_map, model.W_WordsObject): + self.w_cmLookupTable = w_color_map + self.cmMask = self.w_cmLookupTable.size() - 1 + else: + self.w_cmLookupTable = None + + def loadBitBlt(self): + self.success = True + self.w_destForm = self.fetch(0) + self.dest = self.loadForm(self.w_destForm) + self.w_sourceForm = self.fetch(1) + if self.w_sourceForm is not self.space.w_nil: + self.source = self.loadForm(self.w_sourceForm) + else: + self.source = None + self.halftone = self.loadHalftone(self.fetch(2)) + self.combinationRule = self.space.unwrap_int(self.fetch(3)) + self.destX = self.intOrIfNil(self.fetch(4), 0) + self.destY = self.intOrIfNil(self.fetch(5), 0) + self.width = self.intOrIfNil(self.fetch(6), self.dest.width) + self.height = self.intOrIfNil(self.fetch(7), self.dest.height) + self.clipX = self.intOrIfNil(self.fetch(10), 0) + self.clipY = self.intOrIfNil(self.fetch(11), 0) + self.clipW = self.intOrIfNil(self.fetch(12), self.width) + self.clipH = self.intOrIfNil(self.fetch(13), self.height) + if not self.source: + self.sourceX = 0 + self.sourceY = 0 + else: + self.loadColorMap(self.fetch(14)) + self.sourceX = self.intOrIfNil(self.fetch(8), 0) + self.sourceY = self.intOrIfNil(self.fetch(9), 0) + + def copyBits(self): + self.bitCount = 0 + self.clipRange() + if (self.bbW <= 0 or self.bbH <= 0): + return + self.destMaskAndPointerInit() + if not self.source: + self.copyLoopNoSource() + else: + self.checkSourceOverlap() + if self.source.depth != self.dest.depth: + self.copyLoopPixMap() + else: + self.sourceSkewAndPointerInit() + self.copyLoop() + + def checkSourceOverlap(self): + if (self.w_sourceForm is self.w_destForm and self.dy >= self.sy): + if (self.dy > self.sy): + self.vDir = -1 + self.sy = (self.sy + self.bbH) - 1 + self.dy = (self.dy + self.bbH) - 1 + else: + if (self.dy == self.sy and self.dx > self.sx): + self.hDir = -1 + self.sx = (self.sx + self.bbW) - 1 # start at right + self.dx = (self.dx + self.bbW) - 1 + if (self.nWords > 1): + t = self.mask1 # and fix up masks + self.mask1 = self.mask2 + self.mask2 = t + self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) # recompute since dx, dy change + self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) + + def sourceSkewAndPointerInit(self): + pixPerM1 = self.dest.pixPerWord - 1 # Pix per word is power of two, so self makes a mask + sxLowBits = self.sx & pixPerM1 + dxLowBits = self.dx & pixPerM1 + # check if need to preload buffer + # (i.e., two words of source needed for first word of destination) + dWid = -1 + if (self.hDir > 0): + if self.bbW < (self.dest.pixPerWord - dxLowBits): + dWid = self.bbW + else: + dWid = self.dest.pixPerWord - dxLowBits + self.preload = (sxLowBits + dWid) > pixPerM1 + else: + if self.bbW < (dxLowBits + 1): + dWid = self.bbW + else: + dWid = dxLowBits + 1 + self.preload = ((sxLowBits - dWid) + 1) < 0 + + if self.source.msb: + self.skew = 
(sxLowBits - dxLowBits) * self.dest.depth + else: + self.skew = (dxLowBits - sxLowBits) * self.dest.depth + if (self.preload): + if (self.skew < 0): + self.skew += 32 + else: + self.skew -= 32 + # calculate increments from end of one line to start of next + self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / (32 / self.source.depth) |0) + self.sourceDelta = (self.source.pitch * self.vDir) - (self.nWords * self.hDir) + if (self.preload): + self.sourceDelta -= self.hDir + + def clipRange(self): + # intersect with destForm bounds + if self.clipX < 0: + self.clipW += self.clipX + self.clipX = 0 + if self.clipY < 0: + self.clipH += self.clipY + self.clipY = 0 + if self.clipX + self.clipW > self.dest.width: + self.clipW = self.dest.width - self.clipX + if self.clipY + self.clipH > self.dest.height: + self.clipH = self.dest.height - self.clipY + # intersect with clipRect + leftOffset = max(self.clipX - self.destX, 0) + self.sx = self.sourceX + leftOffset + self.dx = self.destX + leftOffset + self.bbW = self.width - leftOffset + rightOffset = (self.dx + self.bbW) - (self.clipX + self.clipW) + if rightOffset > 0: + self.bbW -= rightOffset + topOffset = max(self.clipY - self.destY, 0) + self.sy = self.sourceY + topOffset + self.dy = self.destY + topOffset + self.bbH = self.height - topOffset + bottomOffset = (self.dy + self.bbH) - (self.clipY + self.clipH) + if bottomOffset > 0: + self.bbH -= bottomOffset + # intersect with sourceForm bounds + if not self.source: + return + if self.sx < 0: + self.dx -= self.sx + self.bbW += self.sx + self.sx = 0 + if (self.sx + self.bbW) > self.source.width: + self.bbW -= (self.sx + self.bbW) - self.source.width + if self.sy < 0: + self.dy -= self.sy + self.bbH += self.sy + self.sy = 0 + if (self.sy + self.bbH) > self.source.height: + self.bbH -= (self.sy + self.bbH) - self.source.height + + def rshift(self, val, n): + # return rarithmetic.r_uint(val >> n if val >= 0 else (val + 0x100000000) >> n) + return rarithmetic.r_uint(rarithmetic.r_uint(val) >> n & BitBltShadow.AllOnes) + + def destMaskAndPointerInit(self): + pixPerM1 = self.dest.pixPerWord - 1 # pixPerWord is power-of-two, so this makes a mask + startBits = self.dest.pixPerWord - (self.dx & pixPerM1) # how many px in 1st word + endBits = (((self.dx + self.bbW) - 1) & pixPerM1) + 1 + if self.dest.msb: + self.mask1 = self.rshift(BitBltShadow.AllOnes, (32 - (startBits * self.dest.depth))) + self.mask2 = BitBltShadow.AllOnes << (32 - (endBits * self.dest.depth)) + else: + self.mask1 = BitBltShadow.AllOnes << (32 - (startBits * self.dest.depth)) + self.mask2 = self.rshift(BitBltShadow.AllOnes, (32 - (endBits * self.dest.depth))) + if self.bbW < startBits: + self.mask1 = self.mask1 & self.mask2 + self.mask2 = 0 + self.nWords = 1 + else: + self.nWords = (((self.bbW - startBits) + pixPerM1) / self.dest.pixPerWord | 0) + 1 + self.hDir = 1 + self.vDir = 1 + self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) + self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) + + def copyLoopNoSource(self): + halftoneWord = BitBltShadow.AllOnes + for i in range(self.bbH): + if self.halftone: + halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) + # first word in row is masked + destMask = self.mask1 + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += 
1 + destMask = BitBltShadow.AllOnes + # the central horizontal loop requires no store masking + if self.combinationRule == 3: # store rule requires no dest merging + for word in range(2, self.nWords): + self.dest.w_bits.setword(self.destIndex, halftoneWord) + self.destIndex += 1 + else: + for word in range(2, self.nWords): + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + self.dest.w_bits.setword(self.destIndex, mergeWord) + self.destIndex += 1 + # last word in row is masked + if self.nWords > 1: + destMask = self.mask2 + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += 1 + self.destIndex += self.destDelta + + def copyLoopPixMap(self): + # This version of the inner loop maps source pixels + # to a destination form with different depth. Because it is already + # unweildy, the loop is not unrolled as in the other versions. + # Preload, skew and skewMask are all overlooked, since pickSourcePixels + # delivers its destination word already properly aligned. + # Note that pickSourcePixels could be copied in-line at the top of + # the horizontal loop, and some of its inits moved out of the loop. + # + # The loop has been rewritten to use only one pickSourcePixels call. + # The idea is that the call itself could be inlined. If we decide not + # to inline pickSourcePixels we could optimize the loop instead. + sourcePixMask = BitBltShadow.MaskTable[self.source.depth] + destPixMask = BitBltShadow.MaskTable[self.dest.depth] + self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / self.source.pixPerWord | 0) + scrStartBits = self.source.pixPerWord - (self.sx & (self.source.pixPerWord - 1)) + if self.bbW < scrStartBits: + nSourceIncs = 0 + else: + nSourceIncs = ((self.bbW - scrStartBits) / self.source.pixPerWord | 0) + 1 + # Note following two items were already calculated in destmask setup! + self.sourceDelta = self.source.pitch - nSourceIncs + startBits = self.dest.pixPerWord - (self.dx & (self.dest.pixPerWord - 1)) + endBits = (((self.dx + self.bbW) - 1) & (self.dest.pixPerWord - 1)) + 1 + if self.bbW < startBits: + startBits = self.bbW # ?! + srcShift = (self.sx & (self.source.pixPerWord - 1)) * self.source.depth + dstShift = (self.dx & (self.dest.pixPerWord - 1)) * self.dest.depth + srcShiftInc = self.source.depth + dstShiftInc = self.dest.depth + dstShiftLeft = 0 + if (self.source.msb): + srcShift = (32 - self.source.depth) - srcShift + srcShiftInc = -srcShiftInc + + if (self.dest.msb): + dstShift = (32 - self.dest.depth) - dstShift + dstShiftInc = -dstShiftInc + dstShiftLeft = 32 - self.dest.depth + + for i in range(self.bbH): + if self.halftone: + halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) + else: + halftoneWord = BitBltShadow.AllOnes + self.srcBitShift = srcShift + self.dstBitShift = dstShift + self.destMask = self.mask1 + nPix = startBits + words = self.nWords + # Here is the horizontal loop... 
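# A rough pure-Python sketch of what one pickSourcePixels call in the loop
# below assembles, assuming lsb-first bit order and an identity colour map;
# the helper name and the values are illustrative only, not part of spyvm.
def pack_pixels_lsb(pixels, depth):
    # pack `pixels` of `depth` bits each into a single 32-bit word, lsb first
    word, shift, mask = 0, 0, (1 << depth) - 1
    for pix in pixels:
        word |= (pix & mask) << shift
        shift += depth
    return word & 0xFFFFFFFF
assert pack_pixels_lsb([1, 2, 3, 4], 8) == 0x04030201
# The real code additionally handles msb-first forms, the cmLookupTable
# colour map and the partial-word masks mask1/mask2.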
+ for word in range(words + 1): + skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) + # align next word to leftmost pixel + self.dstBitShift = dstShiftLeft + if self.destMask == BitBltShadow.AllOnes: # avoid read-modify-write + self.dest.w_bits.setword( + self.destIndex, + self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) + ) + else: # General version using dest masking + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord & self.destMask) + destWord = (self.destMask & mergeWord) | (destWord & (~self.destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + + self.destIndex += 1 + if (words == 2): # is the next word the last word? + self.destMask = self.mask2 + nPix = endBits + else: # use fullword mask for inner loop + self.destMask = BitBltShadow.AllOnes + nPix = self.dest.pixPerWord + self.sourceIndex += self.sourceDelta + self.destIndex += self.destDelta + + def pickSourcePixels(self, nPixels, srcMask, dstMask, srcShiftInc, dstShiftInc): + # Pick nPix pixels starting at srcBitIndex from the source, map by the + # color map, and justify them according to dstBitIndex in the resulting destWord. + sourceWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) + destWord = 0 + srcShift = self.srcBitShift # put into temp for speed + dstShift = self.dstBitShift + nPix = nPixels + # always > 0 so we can use do { } while(--nPix); + if (self.w_cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only + for px in range(nPix + 1): + sourcePix = self.rshift(rarithmetic.r_uint(sourceWord), srcShift) & srcMask + destPix = self.w_cmLookupTable.getword(rarithmetic.intmask(sourcePix & self.cmMask)) + # adjust dest pix index + destWord = destWord | ((destPix & dstMask) << dstShift) + # adjust source pix index + dstShift += dstShiftInc + srcShift += srcShiftInc + if srcShift & rarithmetic.r_uint(0xFFFFFFE0): + if (self.source.msb): + srcShift += 32 + else: + srcShift -= 32 + self.sourceIndex += 1 + sourceWord = self.source.w_bits.getword(self.sourceIndex) + else: + raise error.PrimitiveFailedError() + self.srcBitShift = srcShift # Store back + return destWord + + def rotate32bit(self, thisWord, prevWord, skewMask, notSkewMask, unskew): + if unskew < 0: + rotated = self.rshift(rarithmetic.r_uint(prevWord & notSkewMask), -unskew) + else: + rotated = (prevWord & notSkewMask) << unskew + if self.skew < 0: + rotated = rotated | self.rshift(rarithmetic.r_uint(thisWord & skewMask), -self.skew) + else: + rotated = rotated | (thisWord & skewMask) << self.skew + return rotated + + def copyLoop(self): + # self version of the inner loop assumes we do have a source + sourceLimit = self.source.w_bits.size() + hInc = self.hDir + # init skew (the difference in word alignment of source and dest) + if (self.skew == -32): + self.skew = unskew = 0 + skewMask = rarithmetic.r_uint(0) + else: + if (self.skew < 0): + unskew = self.skew + 32 + skewMask = rarithmetic.r_uint(BitBltShadow.AllOnes << -self.skew) + else: + if (self.skew == 0): + unskew = 0 + skewMask = BitBltShadow.AllOnes + else: + unskew = self.skew - 32 + skewMask = self.rshift(BitBltShadow.AllOnes, self.skew) + notSkewMask = rarithmetic.r_uint(~skewMask) + + # init halftones + if (self.halftone): + halftoneWord = rarithmetic.r_uint(self.halftone[0]) + halftoneHeight = len(self.halftone) + else: + halftoneWord = BitBltShadow.AllOnes + halftoneHeight = 0 + + # now loop over 
all lines + y = self.dy + for i in range(1, self.bbH + 1): + if (halftoneHeight > 1): + halftoneWord = rarithmetic.r_uint(self.halftone[y % halftoneHeight]) + y += self.vDir + + if (self.preload): + prevWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) + self.sourceIndex += hInc + else: + prevWord = rarithmetic.r_uint(0) + + destMask = self.mask1 + # pick up next word + thisWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + # The central horizontal loop requires no store masking + self.destIndex += hInc + destMask = BitBltShadow.AllOnes + if (self.combinationRule == 3): # Store mode avoids dest merge function + if ((self.skew == 0) and (halftoneWord == BitBltShadow.AllOnes)): + # Non-skewed with no halftone + if (self.hDir == -1): + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) + self.dest.w_bits.setword(self.destIndex, thisWord) + self.sourceIndex += hInc + self.destIndex += hInc + else: + for word in range(2, self.nWords): + self.dest.w_bits.setword(self.destIndex, prevWord) + prevWord = self.source.w_bits.getword(self.sourceIndex) + self.destIndex += hInc + self.sourceIndex += hInc + else: + # skewed and/or halftoned + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + self.dest.w_bits.setword(self.destIndex, skewWord & halftoneWord) + self.destIndex += hInc + else: # Dest merging here... + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + mergeWord = self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) + self.dest.w_bits.setword(self.destIndex, mergeWord) + self.destIndex += hInc + # last word with masking and all + if (self.nWords > 1): + destMask = self.mask2 + if (self.sourceIndex >= 0 and self.sourceIndex < sourceLimit): + # NOTE: we are currently overrunning source bits in some cases + # self test makes up for it. 
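# For the positive-skew case (0 < skew < 32) the rotate32bit calls in this
# loop boil down to the following pure-Python sketch (negative skew mirrors
# it with the shift directions swapped); names and values are illustrative.
def merge_skewed(prev_word, this_word, skew):
    # bits 0..skew-1 of the result come from the top of the previously
    # fetched source word, the rest from the current word, so pixel runs
    # crossing a 32-bit source-word boundary stay contiguous
    skew_mask = 0xFFFFFFFF >> skew
    from_prev = ((prev_word & ~skew_mask) & 0xFFFFFFFF) >> (32 - skew)
    from_this = (this_word & skew_mask) << skew
    return (from_prev | from_this) & 0xFFFFFFFF
assert merge_skewed(0xAABBCCDD, 0x11223344, 8) == 0x223344AA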
+ thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += hInc + self.sourceIndex += self.sourceDelta + self.destIndex += self.destDelta + + def mergeFn(self, src, dest): + return rarithmetic.r_uint(self.merge( + rarithmetic.r_uint(src), + rarithmetic.r_uint(dest) + )) + + def merge(self, source_word, dest_word): + assert isinstance(source_word, rarithmetic.r_uint) and isinstance(dest_word, rarithmetic.r_uint) + if self.combinationRule == 0: + return 0 + elif self.combinationRule == 1: + return source_word & dest_word + elif self.combinationRule == 2: + return source_word & ~dest_word + elif self.combinationRule == 3: + return source_word + elif self.combinationRule == 4: + return ~source_word & dest_word + elif self.combinationRule == 5: + return dest_word + elif self.combinationRule == 6: + return source_word ^ dest_word + elif self.combinationRule == 7: + return source_word | dest_word + elif self.combinationRule == 8: + return ~source_word & ~dest_word + elif self.combinationRule == 9: + return ~source_word ^ dest_word + elif self.combinationRule == 10: + return ~dest_word + elif self.combinationRule == 11: + return source_word | ~dest_word + elif self.combinationRule == 12: + return ~source_word + elif self.combinationRule == 13: + return ~source_word | dest_word + elif self.combinationRule == 14: + return ~source_word | ~dest_word + elif self.combinationRule >= 15 and self.combinationRule <= 17: + return dest_word + elif self.combinationRule == 18: + return source_word + dest_word + elif self.combinationRule == 19: + return source_word - dest_word + elif self.combinationRule >= 20 and self.combinationRule <= 24: + return source_word + elif self.combinationRule == 25: + if source_word == 0: + return dest_word + else: + return self.partitionedANDtonBitsnPartitions( + ~source_word, + dest_word, + self.dest.depth, + self.dest.pixPerWord + ) + elif self.combinationRule == 26: + return self.partitionedANDtonBitsnPartitions( + ~source_word, + dest_word, + self.dest.depth, + self.dest.pixPerWord + ) + elif 26 < self.combinationRule <= 41: + return dest_word + else: + raise error.PrimitiveFailedError() + + def partitionedANDtonBitsnPartitions(self, word1, word2, nBits, nParts): + # partition mask starts at the right + mask = BitBltShadow.MaskTable[nBits] + result = 0 + for i in range(1, nParts + 1): + if ((word1 & mask) == mask): + result = result | (word2 & mask) + # slide left to next partition + mask = mask << nBits + return result + + def as_string(bb): + return 'aBitBlt (destX: %d, destY: %d, sx: %d, sy: %d, dx: %d, dy: %d, w: %d, h: %d, hDir: %d, vDir: %d, sDelta: %d, dDelta: %d, skew: %d, sI: %d, dI: %d)' % ( + bb.dest_x, bb.dest_y, bb.sx, bb.sy, bb.dx, bb.dy, bb.w, bb.h, bb.h_dir, bb.v_dir, bb.source_delta, bb.dest_delta, bb.skew, bb.source_index, bb.dest_index) + # "dest_raster", "source_raster", + # "halftone_bits", "mask1", "mask2", "skew_mask", + # "n_words", "preload" + + +class FormShadow(AbstractCachingShadow): + _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", + "offsetY", "msb", "pixPerWord", "pitch"] + + def sync_cache(self): + if self.size() < 5: + w_self = self.w_self() + assert 
isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise error.PrimitiveFailedError + self.w_bits = self.fetch(0) + if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise error.PrimitiveFailedError + self.width = self.space.unwrap_int(self.fetch(1)) + self.height = self.space.unwrap_int(self.fetch(2)) + self.depth = self.space.unwrap_int(self.fetch(3)) + if self.width < 0 or self.height < 0: + raise error.PrimitiveFailedError() + self.msb = self.depth > 0 + if self.depth < 0: + self.depth = -self.depth + if self.depth == 0: + raise error.PrimitiveFailedError() + w_offset = self.fetch(4) + assert isinstance(w_offset, model.W_PointersObject) + if not w_offset is self.space.w_nil: + self.offsetX = self.space.unwrap_int(w_offset._fetch(0)) + self.offsetY = self.space.unwrap_int(w_offset._fetch(1)) + self.pixPerWord = 32 / self.depth + self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 + if self.w_bits.size() != (self.pitch * self.height): + raise error.PrimitiveFailedError() diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -629,31 +629,10 @@ def func(interp, s_frame, w_rcvr, w_into): raise PrimitiveNotYetWrittenError() - at expose_primitive(BITBLT_COPY_BITS, unwrap_spec=[object], clean_stack=False) -def func(interp, s_frame, w_rcvr): - from spyvm.interpreter import Return - if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 15: - raise PrimitiveFailedError - - # only allow combinationRules 0-41 - combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) - if combinationRule > 41: - raise PrimitiveFailedError - - space = interp.space - - s_bitblt = w_rcvr.as_bitblt_get_shadow(space) - s_bitblt.copyBits() - - w_dest_form = w_rcvr.fetch(space, 0) - if (combinationRule == 22 or combinationRule == 32): - s_frame.pop() # pops the next value under BitBlt - s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) - elif w_dest_form.is_same_object(space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) - w_bitmap.flush_to_screen() - return w_rcvr + at expose_primitive(BITBLT_COPY_BITS, clean_stack=False, no_result=True, compiled_method=True) +def func(interp, s_frame, argcount, s_method): + from spyvm.plugins.bitblt import BitBltPlugin + return BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) @expose_primitive(BE_CURSOR) def func(interp, s_frame, argcount): @@ -877,9 +856,10 @@ raise PrimitiveFailedError signature = (w_modulename.as_string(), w_functionname.as_string()) - if signature == ('BitBltPlugin', 'primitiveCopyBits'): - return prim_holder.prim_table[BITBLT_COPY_BITS](interp, s_frame, argcount, s_method) - if signature[0] == "SocketPlugin": + if signature == 'BitBltPlugin': + from spyvm.plugins.bitblt import BitBltPlugin + return BitBltPlugin.call(signature[1], interp, s_frame, argcount, s_method) + elif signature[0] == "SocketPlugin": from spyvm.plugins.socket import SocketPlugin return SocketPlugin.call(signature[1], interp, s_frame, argcount, s_method) elif signature[0] == "FilePlugin": @@ -889,6 +869,7 @@ from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) else: + print signature from spyvm.interpreter_proxy import 
IProxy return IProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1125,607 +1125,3 @@ self.dependent = dependent def update(self): pass - - -class BitBltShadow(AbstractCachingShadow): - WordSize = 32 - MaskTable = [rarithmetic.r_uint(0)] - for i in xrange(WordSize): - MaskTable.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) - AllOnes = rarithmetic.r_uint(0xFFFFFFFF) - - def sync_cache(self): - self.loadBitBlt() - - def intOrIfNil(self, w_int, i): - if w_int is self.space.w_nil: - return i - else: - return self.space.unwrap_int(w_int) - - def loadForm(self, w_form): - try: - if not isinstance(w_form, model.W_PointersObject): - raise error.PrimitiveFailedError() - s_form = w_form.as_form_get_shadow(self.space) - if not isinstance(s_form, FormShadow): - raise error.PrimitiveFailedError() - return s_form - except error.PrimitiveFailedError, e: - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise e - - def loadHalftone(self, w_halftone_form): - if w_halftone_form is self.space.w_nil: - return None - elif isinstance(w_halftone_form, model.W_WordsObject): - # Already a bitmap - return w_halftone_form.words - else: - assert isinstance(w_halftone_form, model.W_PointersObject) - w_bits = w_halftone_form.as_form_get_shadow(self.space).w_bits - assert isinstance(w_bits, model.W_WordsObject) - return w_bits.words - - def loadColorMap(self, w_color_map): - if isinstance(w_color_map, model.W_WordsObject): - self.w_cmLookupTable = w_color_map - self.cmMask = self.w_cmLookupTable.size() - 1 - else: - self.w_cmLookupTable = None - - def loadBitBlt(self): - self.success = True - self.w_destForm = self.fetch(0) - self.dest = self.loadForm(self.w_destForm) - self.w_sourceForm = self.fetch(1) - if self.w_sourceForm is not self.space.w_nil: - self.source = self.loadForm(self.w_sourceForm) - else: - self.source = None - self.halftone = self.loadHalftone(self.fetch(2)) - self.combinationRule = self.space.unwrap_int(self.fetch(3)) - self.destX = self.intOrIfNil(self.fetch(4), 0) - self.destY = self.intOrIfNil(self.fetch(5), 0) - self.width = self.intOrIfNil(self.fetch(6), self.dest.width) - self.height = self.intOrIfNil(self.fetch(7), self.dest.height) - self.clipX = self.intOrIfNil(self.fetch(10), 0) - self.clipY = self.intOrIfNil(self.fetch(11), 0) - self.clipW = self.intOrIfNil(self.fetch(12), self.width) - self.clipH = self.intOrIfNil(self.fetch(13), self.height) - if not self.source: - self.sourceX = 0 - self.sourceY = 0 - else: - self.loadColorMap(self.fetch(14)) - self.sourceX = self.intOrIfNil(self.fetch(8), 0) - self.sourceY = self.intOrIfNil(self.fetch(9), 0) - - def copyBits(self): - self.bitCount = 0 - self.clipRange() - if (self.bbW <= 0 or self.bbH <= 0): - return - self.destMaskAndPointerInit() - if not self.source: - self.copyLoopNoSource() - else: - self.checkSourceOverlap() - if self.source.depth != self.dest.depth: - self.copyLoopPixMap() - else: - self.sourceSkewAndPointerInit() - self.copyLoop() - - def checkSourceOverlap(self): - if (self.w_sourceForm is self.w_destForm and self.dy >= self.sy): - if (self.dy > self.sy): - self.vDir = -1 - self.sy = (self.sy + self.bbH) - 1 - self.dy = (self.dy + self.bbH) - 1 - else: - if (self.dy == self.sy and self.dx > self.sx): - self.hDir = -1 - self.sx = (self.sx + self.bbW) - 1 # start at right - self.dx = (self.dx + self.bbW) - 1 - if (self.nWords > 1): - t = self.mask1 
# and fix up masks - self.mask1 = self.mask2 - self.mask2 = t - self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) # recompute since dx, dy change - self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) - - def sourceSkewAndPointerInit(self): - pixPerM1 = self.dest.pixPerWord - 1 # Pix per word is power of two, so self makes a mask - sxLowBits = self.sx & pixPerM1 - dxLowBits = self.dx & pixPerM1 - # check if need to preload buffer - # (i.e., two words of source needed for first word of destination) - dWid = -1 - if (self.hDir > 0): - if self.bbW < (self.dest.pixPerWord - dxLowBits): - dWid = self.bbW - else: - dWid = self.dest.pixPerWord - dxLowBits - self.preload = (sxLowBits + dWid) > pixPerM1 - else: - if self.bbW < (dxLowBits + 1): - dWid = self.bbW - else: - dWid = dxLowBits + 1 - self.preload = ((sxLowBits - dWid) + 1) < 0 - - if self.source.msb: - self.skew = (sxLowBits - dxLowBits) * self.dest.depth - else: - self.skew = (dxLowBits - sxLowBits) * self.dest.depth - if (self.preload): - if (self.skew < 0): - self.skew += 32 - else: - self.skew -= 32 - # calculate increments from end of one line to start of next - self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / (32 / self.source.depth) |0) - self.sourceDelta = (self.source.pitch * self.vDir) - (self.nWords * self.hDir) - if (self.preload): - self.sourceDelta -= self.hDir - - def clipRange(self): - # intersect with destForm bounds - if self.clipX < 0: - self.clipW += self.clipX - self.clipX = 0 - if self.clipY < 0: - self.clipH += self.clipY - self.clipY = 0 - if self.clipX + self.clipW > self.dest.width: - self.clipW = self.dest.width - self.clipX - if self.clipY + self.clipH > self.dest.height: - self.clipH = self.dest.height - self.clipY - # intersect with clipRect - leftOffset = max(self.clipX - self.destX, 0) - self.sx = self.sourceX + leftOffset - self.dx = self.destX + leftOffset - self.bbW = self.width - leftOffset - rightOffset = (self.dx + self.bbW) - (self.clipX + self.clipW) - if rightOffset > 0: - self.bbW -= rightOffset - topOffset = max(self.clipY - self.destY, 0) - self.sy = self.sourceY + topOffset - self.dy = self.destY + topOffset - self.bbH = self.height - topOffset - bottomOffset = (self.dy + self.bbH) - (self.clipY + self.clipH) - if bottomOffset > 0: - self.bbH -= bottomOffset - # intersect with sourceForm bounds - if not self.source: - return - if self.sx < 0: - self.dx -= self.sx - self.bbW += self.sx - self.sx = 0 - if (self.sx + self.bbW) > self.source.width: - self.bbW -= (self.sx + self.bbW) - self.source.width - if self.sy < 0: - self.dy -= self.sy - self.bbH += self.sy - self.sy = 0 - if (self.sy + self.bbH) > self.source.height: - self.bbH -= (self.sy + self.bbH) - self.source.height - - def rshift(self, val, n): - # return rarithmetic.r_uint(val >> n if val >= 0 else (val + 0x100000000) >> n) - return rarithmetic.r_uint(rarithmetic.r_uint(val) >> n & BitBltShadow.AllOnes) - - def destMaskAndPointerInit(self): - pixPerM1 = self.dest.pixPerWord - 1 # pixPerWord is power-of-two, so this makes a mask - startBits = self.dest.pixPerWord - (self.dx & pixPerM1) # how many px in 1st word - endBits = (((self.dx + self.bbW) - 1) & pixPerM1) + 1 - if self.dest.msb: - self.mask1 = self.rshift(BitBltShadow.AllOnes, (32 - (startBits * self.dest.depth))) - self.mask2 = BitBltShadow.AllOnes << (32 - (endBits * self.dest.depth)) - else: - self.mask1 = BitBltShadow.AllOnes << (32 - (startBits * self.dest.depth)) - self.mask2 = 
self.rshift(BitBltShadow.AllOnes, (32 - (endBits * self.dest.depth))) - if self.bbW < startBits: - self.mask1 = self.mask1 & self.mask2 - self.mask2 = 0 - self.nWords = 1 - else: - self.nWords = (((self.bbW - startBits) + pixPerM1) / self.dest.pixPerWord | 0) + 1 - self.hDir = 1 - self.vDir = 1 - self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) - self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) - - def copyLoopNoSource(self): - halftoneWord = BitBltShadow.AllOnes - for i in range(self.bbH): - if self.halftone: - halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) - # first word in row is masked - destMask = self.mask1 - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - self.destIndex += 1 - destMask = BitBltShadow.AllOnes - # the central horizontal loop requires no store masking - if self.combinationRule == 3: # store rule requires no dest merging - for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, halftoneWord) - self.destIndex += 1 - else: - for word in range(2, self.nWords): - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(halftoneWord, destWord) - self.dest.w_bits.setword(self.destIndex, mergeWord) - self.destIndex += 1 - # last word in row is masked - if self.nWords > 1: - destMask = self.mask2 - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - self.destIndex += 1 - self.destIndex += self.destDelta - - def copyLoopPixMap(self): - # This version of the inner loop maps source pixels - # to a destination form with different depth. Because it is already - # unweildy, the loop is not unrolled as in the other versions. - # Preload, skew and skewMask are all overlooked, since pickSourcePixels - # delivers its destination word already properly aligned. - # Note that pickSourcePixels could be copied in-line at the top of - # the horizontal loop, and some of its inits moved out of the loop. - # - # The loop has been rewritten to use only one pickSourcePixels call. - # The idea is that the call itself could be inlined. If we decide not - # to inline pickSourcePixels we could optimize the loop instead. - sourcePixMask = BitBltShadow.MaskTable[self.source.depth] - destPixMask = BitBltShadow.MaskTable[self.dest.depth] - self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / self.source.pixPerWord | 0) - scrStartBits = self.source.pixPerWord - (self.sx & (self.source.pixPerWord - 1)) - if self.bbW < scrStartBits: - nSourceIncs = 0 - else: - nSourceIncs = ((self.bbW - scrStartBits) / self.source.pixPerWord | 0) + 1 - # Note following two items were already calculated in destmask setup! - self.sourceDelta = self.source.pitch - nSourceIncs - startBits = self.dest.pixPerWord - (self.dx & (self.dest.pixPerWord - 1)) - endBits = (((self.dx + self.bbW) - 1) & (self.dest.pixPerWord - 1)) + 1 - if self.bbW < startBits: - startBits = self.bbW # ?! 
- srcShift = (self.sx & (self.source.pixPerWord - 1)) * self.source.depth - dstShift = (self.dx & (self.dest.pixPerWord - 1)) * self.dest.depth - srcShiftInc = self.source.depth - dstShiftInc = self.dest.depth - dstShiftLeft = 0 - if (self.source.msb): - srcShift = (32 - self.source.depth) - srcShift - srcShiftInc = -srcShiftInc - - if (self.dest.msb): - dstShift = (32 - self.dest.depth) - dstShift - dstShiftInc = -dstShiftInc - dstShiftLeft = 32 - self.dest.depth - - for i in range(self.bbH): - if self.halftone: - halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) - else: - halftoneWord = BitBltShadow.AllOnes - self.srcBitShift = srcShift - self.dstBitShift = dstShift - self.destMask = self.mask1 - nPix = startBits - words = self.nWords - # Here is the horizontal loop... - for word in range(words + 1): - skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) - # align next word to leftmost pixel - self.dstBitShift = dstShiftLeft - if self.destMask == BitBltShadow.AllOnes: # avoid read-modify-write - self.dest.w_bits.setword( - self.destIndex, - self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) - ) - else: # General version using dest masking - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(skewWord & halftoneWord, destWord & self.destMask) - destWord = (self.destMask & mergeWord) | (destWord & (~self.destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - - self.destIndex += 1 - if (words == 2): # is the next word the last word? - self.destMask = self.mask2 - nPix = endBits - else: # use fullword mask for inner loop - self.destMask = BitBltShadow.AllOnes - nPix = self.dest.pixPerWord - self.sourceIndex += self.sourceDelta - self.destIndex += self.destDelta - - def pickSourcePixels(self, nPixels, srcMask, dstMask, srcShiftInc, dstShiftInc): - # Pick nPix pixels starting at srcBitIndex from the source, map by the - # color map, and justify them according to dstBitIndex in the resulting destWord. 
- sourceWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) - destWord = 0 - srcShift = self.srcBitShift # put into temp for speed - dstShift = self.dstBitShift - nPix = nPixels - # always > 0 so we can use do { } while(--nPix); - if (self.w_cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only - for px in range(nPix + 1): - sourcePix = self.rshift(rarithmetic.r_uint(sourceWord), srcShift) & srcMask - destPix = self.w_cmLookupTable.getword(rarithmetic.intmask(sourcePix & self.cmMask)) - # adjust dest pix index - destWord = destWord | ((destPix & dstMask) << dstShift) - # adjust source pix index - dstShift += dstShiftInc - srcShift += srcShiftInc - if srcShift & rarithmetic.r_uint(0xFFFFFFE0): - if (self.source.msb): - srcShift += 32 - else: - srcShift -= 32 - self.sourceIndex += 1 - sourceWord = self.source.w_bits.getword(self.sourceIndex) - else: - raise error.PrimitiveFailedError() - self.srcBitShift = srcShift # Store back - return destWord - - def rotate32bit(self, thisWord, prevWord, skewMask, notSkewMask, unskew): - if unskew < 0: - rotated = self.rshift(rarithmetic.r_uint(prevWord & notSkewMask), -unskew) - else: - rotated = (prevWord & notSkewMask) << unskew - if self.skew < 0: - rotated = rotated | self.rshift(rarithmetic.r_uint(thisWord & skewMask), -self.skew) - else: - rotated = rotated | (thisWord & skewMask) << self.skew - return rotated - - def copyLoop(self): - # self version of the inner loop assumes we do have a source - sourceLimit = self.source.w_bits.size() - hInc = self.hDir - # init skew (the difference in word alignment of source and dest) - if (self.skew == -32): - self.skew = unskew = 0 - skewMask = rarithmetic.r_uint(0) - else: - if (self.skew < 0): - unskew = self.skew + 32 - skewMask = rarithmetic.r_uint(BitBltShadow.AllOnes << -self.skew) - else: - if (self.skew == 0): - unskew = 0 - skewMask = BitBltShadow.AllOnes - else: - unskew = self.skew - 32 - skewMask = self.rshift(BitBltShadow.AllOnes, self.skew) - notSkewMask = rarithmetic.r_uint(~skewMask) - - # init halftones - if (self.halftone): - halftoneWord = rarithmetic.r_uint(self.halftone[0]) - halftoneHeight = len(self.halftone) - else: - halftoneWord = BitBltShadow.AllOnes - halftoneHeight = 0 - - # now loop over all lines - y = self.dy - for i in range(1, self.bbH + 1): - if (halftoneHeight > 1): - halftoneWord = rarithmetic.r_uint(self.halftone[y % halftoneHeight]) - y += self.vDir - - if (self.preload): - prevWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) - self.sourceIndex += hInc - else: - prevWord = rarithmetic.r_uint(0) - - destMask = self.mask1 - # pick up next word - thisWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - prevWord = thisWord - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - # The central horizontal loop requires no store masking - self.destIndex += hInc - destMask = BitBltShadow.AllOnes - if (self.combinationRule == 3): # Store mode avoids dest merge function - if ((self.skew == 0) and (halftoneWord == BitBltShadow.AllOnes)): - # Non-skewed with no halftone - if (self.hDir == -1): - for word in range(2, self.nWords): - thisWord = self.source.w_bits.getword(self.sourceIndex) - 
self.dest.w_bits.setword(self.destIndex, thisWord) - self.sourceIndex += hInc - self.destIndex += hInc - else: - for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, prevWord) - prevWord = self.source.w_bits.getword(self.sourceIndex) - self.destIndex += hInc - self.sourceIndex += hInc - else: - # skewed and/or halftoned - for word in range(2, self.nWords): - thisWord = self.source.w_bits.getword(self.sourceIndex) - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - prevWord = thisWord - self.dest.w_bits.setword(self.destIndex, skewWord & halftoneWord) - self.destIndex += hInc - else: # Dest merging here... - for word in range(2, self.nWords): - thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - prevWord = thisWord - mergeWord = self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) - self.dest.w_bits.setword(self.destIndex, mergeWord) - self.destIndex += hInc - # last word with masking and all - if (self.nWords > 1): - destMask = self.mask2 - if (self.sourceIndex >= 0 and self.sourceIndex < sourceLimit): - # NOTE: we are currently overrunning source bits in some cases - # self test makes up for it. - thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - self.destIndex += hInc - self.sourceIndex += self.sourceDelta - self.destIndex += self.destDelta - - def mergeFn(self, src, dest): - return rarithmetic.r_uint(self.merge( - rarithmetic.r_uint(src), - rarithmetic.r_uint(dest) - )) - - def merge(self, source_word, dest_word): - assert isinstance(source_word, rarithmetic.r_uint) and isinstance(dest_word, rarithmetic.r_uint) - if self.combinationRule == 0: - return 0 - elif self.combinationRule == 1: - return source_word & dest_word - elif self.combinationRule == 2: - return source_word & ~dest_word - elif self.combinationRule == 3: - return source_word - elif self.combinationRule == 4: - return ~source_word & dest_word - elif self.combinationRule == 5: - return dest_word - elif self.combinationRule == 6: - return source_word ^ dest_word - elif self.combinationRule == 7: - return source_word | dest_word - elif self.combinationRule == 8: - return ~source_word & ~dest_word - elif self.combinationRule == 9: - return ~source_word ^ dest_word - elif self.combinationRule == 10: - return ~dest_word - elif self.combinationRule == 11: - return source_word | ~dest_word - elif self.combinationRule == 12: - return ~source_word - elif self.combinationRule == 13: - return ~source_word | dest_word - elif self.combinationRule == 14: - return ~source_word | ~dest_word - elif self.combinationRule >= 15 and self.combinationRule <= 17: - return dest_word - elif self.combinationRule == 18: - return source_word + dest_word - elif self.combinationRule == 19: - return source_word - dest_word - elif self.combinationRule >= 20 and self.combinationRule <= 24: - return source_word - elif self.combinationRule == 25: - if source_word == 0: - return dest_word - else: - return self.partitionedANDtonBitsnPartitions( - ~source_word, 
- dest_word, - self.dest.depth, - self.dest.pixPerWord - ) - elif self.combinationRule == 26: - return self.partitionedANDtonBitsnPartitions( - ~source_word, - dest_word, - self.dest.depth, - self.dest.pixPerWord - ) - elif 26 < self.combinationRule <= 41: - return dest_word - else: - raise error.PrimitiveFailedError() - - def partitionedANDtonBitsnPartitions(self, word1, word2, nBits, nParts): - # partition mask starts at the right - mask = BitBltShadow.MaskTable[nBits] - result = 0 - for i in range(1, nParts + 1): - if ((word1 & mask) == mask): - result = result | (word2 & mask) - # slide left to next partition - mask = mask << nBits - return result - - def as_string(bb): - return 'aBitBlt (destX: %d, destY: %d, sx: %d, sy: %d, dx: %d, dy: %d, w: %d, h: %d, hDir: %d, vDir: %d, sDelta: %d, dDelta: %d, skew: %d, sI: %d, dI: %d)' % ( - bb.dest_x, bb.dest_y, bb.sx, bb.sy, bb.dx, bb.dy, bb.w, bb.h, bb.h_dir, bb.v_dir, bb.source_delta, bb.dest_delta, bb.skew, bb.source_index, bb.dest_index) - # "dest_raster", "source_raster", - # "halftone_bits", "mask1", "mask2", "skew_mask", - # "n_words", "preload" - -class FormShadow(AbstractCachingShadow): - _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", - "offsetY", "msb", "pixPerWord", "pitch"] - - def sync_cache(self): - if self.size() < 5: - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise error.PrimitiveFailedError - self.w_bits = self.fetch(0) - if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise error.PrimitiveFailedError - self.width = self.space.unwrap_int(self.fetch(1)) - self.height = self.space.unwrap_int(self.fetch(2)) - self.depth = self.space.unwrap_int(self.fetch(3)) - if self.width < 0 or self.height < 0: - raise error.PrimitiveFailedError() - self.msb = self.depth > 0 - if self.depth < 0: - self.depth = -self.depth - if self.depth == 0: - raise error.PrimitiveFailedError() - w_offset = self.fetch(4) - assert isinstance(w_offset, model.W_PointersObject) - if not w_offset is self.space.w_nil: - self.offsetX = self.space.unwrap_int(w_offset._fetch(0)) - self.offsetY = self.space.unwrap_int(w_offset._fetch(1)) - self.pixPerWord = 32 / self.depth - self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 - if self.w_bits.size() != (self.pitch * self.height): - raise error.PrimitiveFailedError() - - # def replace_bits(self): - # w_bits = self.w_bits - # if isinstance(w_bits, model.W_WordsObject): - # pass - # elif isinstance(w_bits, model.W_DisplayBitmap): - # w_bits.update_from_buffer() - # else: - # w_self = self.w_self() - # assert isinstance(w_self, model.W_PointersObject) - # w_self._shadow = None - # raise error.PrimitiveFailedError From noreply at buildbot.pypy.org Fri Dec 20 18:09:47 2013 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 20 Dec 2013 18:09:47 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix translation Message-ID: <20131220170947.766711C35D2@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r546:62ecddca5a84 Date: 2013-12-20 18:04 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/62ecddca5a84/ Log: fix translation diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -1,4 +1,5 @@ -from spyvm import model, error +from spyvm import model +from 
spyvm.error import PrimitiveFailedError from spyvm.shadow import AbstractCachingShadow from spyvm.plugins.plugin import Plugin @@ -55,12 +56,12 @@ def loadForm(self, w_form): try: if not isinstance(w_form, model.W_PointersObject): - raise error.PrimitiveFailedError() + raise PrimitiveFailedError() s_form = w_form.as_special_get_shadow(self.space, FormShadow) if not isinstance(s_form, FormShadow): - raise error.PrimitiveFailedError() + raise PrimitiveFailedError() return s_form - except error.PrimitiveFailedError, e: + except PrimitiveFailedError, e: w_self = self.w_self() assert isinstance(w_self, model.W_PointersObject) w_self._shadow = None @@ -385,7 +386,7 @@ self.sourceIndex += 1 sourceWord = self.source.w_bits.getword(self.sourceIndex) else: - raise error.PrimitiveFailedError() + raise PrimitiveFailedError() self.srcBitShift = srcShift # Store back return destWord @@ -571,7 +572,7 @@ elif 26 < self.combinationRule <= 41: return dest_word else: - raise error.PrimitiveFailedError() + raise PrimitiveFailedError() def partitionedANDtonBitsnPartitions(self, word1, word2, nBits, nParts): # partition mask starts at the right @@ -601,23 +602,23 @@ w_self = self.w_self() assert isinstance(w_self, model.W_PointersObject) w_self._shadow = None - raise error.PrimitiveFailedError + raise PrimitiveFailedError self.w_bits = self.fetch(0) if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): w_self = self.w_self() assert isinstance(w_self, model.W_PointersObject) w_self._shadow = None - raise error.PrimitiveFailedError + raise PrimitiveFailedError self.width = self.space.unwrap_int(self.fetch(1)) self.height = self.space.unwrap_int(self.fetch(2)) self.depth = self.space.unwrap_int(self.fetch(3)) if self.width < 0 or self.height < 0: - raise error.PrimitiveFailedError() + raise PrimitiveFailedError() self.msb = self.depth > 0 if self.depth < 0: self.depth = -self.depth if self.depth == 0: - raise error.PrimitiveFailedError() + raise PrimitiveFailedError() w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: @@ -626,4 +627,4 @@ self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() != (self.pitch * self.height): - raise error.PrimitiveFailedError() + raise PrimitiveFailedError() From noreply at buildbot.pypy.org Fri Dec 20 19:22:07 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 19:22:07 +0100 (CET) Subject: [pypy-commit] pypy default: fix dtype.subdtype for simple types Message-ID: <20131220182207.5F2E41C12CD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68509:0146c76b3d20 Date: 2013-12-20 13:04 -0500 http://bitbucket.org/pypy/pypy/changeset/0146c76b3d20/ Log: fix dtype.subdtype for simple types diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -136,6 +136,8 @@ return space.wrap(self.itemtype.alignment) def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) def descr_get_str(self, space): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -54,6 +54,7 @@ assert dtype(int).fields 
is None assert dtype(int).names is None assert dtype(int).hasobject is False + assert dtype(int).subdtype is None assert dtype(None) is dtype(float) From noreply at buildbot.pypy.org Fri Dec 20 20:19:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 20:19:16 +0100 (CET) Subject: [pypy-commit] pypy default: fix segfault on argsort of empty array Message-ID: <20131220191916.2D9A01C35D2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68510:5b62fe371a3d Date: 2013-12-20 14:15 -0500 http://bitbucket.org/pypy/pypy/changeset/5b62fe371a3d/ Log: fix segfault on argsort of empty array diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -123,7 +123,8 @@ if w_axis is space.w_None: # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) - arr = arr.reshape(space, None, [arr.get_size()]) + if arr.get_size() > 0: + arr = arr.reshape(space, None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -45,6 +45,9 @@ def test_argsort_axis(self): from numpypy import array + a = array([]) + for axis in [None, -1, 0]: + assert a.argsort(axis=axis).shape == (0,) a = array([[4, 2], [1, 3]]) assert (a.argsort(axis=None) == [2, 1, 3, 0]).all() assert (a.argsort(axis=-1) == [[1, 0], [0, 1]]).all() From noreply at buildbot.pypy.org Fri Dec 20 20:27:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 20:27:43 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix (thanks ionelmc on irc). Message-ID: <20131220192743.CF6F21C35DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68511:8331c42d7d4a Date: 2013-12-20 20:27 +0100 http://bitbucket.org/pypy/pypy/changeset/8331c42d7d4a/ Log: Test and fix (thanks ionelmc on irc). diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -51,6 +51,13 @@ return w_iter list_iter._annspecialcase_ = 'specialize:memo' +def tuple_iter(space): + "Utility that returns the app-level descriptor tuple.__iter__." 
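# An app-level analogue of the check that tuple_iter/_uses_tuple_iter make
# possible (a sketch only, not the interp-level code): the tuple fast paths
# fire only when the type's __iter__ is exactly the built-in one, so
# subclasses overriding __iter__ fall back to generic iteration.
def uses_builtin_tuple_iter(obj):
    return type(obj).__iter__ is tuple.__iter__
class T(tuple):
    def __iter__(self):
        yield "ok"
assert uses_builtin_tuple_iter((5, 6))
assert not uses_builtin_tuple_iter(T([5, 6]))
assert list(T([5, 6])) == ["ok"]   # the case the new tests below exercise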
+ w_src, w_iter = space.lookup_in_type_where(space.w_tuple, + '__iter__') + return w_iter +tuple_iter._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, name, w_descr=None): if w_descr is None: raise operationerrfmt(space.w_AttributeError, diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -945,7 +945,8 @@ def _extend_from_iterable(self, w_list, w_iterable): space = self.space - if isinstance(w_iterable, W_AbstractTupleObject): + if (isinstance(w_iterable, W_AbstractTupleObject) + and space._uses_tuple_iter(w_iterable)): w_list.__init__(space, w_iterable.getitems_copy()) return diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -382,7 +382,7 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() @@ -396,7 +396,7 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.tolist() elif type(w_obj) is W_ListObject: if unroll: @@ -421,7 +421,7 @@ def listview(self, w_obj, expected_length=-1): if type(w_obj) is W_ListObject: t = w_obj.getitems() - elif isinstance(w_obj, W_AbstractTupleObject): + elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): t = w_obj.getitems() @@ -440,7 +440,7 @@ return w_obj.listview_str() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_str() - if isinstance(w_obj, W_StringObject): + if isinstance(w_obj, W_StringObject) and self._uses_no_iter(w_obj): return w_obj.listview_str() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_str() @@ -455,7 +455,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject): + if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -490,6 +490,13 @@ from pypy.objspace.descroperation import list_iter return self.lookup(w_obj, '__iter__') is list_iter(self) + def _uses_tuple_iter(self, w_obj): + from pypy.objspace.descroperation import tuple_iter + return self.lookup(w_obj, '__iter__') is tuple_iter(self) + + def _uses_no_iter(self, w_obj): + return self.lookup(w_obj, '__iter__') is None + def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): a, b, c = w_slice.indices3(self, self.int_w(w_length)) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1314,6 +1314,32 @@ non_list = NonList() assert [] != non_list + def test_extend_from_empty_list_with_subclasses(self): + # some of these tests used to fail by ignoring the + # custom __iter__() --- but only if the 
list has so + # far the empty strategy, as opposed to .extend()ing + # a non-empty list. + class T(tuple): + def __iter__(self): + yield "ok" + assert list(T([5, 6])) == ["ok"] + # + class L(list): + def __iter__(self): + yield "ok" + assert list(L([5, 6])) == ["ok"] + assert list(L([5.2, 6.3])) == ["ok"] + # + class S(str): + def __iter__(self): + yield "ok" + assert list(S("don't see me")) == ["ok"] + # + class U(unicode): + def __iter__(self): + yield "ok" + assert list(U(u"don't see me")) == ["ok"] + class AppTestForRangeLists(AppTestW_ListObject): spaceconfig = {"objspace.std.withrangelist": True} From noreply at buildbot.pypy.org Fri Dec 20 20:30:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 20 Dec 2013 20:30:51 +0100 (CET) Subject: [pypy-commit] pypy default: A similar test (passing) that starts with a list in a non-empty strategy. Message-ID: <20131220193051.161B21C35DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68512:873b55f2169c Date: 2013-12-20 20:29 +0100 http://bitbucket.org/pypy/pypy/changeset/873b55f2169c/ Log: A similar test (passing) that starts with a list in a non-empty strategy. diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1340,6 +1340,31 @@ yield "ok" assert list(U(u"don't see me")) == ["ok"] + def test_extend_from_nonempty_list_with_subclasses(self): + l = ["hi!"] + class T(tuple): + def __iter__(self): + yield "okT" + l.extend(T([5, 6])) + # + class L(list): + def __iter__(self): + yield "okL" + l.extend(L([5, 6])) + l.extend(L([5.2, 6.3])) + # + class S(str): + def __iter__(self): + yield "okS" + l.extend(S("don't see me")) + # + class U(unicode): + def __iter__(self): + yield "okU" + l.extend(U(u"don't see me")) + # + assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + class AppTestForRangeLists(AppTestW_ListObject): spaceconfig = {"objspace.std.withrangelist": True} From noreply at buildbot.pypy.org Fri Dec 20 20:55:34 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 20:55:34 +0100 (CET) Subject: [pypy-commit] pypy default: fix initialization of scalar flexible types Message-ID: <20131220195534.591CE1D22B0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68513:ba9a05a2fcc4 Date: 2013-12-20 14:52 -0500 http://bitbucket.org/pypy/pypy/changeset/ba9a05a2fcc4/ Log: fix initialization of scalar flexible types diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -38,7 +38,7 @@ from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: - w_val = dtype.base.coerce(space, space.wrap(0)) + w_val = dtype.base.coerce(space, None) impl = scalar.Scalar(dtype.base, w_val) else: strides, backstrides = calc_strides(shape, dtype.base, order) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -354,7 +354,10 @@ # And check that changes stick. 
a[13] = 5.3 assert a[13] == 5.3 + assert zeros(()) == 0 assert zeros(()).shape == () + assert zeros((), dtype='S') == '' + assert zeros((), dtype='S').shape == () def test_empty_like(self): import numpy as np @@ -3046,6 +3049,10 @@ def test_zeros(self): from numpypy import zeros + a = zeros((), dtype=[('x', int), ('y', float)]) + assert a[()]['x'] == 0 + assert a[()]['y'] == 0 + assert a.shape == () a = zeros(2, dtype=[('x', int), ('y', float)]) raises(IndexError, 'a[0]["xyz"]') assert a[0]['x'] == 0 diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1620,6 +1620,8 @@ from pypy.module.micronumpy.interp_dtype import new_string_dtype if isinstance(w_item, interp_boxes.W_StringBox): return w_item + if w_item is None: + w_item = space.wrap('') arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1819,6 +1821,8 @@ def coerce(self, space, dtype, w_item): if isinstance(w_item, interp_boxes.W_VoidBox): return w_item + if w_item is None: + w_item = space.newtuple([None] * len(dtype.fields)) # we treat every sequence as sequence, no special support # for arrays if not space.issequence_w(w_item): From noreply at buildbot.pypy.org Fri Dec 20 20:55:35 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 20:55:35 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131220195535.A88C01D22B0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68514:4cc2e74ada93 Date: 2013-12-20 14:54 -0500 http://bitbucket.org/pypy/pypy/changeset/4cc2e74ada93/ Log: merge heads diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -51,6 +51,13 @@ return w_iter list_iter._annspecialcase_ = 'specialize:memo' +def tuple_iter(space): + "Utility that returns the app-level descriptor tuple.__iter__." 
+ w_src, w_iter = space.lookup_in_type_where(space.w_tuple, + '__iter__') + return w_iter +tuple_iter._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, name, w_descr=None): if w_descr is None: raise operationerrfmt(space.w_AttributeError, diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -945,7 +945,8 @@ def _extend_from_iterable(self, w_list, w_iterable): space = self.space - if isinstance(w_iterable, W_AbstractTupleObject): + if (isinstance(w_iterable, W_AbstractTupleObject) + and space._uses_tuple_iter(w_iterable)): w_list.__init__(space, w_iterable.getitems_copy()) return diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -382,7 +382,7 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() @@ -396,7 +396,7 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.tolist() elif type(w_obj) is W_ListObject: if unroll: @@ -421,7 +421,7 @@ def listview(self, w_obj, expected_length=-1): if type(w_obj) is W_ListObject: t = w_obj.getitems() - elif isinstance(w_obj, W_AbstractTupleObject): + elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): t = w_obj.getitems() @@ -440,7 +440,7 @@ return w_obj.listview_str() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_str() - if isinstance(w_obj, W_StringObject): + if isinstance(w_obj, W_StringObject) and self._uses_no_iter(w_obj): return w_obj.listview_str() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_str() @@ -455,7 +455,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject): + if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -490,6 +490,13 @@ from pypy.objspace.descroperation import list_iter return self.lookup(w_obj, '__iter__') is list_iter(self) + def _uses_tuple_iter(self, w_obj): + from pypy.objspace.descroperation import tuple_iter + return self.lookup(w_obj, '__iter__') is tuple_iter(self) + + def _uses_no_iter(self, w_obj): + return self.lookup(w_obj, '__iter__') is None + def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): a, b, c = w_slice.indices3(self, self.int_w(w_length)) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1314,6 +1314,57 @@ non_list = NonList() assert [] != non_list + def test_extend_from_empty_list_with_subclasses(self): + # some of these tests used to fail by ignoring the + # custom __iter__() --- but only if the 
list has so + # far the empty strategy, as opposed to .extend()ing + # a non-empty list. + class T(tuple): + def __iter__(self): + yield "ok" + assert list(T([5, 6])) == ["ok"] + # + class L(list): + def __iter__(self): + yield "ok" + assert list(L([5, 6])) == ["ok"] + assert list(L([5.2, 6.3])) == ["ok"] + # + class S(str): + def __iter__(self): + yield "ok" + assert list(S("don't see me")) == ["ok"] + # + class U(unicode): + def __iter__(self): + yield "ok" + assert list(U(u"don't see me")) == ["ok"] + + def test_extend_from_nonempty_list_with_subclasses(self): + l = ["hi!"] + class T(tuple): + def __iter__(self): + yield "okT" + l.extend(T([5, 6])) + # + class L(list): + def __iter__(self): + yield "okL" + l.extend(L([5, 6])) + l.extend(L([5.2, 6.3])) + # + class S(str): + def __iter__(self): + yield "okS" + l.extend(S("don't see me")) + # + class U(unicode): + def __iter__(self): + yield "okU" + l.extend(U(u"don't see me")) + # + assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + class AppTestForRangeLists(AppTestW_ListObject): spaceconfig = {"objspace.std.withrangelist": True} From noreply at buildbot.pypy.org Fri Dec 20 21:23:23 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 21:23:23 +0100 (CET) Subject: [pypy-commit] pypy default: test this too Message-ID: <20131220202323.71A881C35DF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68515:7c8c667c0851 Date: 2013-12-20 15:12 -0500 http://bitbucket.org/pypy/pypy/changeset/7c8c667c0851/ Log: test this too diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3048,8 +3048,10 @@ spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def test_zeros(self): - from numpypy import zeros + from numpypy import zeros, void a = zeros((), dtype=[('x', int), ('y', float)]) + assert type(a[()]) is void + assert type(a.item()) is tuple assert a[()]['x'] == 0 assert a[()]['y'] == 0 assert a.shape == () From noreply at buildbot.pypy.org Fri Dec 20 21:23:24 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 21:23:24 +0100 (CET) Subject: [pypy-commit] pypy default: fix view of record array scalar Message-ID: <20131220202324.AE1C21C35DF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68516:1bb4e20a626f Date: 2013-12-20 15:19 -0500 http://bitbucket.org/pypy/pypy/changeset/1bb4e20a626f/ Log: fix view of record array scalar diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -131,7 +131,10 @@ if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() - if space.is_none(w_idx): + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + return self.value.descr_getitem(space, w_idx).descr_ravel(space) + elif space.is_none(w_idx): new_shape = [1] arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3069,7 +3069,12 @@ assert a[1]['y'] == 2 def test_views(self): - from numpypy import array + from numpypy import array, zeros, ndarray + a 
= zeros((), dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]') + assert type(a['x']) is ndarray + assert a['x'] == 0 + assert a['y'] == 0 a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) raises((IndexError, ValueError), 'array([1])["x"]') raises((IndexError, ValueError), 'a["z"]') From noreply at buildbot.pypy.org Fri Dec 20 21:38:49 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 20 Dec 2013 21:38:49 +0100 (CET) Subject: [pypy-commit] pypy default: fix translation Message-ID: <20131220203849.0A5A91C35E4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68517:2607be88f864 Date: 2013-12-20 15:38 -0500 http://bitbucket.org/pypy/pypy/changeset/2607be88f864/ Log: fix translation diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -133,7 +133,9 @@ return self.get_scalar_value() elif space.isinstance_w(w_idx, space.w_str): if self.dtype.is_record_type(): - return self.value.descr_getitem(space, w_idx).descr_ravel(space) + w_val = self.value.descr_getitem(space, w_idx) + assert isinstance(w_val, W_GenericBox) + return w_val.descr_ravel(space) elif space.is_none(w_idx): new_shape = [1] arr = W_NDimArray.from_shape(space, new_shape, self.dtype) From noreply at buildbot.pypy.org Sat Dec 21 00:16:41 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 00:16:41 +0100 (CET) Subject: [pypy-commit] pypy default: fix init of scalar w subarrays Message-ID: <20131220231641.D25921C12CD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68518:b29074fcba99 Date: 2013-12-20 17:15 -0500 http://bitbucket.org/pypy/pypy/changeset/b29074fcba99/ Log: fix init of scalar w subarrays diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3227,11 +3227,14 @@ def test_subarrays(self): from numpypy import dtype, array, zeros - d = dtype([("x", "int", 3), ("y", "float", 5)]) + + a = zeros((), dtype=d) + #assert a['x'].shape == (3,) + #assert (a['x'] == [0, 0, 0]).all() + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) - for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() assert (a[1][v] == [4, 5, 6]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1735,6 +1735,8 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match from interp_dtype import W_Dtype + if w_items is None: + w_items = space.newtuple([None] * shape[0]) items_w = space.fixedview(w_items) subdtype = dtype.subdtype assert isinstance(subdtype, W_Dtype) From noreply at buildbot.pypy.org Sat Dec 21 00:16:43 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 00:16:43 +0100 (CET) Subject: [pypy-commit] pypy default: partially fix scalar record getitem Message-ID: <20131220231643.120451C356B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68519:a93131d9b1c2 Date: 2013-12-20 17:38 -0500 http://bitbucket.org/pypy/pypy/changeset/a93131d9b1c2/ Log: partially fix scalar record getitem diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- 
a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3102,6 +3102,12 @@ from numpypy import zeros a = [('x', int), ('y', float)] b = [('x', int), ('y', a)] + arr = zeros((), dtype=b) + assert arr['x'] == 0 + arr['y'] + #assert arr['y'].shape == () + #assert arr['y'][()][0] == 0 + #assert arr['y'][()][0] == 0 arr = zeros(3, dtype=b) arr[1]['x'] = 15 assert arr[1]['x'] == 15 diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1846,7 +1846,7 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(box.arr.dtype.get_size()): + for k in range(box.dtype.get_size()): arr.storage[k + i] = box.arr.storage[k + box.ofs] def to_builtin_type(self, space, box): From noreply at buildbot.pypy.org Sat Dec 21 00:16:44 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 00:16:44 +0100 (CET) Subject: [pypy-commit] pypy default: fix some scalar setitem cases Message-ID: <20131220231644.3BA831C12CD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68520:bf10cd3e2fa4 Date: 2013-12-20 18:01 -0500 http://bitbucket.org/pypy/pypy/changeset/bf10cd3e2fa4/ Log: fix some scalar setitem cases diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -150,6 +150,12 @@ space.wrap("0-d arrays can't be indexed")) def descr_setitem(self, space, _, w_idx, w_val): + if space.isinstance_w(w_idx, space.w_tuple): + if space.len_w(w_idx) == 0: + return self.set_scalar_value(self.dtype.coerce(space, w_val)) + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) @@ -181,7 +187,7 @@ s = self.dtype.itemtype.bool(self.value) w_res = W_NDimArray.from_shape(space, [s], index_type) if s == 1: - w_res.implementation.setitem(0, index_type.itemtype.box(0)) + w_res.implementation.setitem(0, index_type.itemtype.box(0)) return space.newtuple([w_res]) def fill(self, space, w_value): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -458,13 +458,17 @@ return space.wrap(dtype.itemtype.to_str(read_val)) return read_val - @unwrap_spec(item=str) - def descr_setitem(self, space, item, w_value): + def descr_setitem(self, space, w_item, w_value): + if space.isinstance_w(w_item, space.w_basestring): + item = space.str_w(w_item) + else: + raise OperationError(space.w_IndexError, space.wrap( + "invalid index")) try: ofs, dtype = self.dtype.fields[item] except KeyError: - raise OperationError(space.w_IndexError, - space.wrap("Field %s does not exist" % item)) + raise OperationError(space.w_ValueError, + space.wrap("field named %s not found" % item)) dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -748,6 +748,8 @@ b = a[()] assert type(b) is int_ assert b == 3 + a[()] = 4 + assert a == 4 def test_len(self): from numpypy import array 
@@ -3104,6 +3106,12 @@ b = [('x', int), ('y', a)] arr = zeros((), dtype=b) assert arr['x'] == 0 + arr['x'] = 2 + assert arr['x'] == 2 + exc = raises(IndexError, "arr[3L]") + assert exc.value.message == "0-d arrays can't be indexed" + exc = raises(ValueError, "arr['xx'] = 2") + assert exc.value.message == "field named xx not found" arr['y'] #assert arr['y'].shape == () #assert arr['y'][()][0] == 0 From noreply at buildbot.pypy.org Sat Dec 21 01:53:13 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 01:53:13 +0100 (CET) Subject: [pypy-commit] pypy default: fix scalar recordtype getitem Message-ID: <20131221005313.0AD821C12CD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68521:c477a1c101c4 Date: 2013-12-20 18:50 -0500 http://bitbucket.org/pypy/pypy/changeset/c477a1c101c4/ Log: fix scalar recordtype getitem diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -134,8 +134,7 @@ elif space.isinstance_w(w_idx, space.w_str): if self.dtype.is_record_type(): w_val = self.value.descr_getitem(space, w_idx) - assert isinstance(w_val, W_GenericBox) - return w_val.descr_ravel(space) + return convert_to_array(space, w_val) elif space.is_none(w_idx): new_shape = [1] arr = W_NDimArray.from_shape(space, new_shape, self.dtype) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -421,7 +421,7 @@ self.dtype = dtype def get_dtype(self, space): - return self.arr.dtype + return self.dtype def raw_str(self): return self.arr.dtype.itemtype.to_str(self) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3112,10 +3112,10 @@ assert exc.value.message == "0-d arrays can't be indexed" exc = raises(ValueError, "arr['xx'] = 2") assert exc.value.message == "field named xx not found" - arr['y'] - #assert arr['y'].shape == () - #assert arr['y'][()][0] == 0 - #assert arr['y'][()][0] == 0 + assert arr['y'].dtype == a + assert arr['y'].shape == () + #assert arr['y'][()]['x'] == 0 + #assert arr['y'][()]['y'] == 0 arr = zeros(3, dtype=b) arr[1]['x'] = 15 assert arr[1]['x'] == 15 From noreply at buildbot.pypy.org Sat Dec 21 01:53:14 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 01:53:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix recordtype store data Message-ID: <20131221005314.3A11C1C12CD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68522:31ccdc34a8cb Date: 2013-12-20 19:10 -0500 http://bitbucket.org/pypy/pypy/changeset/31ccdc34a8cb/ Log: fix recordtype store data diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3101,10 +3101,10 @@ assert repr(a[0]) == '(1, 2.0)' def test_nested_dtype(self): - from numpypy import zeros + import numpy as np a = [('x', int), ('y', float)] b = [('x', int), ('y', a)] - arr = zeros((), dtype=b) + arr = np.zeros((), dtype=b) assert arr['x'] == 0 arr['x'] = 2 assert arr['x'] == 2 @@ -3114,9 +3114,13 @@ assert exc.value.message == "field named xx not found" assert arr['y'].dtype == a assert 
arr['y'].shape == () - #assert arr['y'][()]['x'] == 0 - #assert arr['y'][()]['y'] == 0 - arr = zeros(3, dtype=b) + assert arr['y'][()]['x'] == 0 + assert arr['y'][()]['y'] == 0 + arr['y'][()]['x'] = 2 + arr['y'][()]['y'] = 3 + assert arr['y'][()]['x'] == 2 + assert arr['y'][()]['y'] == 3 + arr = np.zeros(3, dtype=b) arr[1]['x'] = 15 assert arr[1]['x'] == 15 arr[1]['y']['y'] = 3.5 diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1847,7 +1847,7 @@ def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) for k in range(box.dtype.get_size()): - arr.storage[k + i] = box.arr.storage[k + box.ofs] + arr.storage[k + i + ofs] = box.arr.storage[k + box.ofs] def to_builtin_type(self, space, box): assert isinstance(box, interp_boxes.W_VoidBox) From noreply at buildbot.pypy.org Sat Dec 21 01:53:15 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 01:53:15 +0100 (CET) Subject: [pypy-commit] pypy default: extra test Message-ID: <20131221005315.6D0E01C12CD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68523:dfa3a3ad98e7 Date: 2013-12-20 19:52 -0500 http://bitbucket.org/pypy/pypy/changeset/dfa3a3ad98e7/ Log: extra test diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3248,6 +3248,7 @@ d = dtype([("x", "int", 3), ("y", "float", 5)]) a = zeros((), dtype=d) + #assert a['x'].dtype == int #assert a['x'].shape == (3,) #assert (a['x'] == [0, 0, 0]).all() @@ -3270,6 +3271,13 @@ a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 + a[1]["x"][2] = 123 + assert (a[1]["x"] == [4, 5, 123]).all() + a[1]['y'][3] = 4 + assert a[1]['y'][3] == 4 + assert a['y'][1][3] == 4 + a['y'][1][4] = 5 + assert a[1]['y'][4] == 5 d = dtype([("x", "int64", (2, 3))]) a = array([([[1, 2, 3], [4, 5, 6]],)], dtype=d) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1762,7 +1762,9 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): + assert i == 0 assert isinstance(box, interp_boxes.W_VoidBox) + assert box.dtype is box.arr.dtype for k in range(box.arr.dtype.get_size()): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] From noreply at buildbot.pypy.org Sat Dec 21 05:12:22 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 05:12:22 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131221041222.3ECFF1C12CD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68524:f8bb533bfd86 Date: 2013-12-20 23:11 -0500 http://bitbucket.org/pypy/pypy/changeset/f8bb533bfd86/ Log: cleanup diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1734,16 +1734,15 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match - from interp_dtype import W_Dtype - if w_items is None: - w_items = space.newtuple([None] * shape[0]) - items_w = space.fixedview(w_items) + if w_items is not None: + items_w = space.fixedview(w_items) + else: + items_w = [None] * shape[0] subdtype = dtype.subdtype - assert isinstance(subdtype, W_Dtype) itemtype = subdtype.itemtype if len(shape) <= 1: for i in range(len(items_w)): - w_box = 
itemtype.coerce(space, dtype.subdtype, items_w[i]) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) ofs += itemtype.get_element_size() else: @@ -1825,23 +1824,23 @@ def coerce(self, space, dtype, w_item): if isinstance(w_item, interp_boxes.W_VoidBox): return w_item - if w_item is None: - w_item = space.newtuple([None] * len(dtype.fields)) - # we treat every sequence as sequence, no special support - # for arrays - if not space.issequence_w(w_item): - raise OperationError(space.w_TypeError, space.wrap( - "expected sequence")) - if len(dtype.fields) != space.len_w(w_item): - raise OperationError(space.w_ValueError, space.wrap( - "wrong length")) - items_w = space.fixedview(w_item) + if w_item is not None: + # we treat every sequence as sequence, no special support + # for arrays + if not space.issequence_w(w_item): + raise OperationError(space.w_TypeError, space.wrap( + "expected sequence")) + if len(dtype.fields) != space.len_w(w_item): + raise OperationError(space.w_ValueError, space.wrap( + "wrong length")) + items_w = space.fixedview(w_item) + else: + items_w = [None] * len(dtype.fields) arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): ofs, subdtype = dtype.fields[dtype.fieldnames[i]] itemtype = subdtype.itemtype - w_item = items_w[i] - w_box = itemtype.coerce(space, subdtype, w_item) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) return interp_boxes.W_VoidBox(arr, 0, dtype) From noreply at buildbot.pypy.org Sat Dec 21 05:36:01 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 05:36:01 +0100 (CET) Subject: [pypy-commit] pypy default: this is needed Message-ID: <20131221043601.10F3B1C356B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68525:dbcaa585c8d8 Date: 2013-12-20 23:35 -0500 http://bitbucket.org/pypy/pypy/changeset/dbcaa585c8d8/ Log: this is needed diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1734,11 +1734,13 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match + from interp_dtype import W_Dtype if w_items is not None: items_w = space.fixedview(w_items) else: items_w = [None] * shape[0] subdtype = dtype.subdtype + assert isinstance(subdtype, W_Dtype) itemtype = subdtype.itemtype if len(shape) <= 1: for i in range(len(items_w)): From noreply at buildbot.pypy.org Sat Dec 21 06:36:54 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 06:36:54 +0100 (CET) Subject: [pypy-commit] pypy default: fix take with multidimensional indices argument Message-ID: <20131221053654.448F31C35E0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68526:016e3a6acf4b Date: 2013-12-21 00:35 -0500 http://bitbucket.org/pypy/pypy/changeset/016e3a6acf4b/ Log: fix take with multidimensional indices argument diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1185,7 +1185,9 @@ def take(a, indices, axis, out, mode): assert mode == 'raise' if axis is None: - res = a.ravel()[indices] + from numpy import array + indices = array(indices) + res = a.ravel()[indices.ravel()].reshape(indices.shape) else: from operator import mul if axis < 0: axis += len(a.shape) diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2726,7 +2726,10 @@ assert (arange(10).take([1, 2, 1, 1]) == [1, 2, 1, 1]).all() raises(IndexError, "arange(3).take([15])") a = arange(6).reshape(2, 3) + assert a.take(3) == 3 + assert a.take(3).shape == () assert (a.take([1, 0, 3]) == [1, 0, 3]).all() + assert (a.take([[1, 0], [2, 3]]) == [[1, 0], [2, 3]]).all() assert (a.take([1], axis=0) == [[3, 4, 5]]).all() assert (a.take([1], axis=1) == [[1], [4]]).all() assert ((a + a).take([3]) == [6]).all() From noreply at buildbot.pypy.org Sat Dec 21 09:18:45 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 09:18:45 +0100 (CET) Subject: [pypy-commit] pypy default: fix record array scalar creation Message-ID: <20131221081845.B66691C02D8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68527:fed3e23427f5 Date: 2013-12-21 02:14 -0500 http://bitbucket.org/pypy/pypy/changeset/fed3e23427f5/ Log: fix record array scalar creation diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,9 +69,11 @@ return True def find_shape_and_elems(space, w_iterable, dtype): + is_rec_type = dtype is not None and dtype.is_record_type() + if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) - is_rec_type = dtype is not None and dtype.is_record_type() while True: if not batch: return shape[:], [] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3100,6 +3100,9 @@ def test_creation_and_repr(self): from numpypy import array + a = array((1, 2), dtype=[('x', int), ('y', float)]) + assert a.shape == () + assert repr(a[()]) == '(1, 2.0)' a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) assert repr(a[0]) == '(1, 2.0)' From noreply at buildbot.pypy.org Sat Dec 21 09:18:46 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 09:18:46 +0100 (CET) Subject: [pypy-commit] pypy default: provide stub byteswap for record types to avoid segfault Message-ID: <20131221081846.E78471C02D8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68528:a3b136aca239 Date: 2013-12-21 03:17 -0500 http://bitbucket.org/pypy/pypy/changeset/a3b136aca239/ Log: provide stub byteswap for record types to avoid segfault diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3106,6 +3106,17 @@ a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) assert repr(a[0]) == '(1, 2.0)' + def test_void_copyswap(self): + import numpy as np + dt = np.dtype([('one', ' 0 and x['two'] > 2 + else: + assert x['one'] == 1 and x['two'] == 2 + def test_nested_dtype(self): import numpy as np a = [('x', int), ('y', float)] diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1852,6 +1852,10 @@ for k in range(box.dtype.get_size()): arr.storage[k + i + ofs] = box.arr.storage[k + box.ofs] + def 
byteswap(self, w_v): + # XXX implement + return w_v + def to_builtin_type(self, space, box): assert isinstance(box, interp_boxes.W_VoidBox) items = [] From noreply at buildbot.pypy.org Sat Dec 21 19:03:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 21 Dec 2013 19:03:16 +0100 (CET) Subject: [pypy-commit] pypy default: fix scalar view as subtype of ndarray Message-ID: <20131221180316.27CE01C35A9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68529:401a27a83ed5 Date: 2013-12-21 03:34 -0500 http://bitbucket.org/pypy/pypy/changeset/401a27a83ed5/ Log: fix scalar view as subtype of ndarray diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -7,10 +7,12 @@ from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.objspace.std.complextype import complex_typedef +from pypy.objspace.std.typeobject import W_TypeObject from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype @@ -275,14 +277,18 @@ def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype - dtype = space.interp_w(W_Dtype, - space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: - raise OperationError(space.w_TypeError, space.wrap( - "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): - raise OperationError(space.w_ValueError, space.wrap( - "new type not compatible with array.")) + if type(w_dtype) is W_TypeObject and \ + space.abstract_issubclass_w(w_dtype, space.gettypefor(W_NDimArray)): + dtype = self.get_dtype(space) + else: + dtype = space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) + if dtype.get_size() != self.get_dtype(space).get_size(): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) if dtype.is_str_or_unicode(): return dtype.coerce(space, space.wrap(self.raw_str())) elif dtype.is_record_type(): diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -75,6 +75,11 @@ b = matrix(a) assert isinstance(b, matrix) assert (b == a).all() + a = array(5)[()] + for s in [matrix, ndarray]: + b = a.view(s) + assert b == a + assert type(b) is type(a) def test_subtype_like_matrix(self): import numpy as np From noreply at buildbot.pypy.org Sun Dec 22 01:49:09 2013 From: noreply at buildbot.pypy.org (vbernat) Date: Sun, 22 Dec 2013 01:49:09 +0100 (CET) Subject: [pypy-commit] cffi fix/sinl: Link math tests using `sin()` to `libm` Message-ID: <20131222004909.429E41C12CD@cobra.cs.uni-duesseldorf.de> Author: Vincent Bernat Branch: fix/sinl Changeset: r1445:f3dabd52fbeb Date: 2013-12-21 22:30 +0100 http://bitbucket.org/cffi/cffi/changeset/f3dabd52fbeb/ Log: Link math tests using 
`sin()` to `libm` While this linking seems to be done indirectly on most platforms, it fails to work correctly on Sparc and S390x where an inappropriate version of `sin()` is used. This should additional occurrences of bug #68. Tests are passing on amd64, sparc and s390x Debian Linux platforms. Both Python 2.x and Python 3.x. diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -63,13 +63,13 @@ def test_simple_case(): ffi = FFI() ffi.cdef("double sin(double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) assert lib.sin(1.23) == math.sin(1.23) def test_rounding_1(): ffi = FFI() ffi.cdef("float sin(double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -77,7 +77,7 @@ def test_rounding_2(): ffi = FFI() ffi.cdef("double sin(float x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -103,7 +103,7 @@ def test_longdouble(): ffi = FFI() ffi.cdef("long double sinl(long double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) for input in [1.23, ffi.cast("double", 1.23), ffi.cast("long double", 1.23)]: diff --git a/testing/test_zdistutils.py b/testing/test_zdistutils.py --- a/testing/test_zdistutils.py +++ b/testing/test_zdistutils.py @@ -25,7 +25,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.write_source() with open(v.sourcefilename, 'r') as f: data = f.read() @@ -35,7 +36,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.sourcefilename = filename = str(udir.join('write_source.c')) v.write_source() assert filename == v.sourcefilename @@ -47,7 +49,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) try: from StringIO import StringIO except ImportError: @@ -60,7 +63,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.compile_module() assert v.get_module_name().startswith('_cffi_') if v.generates_python_module(): @@ -71,7 +75,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!2*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) basename = self.__class__.__name__ + 'test_compile_module' v.modulefilename = filename = str(udir.join(basename + '.so')) v.compile_module() @@ -87,7 +92,8 @@ ffi = FFI() ffi.cdef("%s sin(double x);" % csrc) v = Verifier(ffi, "#include ", - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) 
names.append(v.get_module_name()) assert names[0] == names[1] != names[2] @@ -104,7 +110,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!3*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -114,7 +121,8 @@ csrc = '/*hi there %s!4*/#include "test_verifier_args.h"\n' % self udir.join('test_verifier_args.h').write('#include \n') v = Verifier(ffi, csrc, include_dirs=[str(udir)], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -122,7 +130,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = "/*6%s*/\n#include " % self - lib = ffi.verify(csrc, force_generic_engine=self.generic) + lib = ffi.verify(csrc, force_generic_engine=self.generic, + libraries=["m"]) assert lib.sin(12.3) == math.sin(12.3) assert isinstance(ffi.verifier, Verifier) with open(ffi.verifier.sourcefilename, 'r') as f: @@ -139,7 +148,8 @@ #endif ''' lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) assert lib.sin(12.3) == math.sin(12.3) v = ffi.verifier ext = v.get_extension() @@ -152,7 +162,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there9!%s*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) assert not os.path.exists(v.sourcefilename) v.get_extension() assert os.path.exists(v.sourcefilename) From noreply at buildbot.pypy.org Sun Dec 22 01:49:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 22 Dec 2013 01:49:10 +0100 (CET) Subject: [pypy-commit] cffi default: Merged in vbernat/cffi/fix/sinl (pull request #23) Message-ID: <20131222004910.5BFD01C358C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1446:03545db1f4b6 Date: 2013-12-22 01:48 +0100 http://bitbucket.org/cffi/cffi/changeset/03545db1f4b6/ Log: Merged in vbernat/cffi/fix/sinl (pull request #23) Link math tests using `sin()` to `libm` diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -63,13 +63,13 @@ def test_simple_case(): ffi = FFI() ffi.cdef("double sin(double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) assert lib.sin(1.23) == math.sin(1.23) def test_rounding_1(): ffi = FFI() ffi.cdef("float sin(double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -77,7 +77,7 @@ def test_rounding_2(): ffi = FFI() ffi.cdef("double sin(float x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -103,7 +103,7 @@ def test_longdouble(): ffi = FFI() ffi.cdef("long double sinl(long double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) for input in [1.23, ffi.cast("double", 1.23), ffi.cast("long double", 1.23)]: diff --git a/testing/test_zdistutils.py b/testing/test_zdistutils.py --- a/testing/test_zdistutils.py 
+++ b/testing/test_zdistutils.py @@ -25,7 +25,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.write_source() with open(v.sourcefilename, 'r') as f: data = f.read() @@ -35,7 +36,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.sourcefilename = filename = str(udir.join('write_source.c')) v.write_source() assert filename == v.sourcefilename @@ -47,7 +49,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) try: from StringIO import StringIO except ImportError: @@ -60,7 +63,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.compile_module() assert v.get_module_name().startswith('_cffi_') if v.generates_python_module(): @@ -71,7 +75,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!2*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) basename = self.__class__.__name__ + 'test_compile_module' v.modulefilename = filename = str(udir.join(basename + '.so')) v.compile_module() @@ -87,7 +92,8 @@ ffi = FFI() ffi.cdef("%s sin(double x);" % csrc) v = Verifier(ffi, "#include ", - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) names.append(v.get_module_name()) assert names[0] == names[1] != names[2] @@ -104,7 +110,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!3*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -114,7 +121,8 @@ csrc = '/*hi there %s!4*/#include "test_verifier_args.h"\n' % self udir.join('test_verifier_args.h').write('#include \n') v = Verifier(ffi, csrc, include_dirs=[str(udir)], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -122,7 +130,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = "/*6%s*/\n#include " % self - lib = ffi.verify(csrc, force_generic_engine=self.generic) + lib = ffi.verify(csrc, force_generic_engine=self.generic, + libraries=["m"]) assert lib.sin(12.3) == math.sin(12.3) assert isinstance(ffi.verifier, Verifier) with open(ffi.verifier.sourcefilename, 'r') as f: @@ -139,7 +148,8 @@ #endif ''' lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) assert lib.sin(12.3) == math.sin(12.3) v = ffi.verifier ext = v.get_extension() @@ -152,7 +162,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there9!%s*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, 
csrc, force_generic_engine=self.generic, + libraries=["m"]) assert not os.path.exists(v.sourcefilename) v.get_extension() assert os.path.exists(v.sourcefilename) From noreply at buildbot.pypy.org Sun Dec 22 18:52:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 22 Dec 2013 18:52:30 +0100 (CET) Subject: [pypy-commit] stmgc c5: Fix Message-ID: <20131222175230.3B2CE1C0842@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r581:a5dd109f7237 Date: 2013-12-20 18:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/a5dd109f7237/ Log: Fix diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -490,11 +490,11 @@ void stm_start_transaction(void) { struct shared_descriptor_s *d = stm_shared_descriptor; - stm_transaction_version = - __sync_fetch_and_add(&d->next_transaction_version, 2u); - assert(stm_transaction_version <= 0xffff);//XXX - assert((stm_transaction_version & 1) == 0); /* EVEN number */ - assert(stm_transaction_version >= 2); + unsigned int v = __sync_fetch_and_add(&d->next_transaction_version, 2u); + assert(v <= 0xffff);//XXX + assert((v & 1) == 0); /* EVEN number */ + assert(v >= 2); + stm_transaction_version = v; struct write_history_s *cur = NULL; if (stm_local.writes_by_this_transaction != NULL) { From noreply at buildbot.pypy.org Sun Dec 22 18:52:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 22 Dec 2013 18:52:31 +0100 (CET) Subject: [pypy-commit] stmgc c5: Refactor to have all threads really be regular threads. Message-ID: <20131222175231.4E0581C156C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r582:3d87ea473f09 Date: 2013-12-22 18:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/3d87ea473f09/ Log: Refactor to have all threads really be regular threads. Adapted the comment at the start of core.c. The rest is in-progress. diff --git a/c5/Makefile b/c5/Makefile --- a/c5/Makefile +++ b/c5/Makefile @@ -4,7 +4,7 @@ demo1: demo1.c $(C_FILES) $(H_FILES) - gcc -o $@ -O2 -g demo1.c $(C_FILES) -Wall + gcc -pthread -o $@ -O2 -g demo1.c $(C_FILES) -Wall demo2: demo2.c largemalloc.c largemalloc.h gcc -o $@ -g demo2.c largemalloc.c -Wall diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -10,94 +10,139 @@ #include "pagecopy.h" -/* This file only works on 64-bit Linux for now. The logic is based on - remapping pages around, which can get a bit confusing. Each "thread" - runs in its own process, so that it has its own mapping. The - processes share an mmap of length NB_PAGES, which is created shared - but anonymous, and passed to subprocesses by forking. +/* This only works with clang, and on 64-bit Linux, for now. + It depends on: + + * the %gs segment prefix - The mmap's content does not depend on which process is looking at it: - it contains what we'll call "mm pages", which is 4096 bytes of data - at some file offset (which all processes agree on). The term "pgoff" - used below means such an offset. It is a uint32_t expressed in units - of 4096 bytes; so the underlying mmap is limited to 2**32 pages or - 16TB. + This a hack using __attribute__((address_space(256))) on + structs, which makes clang write all pointer dereferences to + them using the "%gs:" prefix. This is a rarely-used way to + shift all memory accesses by some offset stored in the %gs + special register. Each thread has its own value in %gs. Note + that %fs is used in a similar way by the pthread library to + offset the thread-local variables; what we need is similar to + thread-local variables, but in large quantity. 
- The mm pages are then mapped in each process at some address, and - their content is accessed with regular pointers. We'll call such a - page a "local page". The term "local" is used because each process - has its own, different mapping. As it turns out, mm pages are - initially mapped sequentially as local pages, but this changes over - time. To do writes in a transaction, the data containing the object - is first duplicated --- so we allocate a fresh new mm page in the - mmap file, and copy the contents to it. Then we remap the new mm - page over the *same* local page as the original. So from this - process' point of view, the object is still at the same address, but - writes to it now happen to go to the new mm page instead of the old - one. + * remap_file_pages() - This is basically what happens automatically with fork() for regular - memory; the difference is that at commit time, we try to publish the - modified pages back for everybody to see. This involves possibly - merging changes done by other processes to other objects from the - same page. + This is a Linux-only system call that allows us to move or + duplicate "pages" of an mmap. A page is 4096 bytes. The same + page can be viewed at several addresses. It gives an mmap + which appears larger than the physical memory that stores it: + read/writes at one address are identical to read/writes at + different addresses as well, by going to the same physical + memory. This is important in order to share most pages between + most threads. - The local pages are usually referenced by pointers, but may also be - expressed as an index, called the "local index" of the page. + Here is a more detailed presentation. All the GC-managed memory is + in one big mmap, divided in N+1 sections: the first section holds the + status of the latest committed transaction; and the N following + sections are thread-local and hold each the status of one of the N + threads. These thread-local sections are initially remapped with + remap_file_pages() to correspond to the same memory as the first + section. + + When the current transaction does a write to an old page, we call + remap_file_pages() again to unshare the page in question before + allowing the write to occur. (This is similar to what occurs after + fork(), but done explicitly instead of by the OS.) + + Once a page is unshared, it remains unshared until some event occurs + (probably the next major collection; not implemented yet). + + The memory content in the common (first) section contains objects + with regular pointers to each other. Each thread accesses these + objects using the %gs segment prefix, which is configured to shift + the view to this thread's thread-local section. + + To clarify terminology, we will call "object page" a page of memory + from the common (first) section. The term "pgoff" refers to a page + index in this common section. For convenience this number is a + uint32_t (so the limit is 2**32 object pages, or 16 terabytes). + + Exact layout (example with 2 threads only): + + <---------------%gs is thread 2-----------> + + <---%gs is thread 1--> + + +-------------.. +-+-+ +---+ +-+-+ +---+ + | normal progr. |L|0| |RM1| |L|0| |RM2| + +-------------.. +-+-+ +---+ +-+-+ +---+ + ^null address + + +--------------------+--------------------+--------------------+ + | object pages | thread-local 1 | thread-local 2 | + +--------------------+--------------------+--------------------+ + + There are NB_PAGES object pages; so far it is 1 GB of memory. 
The + big mmap (bottom line) is thus allocated as 3 GB of consecutive + memory, and %gs is set to 1 billion in thread 1 and 2 billion in + thread 2. + + The constrains on this layout come from the fact that we'd like the + objects (in the object pages) to look correct: they contain pointers + to more objects (also in the object pages), or nulls. This is not + really necessary (e.g. we could store indexes from some place, rather + than real pointers) but should help debugging. + + We also allocate 2*N pages at known addresses: the L pages, just + before the addresses 1GB and 2GB, contain thread-locals and are + accessed as %gs:(small negative offset). The 0 pages are reserved + but marked as not accessible, to crash cleanly on null pointer + dereferences, done as %gs:(0). + + Finally we have 64 MB of pages written as RM1 and RM2: they are + thread-local read markers. They are placed precisely such that, + for object address "p", the read marker is at %gs:(p/16). In the + diagram above RM1 is placed somewhere between the two L-0 blocks, + but that's not required. + + This is possible by mmaps at fixed addresses, and hopefully still + gives enough flexibility to let us try several other sets of + addresses if the first set is busy. We use here the fact that the + total address space available is huge. */ -#ifdef STM_TESTS -# define NB_PAGES (256*10) // 10MB -#else -# define NB_PAGES (256*1024) // 1GB -#endif -#define MAP_PAGES_FLAGS (MAP_SHARED|MAP_ANONYMOUS) +#define NB_PAGES (256*1024) // 1GB +#define NB_THREADS 128 +#define MAP_PAGES_FLAGS (MAP_SHARED|MAP_ANONYMOUS|MAP_NORESERVE) #define CACHE_LINE_SIZE 128 // conservatively large value to avoid aliasing -#define PGKIND_NEVER_USED 0 -#define LARGE_OBJECT_WORDS 36 /* range(2, LARGE_OBJECT_WORDS) */ -#define PGKIND_FREED 0xff -#define PGKIND_WRITE_HISTORY 0xfe -#define PGKIND_SHARED_DESCRIPTOR 0xfd /* only for the first mm page */ +#define LARGE_OBJECT_WORDS 36 struct page_header_s { /* Every page starts with one such structure */ uint16_t version; /* when the data in the page was written */ - uint8_t modif_head; /* head of a chained list of objects in this - page that have modified == this->version */ - uint8_t kind; /* either PGKIND_xxx or a number in - range(2, LARGE_OBJECT_WORDS) */ - uint32_t pgoff; /* the mm page offset */ + uint8_t obj_word_size; /* size of all objects in this page, in words + in range(2, LARGE_OBJECT_WORDS) */ + uint32_t write_log_index; +}; + +struct write_log_s { + uint32_t pgoff; + uint32_t modif[8]; /* N'th bit set if and only if object at N*16 changed */ }; struct write_history_s { struct write_history_s *previous_older_transaction; uint16_t transaction_version; - uint32_t nb_updates; - uint32_t updates[]; /* pairs (local_index, new_pgoff) */ + struct write_log_s log[]; /* ends with pgoff == 0 */ }; struct shared_descriptor_s { - /* There is a single shared descriptor. This regroups all data - that needs to be dynamically shared among processes. The - first mm page is used for this. */ - union { - struct page_header_s header; - char _pad0[CACHE_LINE_SIZE]; - }; - union { - uint64_t volatile index_page_never_used; - char _pad1[CACHE_LINE_SIZE]; - }; - union { - unsigned int volatile next_transaction_version; /* always EVEN */ - char _pad2[CACHE_LINE_SIZE]; - }; - union { - struct write_history_s *volatile most_recent_committed_transaction; - char _pad3[CACHE_LINE_SIZE]; - }; + /* There is a single shared descriptor. 
This contains global + variables, but as a structure, in order to control the sharing at + the cache line level --- we don't want the following few + variables to be accidentally in the same cache line. */ + char _pad0[CACHE_LINE_SIZE]; uint64_t volatile index_page_never_used; + char _pad1[CACHE_LINE_SIZE]; unsigned int volatile next_transaction_version; + /* always EVEN */ + char _pad2[CACHE_LINE_SIZE]; struct write_history_s * + volatile most_recent_committed_transaction; + char _pad3[CACHE_LINE_SIZE]; }; struct alloc_for_size_s { @@ -105,24 +150,21 @@ char *end; }; -struct local_data_s { - /* This is just a bunch of global variables, but during testing, - we save it all away and restore different ones to simulate - different forked processes. */ +typedef GCOBJECT struct _thread_local2_s { + /* All the thread-local variables we need. */ struct write_history_s *base_page_mapping; struct write_history_s *writes_by_this_transaction; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; char *read_markers; -#ifdef STM_TESTS - struct _read_marker_s *_current_read_markers; - uint16_t _transaction_version; -#endif -}; + _thread_local1_t _stm_tl1; /* space for the macro _STM_TL1 in core.h */ +} _thread_local2_t; -struct shared_descriptor_s *stm_shared_descriptor; -struct _read_marker_s *stm_current_read_markers; -struct local_data_s stm_local; -uint16_t stm_transaction_version; /* always EVEN */ +#define _STM_TL2 (((_thread_local2_t *)0)[-1]) + +struct shared_descriptor_s stm_shared_descriptor; + + +/************************************************************/ _Bool _stm_was_read(struct object_s *object) @@ -133,12 +175,14 @@ static struct _read_marker_s *get_current_read_marker(struct object_s *object) { - return stm_current_read_markers + (((uintptr_t)object) >> 4); + struct _read_marker_s *crm = _STM_TL1.stm_current_read_markers; + return crm + (((uintptr_t)object) >> 4); } _Bool _stm_was_written(struct object_s *object) { - return (object->modified == stm_transaction_version); + uint16_t stv = _STM_TL1.stm_transaction_version; + return (object->modified == stv); } @@ -320,7 +364,7 @@ fprintf(stderr, "Cannot use more than 1<<32 pages of memory"); abort(); } - char *stm_pages = mmap(NULL, NB_PAGES*4096, PROT_READ|PROT_WRITE, + char *stm_pages = mmap(NULL, NB_PAGES*4096ul, PROT_READ|PROT_WRITE, MAP_PAGES_FLAGS, -1, 0); if (stm_pages == MAP_FAILED) { perror("mmap stm_pages failed"); diff --git a/c5/core.h b/c5/core.h --- a/c5/core.h +++ b/c5/core.h @@ -3,12 +3,14 @@ #include -struct object_s { +#define GCOBJECT __attribute__((address_space(256))) + +typedef GCOBJECT struct object_s { /* Every objects starts with one such structure */ uint16_t modified; - uint8_t modif_next; uint8_t flags; -}; + uint8_t reserved; +} object_t; struct _read_marker_s { /* We associate a single byte to every object, by simply dividing @@ -17,8 +19,12 @@ unsigned char c; }; -extern struct _read_marker_s *stm_current_read_markers; -extern uint16_t stm_transaction_version; +typedef GCOBJECT struct _thread_local1_s { + struct _read_marker_s *stm_current_read_markers; + uint16_t stm_transaction_version; /* always EVEN */ +} _thread_local1_t; + +#define _STM_TL1 (((_thread_local1_t *)0)[-1]) /************************************************************/ @@ -32,15 +38,16 @@ static inline void stm_read(struct object_s *object) { - stm_current_read_markers[((uintptr_t)object) >> 4].c = - (unsigned char)(uintptr_t)stm_current_read_markers; + struct _read_marker_s *crm = _STM_TL1.stm_current_read_markers; + 
crm[((uintptr_t)object) >> 4].c = (unsigned char)(uintptr_t)crm; } void _stm_write_slowpath(struct object_s *); static inline void stm_write(struct object_s *object) { - if (__builtin_expect(object->modified != stm_transaction_version, 0)) + uint16_t stv = _STM_TL1.stm_transaction_version; + if (__builtin_expect(object->modified != stv, 0)) _stm_write_slowpath(object); } diff --git a/c5/demo1.c b/c5/demo1.c --- a/c5/demo1.c +++ b/c5/demo1.c @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include "core.h" @@ -62,28 +64,33 @@ printf("thread %d: %p, %p\n", i, ob1, ob2); } +static void *run_in_thread(void *arg) +{ + stm_setup_process(); + do_run_in_thread((intptr_t)arg); + return NULL; +} + void do_test(void) { - int i; - pid_t child_pids[NUM_THREADS]; + int i, res; + pthread_t threads[NUM_THREADS]; for (i = 0; i < NUM_THREADS; i++) { - child_pids[i] = fork(); - if (child_pids[i] == -1) { - perror("fork"); + res = pthread_create(&threads[i], NULL, run_in_thread, + (void *)(intptr_t)i); + if (res != 0) { + errno = res; + perror("pthread_create"); abort(); } - if (child_pids[i] == 0) { - stm_setup_process(); - do_run_in_thread(i); - exit(0); - } } for (i = 0; i < NUM_THREADS; i++) { - int status; - if (waitpid(child_pids[i], &status, 0) == -1) { - perror("waitpid"); + res = pthread_join(threads[i], NULL); + if (res != 0) { + errno = res; + perror("pthread_join"); abort(); } } From noreply at buildbot.pypy.org Mon Dec 23 01:18:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 23 Dec 2013 01:18:53 +0100 (CET) Subject: [pypy-commit] pypy default: fix comment Message-ID: <20131223001853.A235D1C02D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68530:56835500790b Date: 2013-12-23 01:18 +0100 http://bitbucket.org/pypy/pypy/changeset/56835500790b/ Log: fix comment diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! 
extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length From noreply at buildbot.pypy.org Mon Dec 23 19:44:11 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 23 Dec 2013 19:44:11 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup array/scalar descr_view Message-ID: <20131223184411.03D741C0162@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68531:8f477bbaddf7 Date: 2013-12-23 13:41 -0500 http://bitbucket.org/pypy/pypy/changeset/8f477bbaddf7/ Log: cleanup array/scalar descr_view diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -7,7 +7,6 @@ from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.objspace.std.complextype import complex_typedef -from pypy.objspace.std.typeobject import W_TypeObject from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name @@ -277,8 +276,15 @@ def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype - if type(w_dtype) is W_TypeObject and \ - space.abstract_issubclass_w(w_dtype, space.gettypefor(W_NDimArray)): + try: + subclass = space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))) + except OperationError, e: + if e.match(space, space.w_TypeError): + subclass = False + else: + raise + if subclass: dtype = self.get_dtype(space) else: dtype = space.interp_w(W_Dtype, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -731,11 +731,15 @@ def descr_view(self, space, w_dtype=None, w_type=None): if not w_type and w_dtype: try: - if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + if space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))): w_type = w_dtype w_dtype = None - except (OperationError, TypeError): - pass + except OperationError, e: + if e.match(space, space.w_TypeError): + pass + else: + raise if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), From noreply at buildbot.pypy.org Mon Dec 23 19:57:12 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 23 Dec 2013 19:57:12 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_signbit for win32 Message-ID: <20131223185712.47FFF1C1177@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68532:4702ccfceeb9 Date: 2013-12-23 13:56 -0500 http://bitbucket.org/pypy/pypy/changeset/4702ccfceeb9/ Log: fix test_signbit for win32 diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -391,14 +391,14 @@ assert (a == ref).all() def test_signbit(self): - from numpy import signbit, add + from numpy import signbit, add, copysign, nan assert signbit(add.identity) == False assert (signbit([0, 0.0, 1, 1.0, float('inf')]) == [False, False, False, False, False]).all() assert (signbit([-0, -0.0, -1, -1.0, float('-inf')]) == [False, True, True, True, True]).all() - assert (signbit([float('nan'), float('-nan'), -float('nan')]) == - 
[False, True, True]).all() + assert (signbit([copysign(nan, 1), copysign(nan, -1)]) == + [False, True]).all() def test_reciprocal(self): from numpy import array, reciprocal From noreply at buildbot.pypy.org Tue Dec 24 19:09:33 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 24 Dec 2013 19:09:33 +0100 (CET) Subject: [pypy-commit] pypy default: fix int_only numpy ufunc2 wrt uint64 (issue 1664) Message-ID: <20131224180933.821A81C0162@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68533:fc6ac3a6a746 Date: 2013-12-24 13:08 -0500 http://bitbucket.org/pypy/pypy/changeset/fc6ac3a6a746/ Log: fix int_only numpy ufunc2 wrt uint64 (issue 1664) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -376,14 +376,19 @@ w_rdtype = w_ldtype elif w_lhs.is_scalar() and not w_rhs.is_scalar(): w_ldtype = w_rdtype - if (self.int_only and (not w_ldtype.is_int_type() or not w_rdtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or w_rdtype.is_complex_type())): - raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) calc_dtype = find_binop_result_dtype(space, w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) + if (self.int_only and (not w_ldtype.is_int_type() or + not w_rdtype.is_int_type() or + not calc_dtype.is_int_type()) or + not self.allow_bool and (w_ldtype.is_bool_type() or + w_rdtype.is_bool_type()) or + not self.allow_complex and (w_ldtype.is_complex_type() or + w_rdtype.is_complex_type())): + raise OperationError(space.w_TypeError, space.wrap( + "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1164,14 +1164,20 @@ assert (2 << a == [2, 4, 8]).all() def test_rshift(self): - from numpypy import arange, array - - a = arange(10) + import numpy as np + a = np.arange(10) assert (a >> 2 == [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]).all() - a = array([True, False]) + a = np.array([True, False]) assert (a >> 1 == [0, 0]).all() - a = arange(3, dtype=float) + a = np.arange(3, dtype=float) raises(TypeError, lambda: a >> 1) + a = np.array([123], dtype='uint64') + b = a >> 1 + assert b == 61 + assert b.dtype.type is np.uint64 + a = np.array(123, dtype='uint64') + exc = raises(TypeError, "a >> 1") + assert 'not supported for the input types' in exc.value.message def test_rrshift(self): from numpypy import arange diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -113,6 +113,15 @@ assert a.squeeze() is a raises(TypeError, a.squeeze, 2) + def test_bitshift(self): + import numpy as np + assert np.int32(123) >> 1 == 61 + assert type(np.int32(123) >> 1) is np.int64 + assert np.int64(123) << 1 == 246 + assert type(np.int64(123) << 1) is np.int64 + exc = raises(TypeError, "np.uint64(123) >> 1") + assert 'not supported for the input types' in exc.value.message + def test_attributes(self): import numpy as np value = np.dtype('int64').type(12345) From noreply 
at buildbot.pypy.org Tue Dec 24 20:18:38 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 24 Dec 2013 20:18:38 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy long/ulong coerce for 64bit Message-ID: <20131224191838.39C2F1C1347@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68534:527f9e5a953f Date: 2013-12-24 14:17 -0500 http://bitbucket.org/pypy/pypy/changeset/527f9e5a953f/ Log: fix numpy long/ulong coerce for 64bit diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -6,6 +6,7 @@ def test_init(self): import numpy as np import math + import sys assert np.intp() == np.intp(0) assert np.intp('123') == np.intp(123) raises(TypeError, np.intp, None) @@ -19,6 +20,7 @@ assert math.isnan(np.complex_(None)) for c in ['i', 'I', 'l', 'L', 'q', 'Q']: assert np.dtype(c).type().dtype.char == c + assert np.dtype('L').type(sys.maxint + 42) def test_builtin(self): import numpy as np diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -12,7 +12,7 @@ from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, raw_storage_getitem) from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of +from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of, LONG_BIT from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.rstruct.nativefmttable import native_is_bigendian @@ -568,16 +568,6 @@ BoxType = interp_boxes.W_UInt32Box format_code = "I" -class Long(BaseType, Integer): - T = rffi.LONG - BoxType = interp_boxes.W_LongBox - format_code = "l" - -class ULong(BaseType, Integer): - T = rffi.ULONG - BoxType = interp_boxes.W_ULongBox - format_code = "L" - def _int64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) @@ -618,6 +608,22 @@ _coerce = func_with_new_name(_uint64_coerce, '_coerce') +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + format_code = "l" + + if LONG_BIT == 64: + _coerce = func_with_new_name(_int64_coerce, '_coerce') + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + format_code = "L" + + if LONG_BIT == 64: + _coerce = func_with_new_name(_uint64_coerce, '_coerce') + class Float(Primitive): _mixin_ = True From noreply at buildbot.pypy.org Tue Dec 24 22:12:32 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 24 Dec 2013 22:12:32 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy dtype guessing for uint64 Message-ID: <20131224211232.7EF711C0162@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68535:245605722af8 Date: 2013-12-24 14:57 -0500 http://bitbucket.org/pypy/pypy/changeset/245605722af8/ Log: fix numpy dtype guessing for uint64 diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -532,6 +532,7 @@ bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype + uint64_dtype = interp_dtype.get_dtype_cache(space).w_uint64dtype 
complex_type = interp_dtype.get_dtype_cache(space).w_complex128dtype float_type = interp_dtype.get_dtype_cache(space).w_float64dtype if isinstance(w_obj, interp_boxes.W_GenericBox): @@ -552,7 +553,15 @@ elif space.isinstance_w(w_obj, space.w_long): if (current_guess is None or current_guess is bool_dtype or current_guess is long_dtype or current_guess is int64_dtype): - return int64_dtype + try: + space.int_w(w_obj) + except OperationError, e: + if e.match(space, space.w_OverflowError): + return uint64_dtype + else: + raise + else: + return int64_dtype return current_guess elif space.isinstance_w(w_obj, space.w_complex): if (current_guess is None or current_guess is bool_dtype or diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1506,7 +1506,7 @@ def test_dtype_guessing(self): from numpypy import array, dtype - + import sys assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) @@ -1522,6 +1522,7 @@ assert array([int8(3)]).dtype is dtype("int8") assert array([bool_(True)]).dtype is dtype(bool) assert array([bool_(True), 3.0]).dtype is dtype(float) + assert array(sys.maxint + 42).dtype is dtype('Q') def test_comparison(self): import operator diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -20,7 +20,7 @@ assert math.isnan(np.complex_(None)) for c in ['i', 'I', 'l', 'L', 'q', 'Q']: assert np.dtype(c).type().dtype.char == c - assert np.dtype('L').type(sys.maxint + 42) + assert np.dtype('L').type(sys.maxint + 42) == sys.maxint + 42 def test_builtin(self): import numpy as np From noreply at buildbot.pypy.org Tue Dec 24 22:59:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 24 Dec 2013 22:59:16 +0100 (CET) Subject: [pypy-commit] pypy default: fix multiple item dtype guessing Message-ID: <20131224215916.56A061C1347@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68536:5c89e49f97c1 Date: 2013-12-24 16:42 -0500 http://bitbucket.org/pypy/pypy/changeset/5c89e49f97c1/ Log: fix multiple item dtype guessing diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -437,6 +437,8 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): + if dt2 is None: + return dt1 # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 @@ -533,44 +535,33 @@ long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype uint64_dtype = interp_dtype.get_dtype_cache(space).w_uint64dtype - complex_type = interp_dtype.get_dtype_cache(space).w_complex128dtype - float_type = interp_dtype.get_dtype_cache(space).w_float64dtype + complex_dtype = interp_dtype.get_dtype_cache(space).w_complex128dtype + float_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype if isinstance(w_obj, interp_boxes.W_GenericBox): dtype = w_obj.get_dtype(space) - if current_guess is None: - return dtype return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): - if current_guess is None or current_guess is bool_dtype: - return bool_dtype 
- return current_guess + return find_binop_result_dtype(space, bool_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_int): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype): - return long_dtype - return current_guess + return find_binop_result_dtype(space, long_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_long): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype): - try: - space.int_w(w_obj) - except OperationError, e: - if e.match(space, space.w_OverflowError): - return uint64_dtype - else: - raise - else: - return int64_dtype - return current_guess + try: + space.int_w(w_obj) + except OperationError, e: + if e.match(space, space.w_OverflowError): + return find_binop_result_dtype(space, uint64_dtype, + current_guess) + raise + return find_binop_result_dtype(space, int64_dtype, current_guess) + elif space.isinstance_w(w_obj, space.w_float): + return find_binop_result_dtype(space, float_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_complex): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype or - current_guess is complex_type or current_guess is float_type): - return complex_type - return current_guess + return complex_dtype + elif space.isinstance_w(w_obj, space.w_slice): + return long_dtype elif space.isinstance_w(w_obj, space.w_str): - if (current_guess is None): + if current_guess is None: return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY_STRING: @@ -578,12 +569,6 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess - if current_guess is complex_type: - return complex_type - if space.isinstance_w(w_obj, space.w_float): - return float_type - elif space.isinstance_w(w_obj, space.w_slice): - return long_dtype raise operationerrfmt(space.w_NotImplementedError, 'unable to create dtype from objects, ' '"%T" instance not supported', w_obj) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1511,7 +1511,7 @@ assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - #assert array([1L, 2, 3]).dtype is dtype(long) + assert array([1L, 2, 3]).dtype is dtype('q') assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1523,6 +1523,11 @@ assert array([bool_(True)]).dtype is dtype(bool) assert array([bool_(True), 3.0]).dtype is dtype(float) assert array(sys.maxint + 42).dtype is dtype('Q') + assert array([sys.maxint + 42] * 2).dtype is dtype('Q') + assert array([sys.maxint + 42, 123]).dtype is dtype(float) + assert array([sys.maxint + 42, 123L]).dtype is dtype(float) + assert array([1+2j, 123]).dtype is dtype(complex) + assert array([1+2j, 123L]).dtype is dtype(complex) def test_comparison(self): import operator From noreply at buildbot.pypy.org Tue Dec 24 23:00:06 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 24 Dec 2013 23:00:06 +0100 (CET) Subject: [pypy-commit] pypy default: this too Message-ID: <20131224220006.2D47E1C1347@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68537:9c94d5651eef Date: 2013-12-24 16:59 -0500 
http://bitbucket.org/pypy/pypy/changeset/9c94d5651eef/ Log: this too diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -558,8 +558,6 @@ return find_binop_result_dtype(space, float_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_complex): return complex_dtype - elif space.isinstance_w(w_obj, space.w_slice): - return long_dtype elif space.isinstance_w(w_obj, space.w_str): if current_guess is None: return interp_dtype.variable_dtype(space, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2840,7 +2840,11 @@ assert b[35] == 200 b[[slice(25, 30)]] = range(5) assert all(a[:5] == range(5)) - raises(TypeError, 'b[[[slice(25, 125)]]]') + import sys + if '__pypy__' not in sys.builtin_module_names: + raises(TypeError, 'b[[[slice(25, 125)]]]') + else: + raises(NotImplementedError, 'b[[[slice(25, 125)]]]') def test_cumsum(self): from numpypy import arange From noreply at buildbot.pypy.org Wed Dec 25 19:01:22 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 25 Dec 2013 19:01:22 +0100 (CET) Subject: [pypy-commit] pypy default: fix for 32bit Message-ID: <20131225180122.D71301C019E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68538:3cf09dd9b86e Date: 2013-12-25 13:00 -0500 http://bitbucket.org/pypy/pypy/changeset/3cf09dd9b86e/ Log: fix for 32bit diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -118,7 +118,7 @@ def test_bitshift(self): import numpy as np assert np.int32(123) >> 1 == 61 - assert type(np.int32(123) >> 1) is np.int64 + assert type(np.int32(123) >> 1) is np.int_ assert np.int64(123) << 1 == 246 assert type(np.int64(123) << 1) is np.int64 exc = raises(TypeError, "np.uint64(123) >> 1") From noreply at buildbot.pypy.org Wed Dec 25 21:08:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 21:08:20 +0100 (CET) Subject: [pypy-commit] pypy default: Reduce confusion: call ordered dict's "dictentries" differently Message-ID: <20131225200820.3021B1C02DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68539:8f69d56ced01 Date: 2013-12-25 21:07 +0100 http://bitbucket.org/pypy/pypy/changeset/8f69d56ced01/ Log: Reduce confusion: call ordered dict's "dictentries" differently diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -110,7 +110,7 @@ entrymeths['fasthashfn'] = ll_fasthash_function # Build the lltype data structures - DICTENTRY = lltype.Struct("dictentry", *entryfields) + DICTENTRY = lltype.Struct("odictentry", *entryfields) DICTENTRYARRAY = lltype.GcArray(DICTENTRY, adtmeths=entrymeths) fields = [ ("num_items", lltype.Signed), From noreply at buildbot.pypy.org Wed Dec 25 21:22:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 21:22:00 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: A branch to support OrderedDict in the JIT Message-ID: <20131225202200.327601C02DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68540:743f1fedcd33 Date: 2013-12-25 
21:19 +0100 http://bitbucket.org/pypy/pypy/changeset/743f1fedcd33/ Log: A branch to support OrderedDict in the JIT From noreply at buildbot.pypy.org Wed Dec 25 21:22:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 21:22:01 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: Add tests, failing Message-ID: <20131225202201.607961C02DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68541:77989adb60ab Date: 2013-12-25 21:19 +0100 http://bitbucket.org/pypy/pypy/changeset/77989adb60ab/ Log: Add tests, failing diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -2,12 +2,15 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver from rpython.rlib import objectmodel +from collections import OrderedDict class DictTests: + def _freeze_(self): + return True def test_dict_set_none(self): def fn(n): - d = {} + d = self.newdict() d[0] = None return bool(d[n]) res = self.interp_operations(fn, [0]) @@ -21,7 +24,7 @@ ]: myjitdriver = JitDriver(greens = [], reds = ['n', 'dct']) def f(n): - dct = {} + dct = self.newdict() while n > 0: myjitdriver.can_enter_jit(n=n, dct=dct) myjitdriver.jit_merge_point(n=n, dct=dct) @@ -51,7 +54,9 @@ ]: myjitdriver = JitDriver(greens = [], reds = ['total', 'it']) def f(n): - dct = {n: 100, 50: n+1} + dct = self.newdict() + dct[n] = 100 + dct[50] = n + 1 it = getattr(dct, name)() total = 0 while True: @@ -71,6 +76,8 @@ assert res == expected def test_dict_trace_hash(self): + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) def key(x): return x % 2 @@ -96,7 +103,7 @@ def test_dict_setdefault(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) def f(n): - dct = {} + dct = self.newdict() total = n while total: myjitdriver.jit_merge_point(total=total, dct=dct) @@ -110,6 +117,8 @@ self.check_resops(new=0, new_with_vtable=0) def test_dict_as_counter(self): + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) def key(x): return x % 2 @@ -131,6 +140,8 @@ self.check_resops(int_mod=2) # key + eq, but cached def test_repeated_lookup(self): + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") myjitdriver = JitDriver(greens = [], reds = ['n', 'd']) class Wrapper(object): _immutable_fields_ = ["value"] @@ -167,7 +178,8 @@ def f(n): while n > 0: driver.jit_merge_point(n=n) - d = {1: 1} + d = self.newdict() + d[1] = 1 for elem in d: n -= elem return n @@ -179,4 +191,28 @@ class TestLLtype(DictTests, LLJitMixin): - pass + @staticmethod + def newdict(): + return {} + +class TestLLOrderedDict(DictTests, LLJitMixin): + @staticmethod + def newdict(): + return OrderedDict() + + def test_dict_is_ordered(self): + def fn(n): + d = OrderedDict() + d[3] = 5 + d[n] = 9 + d[2] = 6 + d[1] = 4 + lst = d.items() + assert len(lst) == 4 + return ( lst[0][0] + 10*lst[0][1] + + 100*lst[1][0] + 1000*lst[1][1] + + 10000*lst[3][0] + 100000*lst[2][1] + + 1000000*lst[2][0] + 10000000*lst[3][1]) + + res = self.interp_operations(fn, [0]) + assert res == fn(0) From noreply at buildbot.pypy.org Wed Dec 25 21:34:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 21:34:39 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: 
Clean up old helpers not used any more Message-ID: <20131225203439.825B41C02DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68542:57974c10d990 Date: 2013-12-25 21:30 +0100 http://bitbucket.org/pypy/pypy/changeset/57974c10d990/ Log: Clean up old helpers not used any more diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -492,11 +492,6 @@ # ---------- dict ---------- - def _ll_0_newdict(DICT): - return ll_rdict.ll_newdict(DICT) - _ll_0_newdict.need_result_type = True - - _ll_2_dict_delitem = ll_rdict.ll_dict_delitem _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update From noreply at buildbot.pypy.org Wed Dec 25 21:34:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 21:34:40 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: Change the oopspecs in this file Message-ID: <20131225203440.A2E3A1C02DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68543:b3184fee79ff Date: 2013-12-25 21:30 +0100 http://bitbucket.org/pypy/pypy/changeset/b3184fee79ff/ Log: Change the oopspecs in this file diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -663,7 +663,7 @@ ll_dict_remove_deleted_items(d) else: ll_dict_reindex(d, new_size) -ll_dict_resize.oopspec = 'dict.resize(d)' +ll_dict_resize.oopspec = 'odict.resize(d)' def ll_dict_reindex(d, new_size): ll_malloc_indexes_and_choose_lookup(d, new_size) @@ -899,7 +899,7 @@ @jit.look_inside_iff(lambda RETURNTYPE, iter: jit.isvirtual(iter) and (iter.dict is None or jit.isvirtual(iter.dict))) - @jit.oopspec("dictiter.next%s(iter)" % kind) + @jit.oopspec("odictiter.next%s(iter)" % kind) def ll_dictnext(RETURNTYPE, iter): # note that RETURNTYPE is None for keys and values dict = iter.dict @@ -984,7 +984,7 @@ ll_dict_reindex(newdict, _ll_len_of_d_indexes(dict)) return newdict -ll_dict_copy.oopspec = 'dict.copy(dict)' +ll_dict_copy.oopspec = 'odict.copy(dict)' def ll_dict_clear(d): if d.num_used_items == 0: @@ -997,7 +997,7 @@ d.num_used_items = 0 d.resize_counter = DICT_INITSIZE * 2 # old_entries.delete() XXX -ll_dict_clear.oopspec = 'dict.clear(d)' +ll_dict_clear.oopspec = 'odict.clear(d)' def ll_dict_update(dic1, dic2): i = 0 @@ -1011,7 +1011,7 @@ index = dic1.lookup_function(dic1, key, hash, FLAG_STORE) _ll_dict_setitem_lookup_done(dic1, key, value, hash, index) i += 1 -ll_dict_update.oopspec = 'dict.update(dic1, dic2)' +ll_dict_update.oopspec = 'odict.update(dic1, dic2)' # this is an implementation of keys(), values() and items() # in a single function. @@ -1050,7 +1050,7 @@ i += 1 assert p == res.ll_length() return res - ll_kvi.oopspec = 'dict.%s(dic)' % kind + ll_kvi.oopspec = 'odict.%s(dic)' % kind return ll_kvi ll_dict_keys = _make_ll_keys_values_items('keys') From noreply at buildbot.pypy.org Wed Dec 25 21:34:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 21:34:41 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: Add oopspec pointers, copying what is done for dicts. 
Message-ID: <20131225203441.BF83F1C02DA@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: jit-ordereddict
Changeset: r68544:0634f23dd20e
Date: 2013-12-25 21:34 +0100
http://bitbucket.org/pypy/pypy/changeset/0634f23dd20e/

Log:  Add oopspec pointers, copying what is done for dicts.
      It seems to be enough, all tests pass.

diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py
--- a/rpython/jit/codewriter/support.py
+++ b/rpython/jit/codewriter/support.py
@@ -14,6 +14,7 @@
 from rpython.rtyper.extregistry import ExtRegistryEntry
 from rpython.rtyper.llinterp import LLInterpreter
 from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory, rstr as ll_rstr, rdict as ll_rdict
+from rpython.rtyper.lltypesystem import rordereddict
 from rpython.rtyper.lltypesystem.lloperation import llop
 from rpython.rtyper.lltypesystem.module import ll_math
 from rpython.translator.translator import TranslationContext
@@ -519,6 +520,33 @@
 
     _ll_1_dict_resize = ll_rdict.ll_dict_resize
 
+    # ---------- ordered dict ----------
+
+    _ll_1_odict_copy = rordereddict.ll_dict_copy
+    _ll_1_odict_clear = rordereddict.ll_dict_clear
+    _ll_2_odict_update = rordereddict.ll_dict_update
+
+    _ll_1_odict_keys = rordereddict.ll_dict_keys
+    _ll_1_odict_values = rordereddict.ll_dict_values
+    _ll_1_odict_items = rordereddict.ll_dict_items
+    _ll_1_odict_keys .need_result_type = True
+    _ll_1_odict_values.need_result_type = True
+    _ll_1_odict_items .need_result_type = True
+
+    _odictnext_keys = staticmethod(rordereddict.ll_dictnext_group['keys'])
+    _odictnext_values = staticmethod(rordereddict.ll_dictnext_group['values'])
+    _odictnext_items = staticmethod(rordereddict.ll_dictnext_group['items'])
+
+    def _ll_1_odictiter_nextkeys(iter):
+        return LLtypeHelpers._odictnext_keys(None, iter)
+    def _ll_1_odictiter_nextvalues(iter):
+        return LLtypeHelpers._odictnext_values(None, iter)
+    def _ll_1_odictiter_nextitems(RES, iter):
+        return LLtypeHelpers._odictnext_items(lltype.Ptr(RES), iter)
+    _ll_1_odictiter_nextitems.need_result_type = True
+
+    _ll_1_odict_resize = rordereddict.ll_dict_resize
+
     # ---------- strings and unicode ----------
 
     _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode

From noreply at buildbot.pypy.org Wed Dec 25 22:01:03 2013
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 25 Dec 2013 22:01:03 +0100 (CET)
Subject: [pypy-commit] pypy jit-ordereddict: It doesn't make sense for bytearray() to return a constant,
Message-ID: <20131225210103.3C8841C019E@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: jit-ordereddict
Changeset: r68545:621e891ddae3
Date: 2013-12-25 22:00 +0100
http://bitbucket.org/pypy/pypy/changeset/621e891ddae3/

Log:  It doesn't make sense for bytearray() to return a constant, either
      at annotation or during rtyping.
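The point is that a bytearray is mutable: if the annotator folded a call like bytearray("foo") into a single prebuilt constant, every execution of that call site would share one object, together with any in-place changes already made to it. A minimal sketch of the pattern this protects, modelled on the test added below (plain Python, the names are only illustrative):

    def f(x):
        total = 0
        i = 0
        while i < x:
            b = bytearray("foo")    # must be a fresh mutable object each time
            b[0] = b[0] + 1         # in-place mutation
            total += b[0]
            i += 1
        return total

If bytearray("foo") were a constant, the second iteration would already see the first byte incremented once; with this change the annotator only ever records a non-constant SomeByteArray, and the rtyper always goes through ll_str2bytearray.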
diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -122,7 +122,7 @@ return constpropagate(unicode, [s_unicode], SomeUnicodeString()) def builtin_bytearray(s_str): - return constpropagate(bytearray, [s_str], SomeByteArray()) + return SomeByteArray() def our_issubclass(cls1, cls2): """ we're going to try to be less silly in the face of old-style classes""" diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3987,7 +3987,9 @@ return bytearray("xyz") a = self.RPythonAnnotator() - assert isinstance(a.build_types(f, []), annmodel.SomeByteArray) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeByteArray) + assert not s.is_constant() # never a constant! def test_bytearray_add(self): def f(a): diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -400,10 +400,6 @@ return hop.gendirectcall(self.ll.ll_str2unicode, v_str) def rtype_bytearray(self, hop): - if hop.args_s[0].is_constant(): - # convertion errors occur during annotation, so cannot any more: - hop.exception_cannot_occur() - return hop.inputconst(hop.r_result, hop.s_result.const) hop.exception_is_here() return hop.gendirectcall(self.ll.ll_str2bytearray, hop.inputarg(hop.args_r[0].repr, 0)) diff --git a/rpython/rtyper/test/test_rbytearray.py b/rpython/rtyper/test/test_rbytearray.py --- a/rpython/rtyper/test/test_rbytearray.py +++ b/rpython/rtyper/test/test_rbytearray.py @@ -57,3 +57,17 @@ ll_res = self.interpret(f, [123]) assert hlstr(ll_res) == "2" + + def test_bytearray_not_constant(self): + for constant in ['f', 'foo']: + def f(x): + i = 0 + total = 0 + while i < x: + b = bytearray(constant) + b[0] = b[0] + 1 + total += b[0] + i += 1 + return total + ll_res = self.interpret(f, [5]) + assert ll_res == f(5) From noreply at buildbot.pypy.org Wed Dec 25 22:13:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 22:13:57 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: An extra passing test: bytearray slicing returns a bytearray, not a string Message-ID: <20131225211357.66BC01C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68546:d861b2b9953e Date: 2013-12-25 22:05 +0100 http://bitbucket.org/pypy/pypy/changeset/d861b2b9953e/ Log: An extra passing test: bytearray slicing returns a bytearray, not a string diff --git a/rpython/rtyper/test/test_rbytearray.py b/rpython/rtyper/test/test_rbytearray.py --- a/rpython/rtyper/test/test_rbytearray.py +++ b/rpython/rtyper/test/test_rbytearray.py @@ -53,10 +53,13 @@ def test_getslice(self): def f(x): - return str(bytearray(str(x))[1:2]) + b = bytearray(str(x)) + b = b[1:3] + b[0] += 5 + return str(b) - ll_res = self.interpret(f, [123]) - assert hlstr(ll_res) == "2" + ll_res = self.interpret(f, [12345]) + assert hlstr(ll_res) == f(12345) == "73" def test_bytearray_not_constant(self): for constant in ['f', 'foo']: From noreply at buildbot.pypy.org Wed Dec 25 22:13:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 22:13:58 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: Add a test for bytearray in the JIT (not passing) Message-ID: <20131225211358.9052D1C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68547:34fbea034534 Date: 2013-12-25 22:13 +0100 
http://bitbucket.org/pypy/pypy/changeset/34fbea034534/ Log: Add a test for bytearray in the JIT (not passing) diff --git a/rpython/jit/metainterp/test/test_bytearray.py b/rpython/jit/metainterp/test/test_bytearray.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_bytearray.py @@ -0,0 +1,47 @@ +import py +from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.rlib.jit import JitDriver + + +class TestByteArray(LLJitMixin): + + def test_getitem(self): + x = bytearray("foobar") + def fn(n): + return x[n] + res = self.interp_operations(fn, [3]) + assert res == ord('b') + + def test_len(self): + x = bytearray("foobar") + def fn(n): + return len(x) + res = self.interp_operations(fn, [3]) + assert res == 6 + + def test_setitem(self): + x = bytearray("foobar") + def fn(n): + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [3]) + assert res == 3 + 1000 * ord('a') + + def test_new_bytearray(self): + def fn(n, m): + x = bytearray(str(n)) + x[m] = 4 + return int(str(x)) + + res = self.interp_operations(fn, [610978, 3]) + assert res == 610478 + + def test_slice(self): + def fn(n): + x = bytearray(str(n)) + x = x[1:5] + x[m] = 5 + return int(str(x)) + res = self.interp_operations(fn, [610978, 1]) + assert res == 1597 From noreply at buildbot.pypy.org Wed Dec 25 23:12:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 23:12:02 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: Start Message-ID: <20131225221202.18AF31C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68548:41fe706f73de Date: 2013-12-25 22:36 +0100 http://bitbucket.org/pypy/pypy/changeset/41fe706f73de/ Log: Start diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -125,10 +125,12 @@ class ArrayDescr(AbstractDescr): def __init__(self, A): - self.A = A + self.A = self.OUTERA = A + if isinstance(A, lltype.Struct): + self.A = A._flds[A._arrayfld] def __repr__(self): - return 'ArrayDescr(%r)' % (self.A,) + return 'ArrayDescr(%r)' % (self.OUTERA,) def is_array_of_pointers(self): return getkind(self.A.OF) == 'ref' diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -424,3 +424,11 @@ " > >") # caching: assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) + +def test_bytearray_descr(): + c0 = GcCache(False) + descr = get_array_descr(c0, rstr.STR) # for bytearray + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == struct.calcsize("PP") # hash, length + assert descr.lendescr.offset == struct.calcsize("P") # hash + assert not descr.is_array_of_pointers() diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -13,6 +13,7 @@ from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rclass, rffi +from rpython.rtyper.lltypesystem import rbytearray from rpython.rtyper.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY from rpython.translator.unsimplify import varoftype @@ -850,6 +851,13 @@ elif optype == lltype.Ptr(rstr.UNICODE): opname = "unicodegetitem" return SpaceOperation(opname, 
[op.args[0], op.args[2]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + v_index = op.args[2] + op = SpaceOperation('getarrayitem_gc_i', + [op.args[0], v_index, bytearraydescr], + op.result) + return op else: v_inst, v_index, c_field = op.args if op.result.concretetype is lltype.Void: diff --git a/rpython/jit/metainterp/test/test_bytearray.py b/rpython/jit/metainterp/test/test_bytearray.py --- a/rpython/jit/metainterp/test/test_bytearray.py +++ b/rpython/jit/metainterp/test/test_bytearray.py @@ -8,10 +8,18 @@ def test_getitem(self): x = bytearray("foobar") def fn(n): + assert n >= 0 return x[n] res = self.interp_operations(fn, [3]) assert res == ord('b') + def test_getitem_negative(self): + x = bytearray("foobar") + def fn(n): + return x[n] + res = self.interp_operations(fn, [-2]) + assert res == ord('a') + def test_len(self): x = bytearray("foobar") def fn(n): @@ -22,12 +30,22 @@ def test_setitem(self): x = bytearray("foobar") def fn(n): + assert n >= 0 x[n] = 3 return x[3] + 1000 * x[4] res = self.interp_operations(fn, [3]) assert res == 3 + 1000 * ord('a') + def test_setitem_negative(self): + x = bytearray("foobar") + def fn(n): + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [-2]) + assert res == ord('b') + 1000 * 3 + def test_new_bytearray(self): def fn(n, m): x = bytearray(str(n)) From noreply at buildbot.pypy.org Wed Dec 25 23:12:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 23:12:03 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: bytearray length Message-ID: <20131225221203.5BED41C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68549:10bafee08db7 Date: 2013-12-25 22:43 +0100 http://bitbucket.org/pypy/pypy/changeset/10bafee08db7/ Log: bytearray length diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -426,6 +426,8 @@ def bh_arraylen_gc(self, a, descr): array = a._obj.container + if descr.A is not descr.OUTERA: + array = getattr(array, descr.OUTERA._arrayfld) return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -837,9 +837,14 @@ optype = op.args[0].concretetype if optype == lltype.Ptr(rstr.STR): opname = "strlen" + elif optype == lltype.Ptr(rstr.UNICODE): + opname = "unicodelen" + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + return SpaceOperation('arraylen_gc', [op.args[0], bytearraydescr], + op.result) else: - assert optype == lltype.Ptr(rstr.UNICODE) - opname = "unicodelen" + assert 0, "supported type %r" % (optype,) return SpaceOperation(opname, [op.args[0]], op.result) def rewrite_op_getinteriorfield(self, op): @@ -854,10 +859,9 @@ elif optype == lltype.Ptr(rbytearray.BYTEARRAY): bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) v_index = op.args[2] - op = SpaceOperation('getarrayitem_gc_i', - [op.args[0], v_index, bytearraydescr], - op.result) - return op + return SpaceOperation('getarrayitem_gc_i', + [op.args[0], v_index, bytearraydescr], + op.result) else: v_inst, v_index, c_field = op.args if op.result.concretetype is lltype.Void: From noreply at buildbot.pypy.org Wed Dec 25 23:12:04 2013 
From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 23:12:04 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: A few more operations that don't return a constant bytearray. Message-ID: <20131225221204.77FBB1C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68550:708ec7898dbc Date: 2013-12-25 22:59 +0100 http://bitbucket.org/pypy/pypy/changeset/708ec7898dbc/ Log: A few more operations that don't return a constant bytearray. diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -412,10 +412,7 @@ return SomeByteArray(can_be_None=can_be_None) def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeByteArray, SomeInteger)): def getitem((s_b, s_i)): @@ -429,10 +426,7 @@ pairtype(SomeChar, SomeByteArray), pairtype(SomeByteArray, SomeChar)): def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeChar, SomeChar)): diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -215,7 +215,8 @@ class SomeStringOrUnicode(SomeObject): - """Base class for shared implementation of SomeString and SomeUnicodeString. + """Base class for shared implementation of SomeString, + SomeUnicodeString and SomeByteArray. Cannot be an annotation.""" @@ -228,6 +229,7 @@ if can_be_None: self.can_be_None = True if no_nul: + assert self.immutable #'no_nul' cannot be used with SomeByteArray self.no_nul = True def can_be_none(self): @@ -263,6 +265,7 @@ class SomeByteArray(SomeStringOrUnicode): + immutable = False knowntype = bytearray From noreply at buildbot.pypy.org Wed Dec 25 23:12:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 23:12:05 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: in-progress Message-ID: <20131225221205.9934C1C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68551:d646fef13877 Date: 2013-12-25 23:07 +0100 http://bitbucket.org/pypy/pypy/changeset/d646fef13877/ Log: in-progress diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -888,6 +888,11 @@ opname = "unicodesetitem" return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + opname = "setarrayitem_gc_i" + return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3], + bytearraydescr], op.result) else: v_inst, v_index, c_field, v_value = op.args if v_value.concretetype is lltype.Void: diff --git a/rpython/jit/metainterp/test/test_bytearray.py b/rpython/jit/metainterp/test/test_bytearray.py --- a/rpython/jit/metainterp/test/test_bytearray.py +++ b/rpython/jit/metainterp/test/test_bytearray.py @@ -1,7 +1,6 @@ import py from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.rlib.jit import JitDriver - +from rpython.rlib.jit import JitDriver, dont_look_inside class TestByteArray(LLJitMixin): @@ -28,9 +27,12 @@ assert res == 6 def test_setitem(self): - x 
= bytearray("foobar") + @dont_look_inside + def make_me(): + return bytearray("foobar") def fn(n): assert n >= 0 + x = make_me() x[n] = 3 return x[3] + 1000 * x[4] @@ -38,8 +40,11 @@ assert res == 3 + 1000 * ord('a') def test_setitem_negative(self): - x = bytearray("foobar") + @dont_look_inside + def make_me(): + return bytearray("foobar") def fn(n): + x = make_me() x[n] = 3 return x[3] + 1000 * x[4] @@ -49,17 +54,18 @@ def test_new_bytearray(self): def fn(n, m): x = bytearray(str(n)) - x[m] = 4 + x[m] = 0x34 return int(str(x)) + assert fn(610978, 3) == 610478 res = self.interp_operations(fn, [610978, 3]) assert res == 610478 def test_slice(self): - def fn(n): + def fn(n, m): x = bytearray(str(n)) x = x[1:5] - x[m] = 5 + x[m] = 0x35 return int(str(x)) res = self.interp_operations(fn, [610978, 1]) assert res == 1597 From noreply at buildbot.pypy.org Wed Dec 25 23:12:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 25 Dec 2013 23:12:06 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: Don't support bytearray slicing right now Message-ID: <20131225221206.BB9BC1C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68552:91defbfab17a Date: 2013-12-25 23:11 +0100 http://bitbucket.org/pypy/pypy/changeset/91defbfab17a/ Log: Don't support bytearray slicing right now diff --git a/rpython/jit/metainterp/test/test_bytearray.py b/rpython/jit/metainterp/test/test_bytearray.py --- a/rpython/jit/metainterp/test/test_bytearray.py +++ b/rpython/jit/metainterp/test/test_bytearray.py @@ -62,6 +62,7 @@ assert res == 610478 def test_slice(self): + py.test.skip("XXX later") def fn(n, m): x = bytearray(str(n)) x = x[1:5] @@ -69,3 +70,13 @@ return int(str(x)) res = self.interp_operations(fn, [610978, 1]) assert res == 1597 + + def test_bytearray_from_bytearray(self): + def fn(n, m): + x = bytearray(str(n)) + y = bytearray(x) + x[m] = 0x34 + return int(str(x)) + int(str(y)) + + res = self.interp_operations(fn, [610978, 3]) + assert res == 610478 + 610978 From noreply at buildbot.pypy.org Thu Dec 26 00:01:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 26 Dec 2013 00:01:13 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: 'getarraysubstruct' support in the JIT Message-ID: <20131225230113.EC8221C02DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68553:fa8c58fec72a Date: 2013-12-26 00:00 +0100 http://bitbucket.org/pypy/pypy/changeset/fa8c58fec72a/ Log: 'getarraysubstruct' support in the JIT diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -644,6 +644,12 @@ return SpaceOperation('arraylen_gc', [op.args[0], arraydescr], op.result) + def rewrite_op_getarraysubstruct(self, op): + ARRAY = op.args[0].concretetype.TO + assert ARRAY._gckind == 'raw' + assert ARRAY._hints.get('nolength') is True + return self.rewrite_op_direct_ptradd(op) + def _array_of_voids(self, ARRAY): return ARRAY.OF == lltype.Void diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -71,5 +71,25 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + def test_getarraysubstruct(self): + A2 = lltype.Array(('a', lltype.Signed), ('b', lltype.Signed), + hints={'nolength': True}) + p = lltype.malloc(A2, 10, flavor='raw', immortal=True, zero=True) + p[2].b = 689 + def f(n, m): + 
p[n].a = 55 + p[n].b = 44 + p[4].b = 66 + return p[m].b + + # run with 'disable_optimizations' to prevent an error + # 'Symbolics cannot be compared!' in the optimizer for int_mul + res = self.interp_operations(f, [7, 2], disable_optimizations=True) + assert res == 689 + res = self.interp_operations(f, [7, 4], disable_optimizations=True) + assert res == 66 + res = self.interp_operations(f, [2, 2], disable_optimizations=True) + assert res == 44 + class TestRawMem(RawMemTests, LLJitMixin): pass diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -403,7 +403,12 @@ # bigger structure at once parent, parentindex = lltype.parentlink(container) if parent is not None: - convert_struct(parent) + if isinstance(parent, lltype._struct): + convert_struct(parent) + elif isinstance(parent, lltype._array): + convert_array(parent) + else: + raise AssertionError(type(parent)) return # regular case: allocate a new ctypes Structure of the proper type cls = get_ctypes_type(STRUCT) diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -553,6 +553,12 @@ if isinstance(ofs, FieldOffset) and ofs.TYPE is self.adr.ptr._TYPE.TO: fieldadr = getattr(self.adr.ptr, ofs.fldname) return AddressAsInt(cast_ptr_to_adr(fieldadr)) + if (isinstance(ofs, ItemOffset) and + isinstance(self.adr.ptr._TYPE.TO, lltype.Array) and + self.adr.ptr._TYPE.TO._hints.get('nolength') is True and + ofs.TYPE is self.adr.ptr._TYPE.TO.OF): + itemadr = self.adr.ptr[ofs.repeat] + return AddressAsInt(cast_ptr_to_adr(itemadr)) return NotImplemented def __repr__(self): try: diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -1433,3 +1433,14 @@ def test_llgcopaque_eq(self): assert _llgcopaque(1) != None assert _llgcopaque(0) == None + + def test_array_of_struct(self): + A2 = lltype.Array(('a', lltype.Signed), ('b', lltype.Signed)) + a = lltype.malloc(A2, 10, flavor='raw') + a[3].b = 42 + ac = lltype2ctypes(a[3]) + assert ac.contents.b == 42 + ac.contents.a = 17 + assert a[3].a == 17 + #lltype.free(a, flavor='raw') + py.test.skip("free() not working correctly here...") From noreply at buildbot.pypy.org Thu Dec 26 00:14:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 26 Dec 2013 00:14:00 +0100 (CET) Subject: [pypy-commit] pypy jit-ordereddict: Skip optimizing the standard string oopspec'ed operations if we try to Message-ID: <20131225231400.B07B41C019E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-ordereddict Changeset: r68554:69dbe74ad96b Date: 2013-12-26 00:09 +0100 http://bitbucket.org/pypy/pypy/changeset/69dbe74ad96b/ Log: Skip optimizing the standard string oopspec'ed operations if we try to do them on bytearrays. 
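The low-level bytearray helpers appear to share the rstr string routines, so calls on a BYTEARRAY can reach the codewriter carrying the same "stroruni.*" oopspecs as rstr strings; the JIT's string optimizations assume the contents never change after creation, which does not hold for a mutable bytearray, so raising NotSupported here presumably just leaves such calls as ordinary residual calls. A rough plain-Python sketch of the semantic point (not taken from the changeset):

    b = bytearray("abc")
    c = b + b                          # string-style operation on mutable data
    b[0] = ord("x")                    # b can still change afterwards...
    assert c == bytearray("abcabc")    # ...but c must keep the old contents

For an immutable str the optimizer can virtualize or delay such a concatenation; for bytearray the safe choice, at least for now, is not to treat it specially at all.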
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1732,6 +1732,8 @@
                 "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW
                 }
             CHR = lltype.UniChar
+        elif SoU.TO == rbytearray.BYTEARRAY:
+            raise NotSupported("bytearray operation")
         else:
             assert 0, "args[0].concretetype must be STR or UNICODE"
         #

From noreply at buildbot.pypy.org Thu Dec 26 00:24:17 2013
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 26 Dec 2013 00:24:17 +0100 (CET)
Subject: [pypy-commit] pypy jit-ordereddict: skip this test for now
Message-ID: <20131225232417.69B4E1C10A9@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: jit-ordereddict
Changeset: r68555:8e99485edb80
Date: 2013-12-26 00:23 +0100
http://bitbucket.org/pypy/pypy/changeset/8e99485edb80/

Log:  skip this test for now

diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py
--- a/rpython/jit/metainterp/test/test_dict.py
+++ b/rpython/jit/metainterp/test/test_dict.py
@@ -216,3 +216,6 @@
 
         res = self.interp_operations(fn, [0])
         assert res == fn(0)
+
+    def test_unrolling_of_dict_iter(self):
+        py.test.skip("XXX fix me: ordereddict generates a mess for now")

From noreply at buildbot.pypy.org Thu Dec 26 00:25:47 2013
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 26 Dec 2013 00:25:47 +0100 (CET)
Subject: [pypy-commit] pypy jit-ordereddict: Close branch ready to merge
Message-ID: <20131225232547.58B9B1C10A9@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: jit-ordereddict
Changeset: r68556:1569615b24f3
Date: 2013-12-26 00:24 +0100
http://bitbucket.org/pypy/pypy/changeset/1569615b24f3/

Log:  Close branch ready to merge

From noreply at buildbot.pypy.org Thu Dec 26 00:25:48 2013
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 26 Dec 2013 00:25:48 +0100 (CET)
Subject: [pypy-commit] pypy default: Merge jit-ordereddict: add minimal support in the JIT for OrderedDict,
Message-ID: <20131225232548.8B2651C10A9@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: 
Changeset: r68557:d3aa9224a271
Date: 2013-12-26 00:25 +0100
http://bitbucket.org/pypy/pypy/changeset/d3aa9224a271/

Log:  Merge jit-ordereddict: add minimal support in the JIT for OrderedDict,
      for bytearray, and for a few extra details, motivated by Hippy.
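Concretely, the merge means that RPython code along the following lines, modelled on the new test_dict.py tests rather than copied from them, is now something the JIT can handle:

    from collections import OrderedDict

    def fn(n):
        d = OrderedDict()
        d[3] = 5
        d[n] = 9
        d[2] = 6
        d[1] = 4
        lst = d.items()     # items(), keys(), values() and iteration are now
                            # routed through the new odict.* helpers
        return (lst[0][0] + 10 * lst[1][0] +
                100 * lst[2][0] + 1000 * lst[3][0])

Iterating over an OrderedDict inside a trace works as well, although, judging from the test that is still skipped (test_unrolling_of_dict_iter), the iteration is not yet unrolled as cleanly as for plain dicts.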
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -412,10 +412,7 @@ return SomeByteArray(can_be_None=can_be_None) def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeByteArray, SomeInteger)): def getitem((s_b, s_i)): @@ -429,10 +426,7 @@ pairtype(SomeChar, SomeByteArray), pairtype(SomeByteArray, SomeChar)): def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeChar, SomeChar)): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -122,7 +122,7 @@ return constpropagate(unicode, [s_unicode], SomeUnicodeString()) def builtin_bytearray(s_str): - return constpropagate(bytearray, [s_str], SomeByteArray()) + return SomeByteArray() def our_issubclass(cls1, cls2): """ we're going to try to be less silly in the face of old-style classes""" diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -215,7 +215,8 @@ class SomeStringOrUnicode(SomeObject): - """Base class for shared implementation of SomeString and SomeUnicodeString. + """Base class for shared implementation of SomeString, + SomeUnicodeString and SomeByteArray. Cannot be an annotation.""" @@ -228,6 +229,7 @@ if can_be_None: self.can_be_None = True if no_nul: + assert self.immutable #'no_nul' cannot be used with SomeByteArray self.no_nul = True def can_be_none(self): @@ -263,6 +265,7 @@ class SomeByteArray(SomeStringOrUnicode): + immutable = False knowntype = bytearray diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3987,7 +3987,9 @@ return bytearray("xyz") a = self.RPythonAnnotator() - assert isinstance(a.build_types(f, []), annmodel.SomeByteArray) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeByteArray) + assert not s.is_constant() # never a constant! 
def test_bytearray_add(self): def f(a): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -125,10 +125,12 @@ class ArrayDescr(AbstractDescr): def __init__(self, A): - self.A = A + self.A = self.OUTERA = A + if isinstance(A, lltype.Struct): + self.A = A._flds[A._arrayfld] def __repr__(self): - return 'ArrayDescr(%r)' % (self.A,) + return 'ArrayDescr(%r)' % (self.OUTERA,) def is_array_of_pointers(self): return getkind(self.A.OF) == 'ref' @@ -424,6 +426,8 @@ def bh_arraylen_gc(self, a, descr): array = a._obj.container + if descr.A is not descr.OUTERA: + array = getattr(array, descr.OUTERA._arrayfld) return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -424,3 +424,11 @@ " > >") # caching: assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) + +def test_bytearray_descr(): + c0 = GcCache(False) + descr = get_array_descr(c0, rstr.STR) # for bytearray + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == struct.calcsize("PP") # hash, length + assert descr.lendescr.offset == struct.calcsize("P") # hash + assert not descr.is_array_of_pointers() diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -13,6 +13,7 @@ from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rclass, rffi +from rpython.rtyper.lltypesystem import rbytearray from rpython.rtyper.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY from rpython.translator.unsimplify import varoftype @@ -643,6 +644,12 @@ return SpaceOperation('arraylen_gc', [op.args[0], arraydescr], op.result) + def rewrite_op_getarraysubstruct(self, op): + ARRAY = op.args[0].concretetype.TO + assert ARRAY._gckind == 'raw' + assert ARRAY._hints.get('nolength') is True + return self.rewrite_op_direct_ptradd(op) + def _array_of_voids(self, ARRAY): return ARRAY.OF == lltype.Void @@ -836,9 +843,14 @@ optype = op.args[0].concretetype if optype == lltype.Ptr(rstr.STR): opname = "strlen" + elif optype == lltype.Ptr(rstr.UNICODE): + opname = "unicodelen" + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + return SpaceOperation('arraylen_gc', [op.args[0], bytearraydescr], + op.result) else: - assert optype == lltype.Ptr(rstr.UNICODE) - opname = "unicodelen" + assert 0, "supported type %r" % (optype,) return SpaceOperation(opname, [op.args[0]], op.result) def rewrite_op_getinteriorfield(self, op): @@ -850,6 +862,12 @@ elif optype == lltype.Ptr(rstr.UNICODE): opname = "unicodegetitem" return SpaceOperation(opname, [op.args[0], op.args[2]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + v_index = op.args[2] + return SpaceOperation('getarrayitem_gc_i', + [op.args[0], v_index, bytearraydescr], + op.result) else: v_inst, v_index, c_field = op.args if op.result.concretetype is lltype.Void: @@ -876,6 +894,11 @@ opname = "unicodesetitem" return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3]], op.result) + elif optype == 
lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + opname = "setarrayitem_gc_i" + return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3], + bytearraydescr], op.result) else: v_inst, v_index, c_field, v_value = op.args if v_value.concretetype is lltype.Void: @@ -1709,6 +1732,8 @@ "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar + elif SoU.TO == rbytearray.BYTEARRAY: + raise NotSupported("bytearray operation") else: assert 0, "args[0].concretetype must be STR or UNICODE" # diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -14,6 +14,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory, rstr as ll_rstr, rdict as ll_rdict +from rpython.rtyper.lltypesystem import rordereddict from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.module import ll_math from rpython.translator.translator import TranslationContext @@ -492,11 +493,6 @@ # ---------- dict ---------- - def _ll_0_newdict(DICT): - return ll_rdict.ll_newdict(DICT) - _ll_0_newdict.need_result_type = True - - _ll_2_dict_delitem = ll_rdict.ll_dict_delitem _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update @@ -524,6 +520,33 @@ _ll_1_dict_resize = ll_rdict.ll_dict_resize + # ---------- ordered dict ---------- + + _ll_1_odict_copy = rordereddict.ll_dict_copy + _ll_1_odict_clear = rordereddict.ll_dict_clear + _ll_2_odict_update = rordereddict.ll_dict_update + + _ll_1_odict_keys = rordereddict.ll_dict_keys + _ll_1_odict_values = rordereddict.ll_dict_values + _ll_1_odict_items = rordereddict.ll_dict_items + _ll_1_odict_keys .need_result_type = True + _ll_1_odict_values.need_result_type = True + _ll_1_odict_items .need_result_type = True + + _odictnext_keys = staticmethod(rordereddict.ll_dictnext_group['keys']) + _odictnext_values = staticmethod(rordereddict.ll_dictnext_group['values']) + _odictnext_items = staticmethod(rordereddict.ll_dictnext_group['items']) + + def _ll_1_odictiter_nextkeys(iter): + return LLtypeHelpers._odictnext_keys(None, iter) + def _ll_1_odictiter_nextvalues(iter): + return LLtypeHelpers._odictnext_values(None, iter) + def _ll_1_odictiter_nextitems(RES, iter): + return LLtypeHelpers._odictnext_items(lltype.Ptr(RES), iter) + _ll_1_odictiter_nextitems.need_result_type = True + + _ll_1_odict_resize = rordereddict.ll_dict_resize + # ---------- strings and unicode ---------- _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode diff --git a/rpython/jit/metainterp/test/test_bytearray.py b/rpython/jit/metainterp/test/test_bytearray.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_bytearray.py @@ -0,0 +1,82 @@ +import py +from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.rlib.jit import JitDriver, dont_look_inside + +class TestByteArray(LLJitMixin): + + def test_getitem(self): + x = bytearray("foobar") + def fn(n): + assert n >= 0 + return x[n] + res = self.interp_operations(fn, [3]) + assert res == ord('b') + + def test_getitem_negative(self): + x = bytearray("foobar") + def fn(n): + return x[n] + res = self.interp_operations(fn, [-2]) + assert res == ord('a') + + def test_len(self): + x = bytearray("foobar") + def fn(n): + return len(x) + res = 
self.interp_operations(fn, [3]) + assert res == 6 + + def test_setitem(self): + @dont_look_inside + def make_me(): + return bytearray("foobar") + def fn(n): + assert n >= 0 + x = make_me() + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [3]) + assert res == 3 + 1000 * ord('a') + + def test_setitem_negative(self): + @dont_look_inside + def make_me(): + return bytearray("foobar") + def fn(n): + x = make_me() + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [-2]) + assert res == ord('b') + 1000 * 3 + + def test_new_bytearray(self): + def fn(n, m): + x = bytearray(str(n)) + x[m] = 0x34 + return int(str(x)) + + assert fn(610978, 3) == 610478 + res = self.interp_operations(fn, [610978, 3]) + assert res == 610478 + + def test_slice(self): + py.test.skip("XXX later") + def fn(n, m): + x = bytearray(str(n)) + x = x[1:5] + x[m] = 0x35 + return int(str(x)) + res = self.interp_operations(fn, [610978, 1]) + assert res == 1597 + + def test_bytearray_from_bytearray(self): + def fn(n, m): + x = bytearray(str(n)) + y = bytearray(x) + x[m] = 0x34 + return int(str(x)) + int(str(y)) + + res = self.interp_operations(fn, [610978, 3]) + assert res == 610478 + 610978 diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -2,12 +2,15 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver from rpython.rlib import objectmodel +from collections import OrderedDict class DictTests: + def _freeze_(self): + return True def test_dict_set_none(self): def fn(n): - d = {} + d = self.newdict() d[0] = None return bool(d[n]) res = self.interp_operations(fn, [0]) @@ -21,7 +24,7 @@ ]: myjitdriver = JitDriver(greens = [], reds = ['n', 'dct']) def f(n): - dct = {} + dct = self.newdict() while n > 0: myjitdriver.can_enter_jit(n=n, dct=dct) myjitdriver.jit_merge_point(n=n, dct=dct) @@ -51,7 +54,9 @@ ]: myjitdriver = JitDriver(greens = [], reds = ['total', 'it']) def f(n): - dct = {n: 100, 50: n+1} + dct = self.newdict() + dct[n] = 100 + dct[50] = n + 1 it = getattr(dct, name)() total = 0 while True: @@ -71,6 +76,8 @@ assert res == expected def test_dict_trace_hash(self): + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) def key(x): return x % 2 @@ -96,7 +103,7 @@ def test_dict_setdefault(self): myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) def f(n): - dct = {} + dct = self.newdict() total = n while total: myjitdriver.jit_merge_point(total=total, dct=dct) @@ -110,6 +117,8 @@ self.check_resops(new=0, new_with_vtable=0) def test_dict_as_counter(self): + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) def key(x): return x % 2 @@ -131,6 +140,8 @@ self.check_resops(int_mod=2) # key + eq, but cached def test_repeated_lookup(self): + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") myjitdriver = JitDriver(greens = [], reds = ['n', 'd']) class Wrapper(object): _immutable_fields_ = ["value"] @@ -167,7 +178,8 @@ def f(n): while n > 0: driver.jit_merge_point(n=n) - d = {1: 1} + d = self.newdict() + d[1] = 1 for elem in d: n -= elem return n @@ -179,4 +191,31 @@ class TestLLtype(DictTests, LLJitMixin): - pass + @staticmethod + def newdict(): + return {} + +class 
TestLLOrderedDict(DictTests, LLJitMixin): + @staticmethod + def newdict(): + return OrderedDict() + + def test_dict_is_ordered(self): + def fn(n): + d = OrderedDict() + d[3] = 5 + d[n] = 9 + d[2] = 6 + d[1] = 4 + lst = d.items() + assert len(lst) == 4 + return ( lst[0][0] + 10*lst[0][1] + + 100*lst[1][0] + 1000*lst[1][1] + + 10000*lst[3][0] + 100000*lst[2][1] + + 1000000*lst[2][0] + 10000000*lst[3][1]) + + res = self.interp_operations(fn, [0]) + assert res == fn(0) + + def test_unrolling_of_dict_iter(self): + py.test.skip("XXX fix me: ordereddict generates a mess for now") diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -71,5 +71,25 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + def test_getarraysubstruct(self): + A2 = lltype.Array(('a', lltype.Signed), ('b', lltype.Signed), + hints={'nolength': True}) + p = lltype.malloc(A2, 10, flavor='raw', immortal=True, zero=True) + p[2].b = 689 + def f(n, m): + p[n].a = 55 + p[n].b = 44 + p[4].b = 66 + return p[m].b + + # run with 'disable_optimizations' to prevent an error + # 'Symbolics cannot be compared!' in the optimizer for int_mul + res = self.interp_operations(f, [7, 2], disable_optimizations=True) + assert res == 689 + res = self.interp_operations(f, [7, 4], disable_optimizations=True) + assert res == 66 + res = self.interp_operations(f, [2, 2], disable_optimizations=True) + assert res == 44 + class TestRawMem(RawMemTests, LLJitMixin): pass diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -403,7 +403,12 @@ # bigger structure at once parent, parentindex = lltype.parentlink(container) if parent is not None: - convert_struct(parent) + if isinstance(parent, lltype._struct): + convert_struct(parent) + elif isinstance(parent, lltype._array): + convert_array(parent) + else: + raise AssertionError(type(parent)) return # regular case: allocate a new ctypes Structure of the proper type cls = get_ctypes_type(STRUCT) diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -553,6 +553,12 @@ if isinstance(ofs, FieldOffset) and ofs.TYPE is self.adr.ptr._TYPE.TO: fieldadr = getattr(self.adr.ptr, ofs.fldname) return AddressAsInt(cast_ptr_to_adr(fieldadr)) + if (isinstance(ofs, ItemOffset) and + isinstance(self.adr.ptr._TYPE.TO, lltype.Array) and + self.adr.ptr._TYPE.TO._hints.get('nolength') is True and + ofs.TYPE is self.adr.ptr._TYPE.TO.OF): + itemadr = self.adr.ptr[ofs.repeat] + return AddressAsInt(cast_ptr_to_adr(itemadr)) return NotImplemented def __repr__(self): try: diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -663,7 +663,7 @@ ll_dict_remove_deleted_items(d) else: ll_dict_reindex(d, new_size) -ll_dict_resize.oopspec = 'dict.resize(d)' +ll_dict_resize.oopspec = 'odict.resize(d)' def ll_dict_reindex(d, new_size): ll_malloc_indexes_and_choose_lookup(d, new_size) @@ -899,7 +899,7 @@ @jit.look_inside_iff(lambda RETURNTYPE, iter: jit.isvirtual(iter) and (iter.dict is None or jit.isvirtual(iter.dict))) - @jit.oopspec("dictiter.next%s(iter)" % kind) + 
@jit.oopspec("odictiter.next%s(iter)" % kind) def ll_dictnext(RETURNTYPE, iter): # note that RETURNTYPE is None for keys and values dict = iter.dict @@ -984,7 +984,7 @@ ll_dict_reindex(newdict, _ll_len_of_d_indexes(dict)) return newdict -ll_dict_copy.oopspec = 'dict.copy(dict)' +ll_dict_copy.oopspec = 'odict.copy(dict)' def ll_dict_clear(d): if d.num_used_items == 0: @@ -997,7 +997,7 @@ d.num_used_items = 0 d.resize_counter = DICT_INITSIZE * 2 # old_entries.delete() XXX -ll_dict_clear.oopspec = 'dict.clear(d)' +ll_dict_clear.oopspec = 'odict.clear(d)' def ll_dict_update(dic1, dic2): i = 0 @@ -1011,7 +1011,7 @@ index = dic1.lookup_function(dic1, key, hash, FLAG_STORE) _ll_dict_setitem_lookup_done(dic1, key, value, hash, index) i += 1 -ll_dict_update.oopspec = 'dict.update(dic1, dic2)' +ll_dict_update.oopspec = 'odict.update(dic1, dic2)' # this is an implementation of keys(), values() and items() # in a single function. @@ -1050,7 +1050,7 @@ i += 1 assert p == res.ll_length() return res - ll_kvi.oopspec = 'dict.%s(dic)' % kind + ll_kvi.oopspec = 'odict.%s(dic)' % kind return ll_kvi ll_dict_keys = _make_ll_keys_values_items('keys') diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -1433,3 +1433,14 @@ def test_llgcopaque_eq(self): assert _llgcopaque(1) != None assert _llgcopaque(0) == None + + def test_array_of_struct(self): + A2 = lltype.Array(('a', lltype.Signed), ('b', lltype.Signed)) + a = lltype.malloc(A2, 10, flavor='raw') + a[3].b = 42 + ac = lltype2ctypes(a[3]) + assert ac.contents.b == 42 + ac.contents.a = 17 + assert a[3].a == 17 + #lltype.free(a, flavor='raw') + py.test.skip("free() not working correctly here...") diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -400,10 +400,6 @@ return hop.gendirectcall(self.ll.ll_str2unicode, v_str) def rtype_bytearray(self, hop): - if hop.args_s[0].is_constant(): - # convertion errors occur during annotation, so cannot any more: - hop.exception_cannot_occur() - return hop.inputconst(hop.r_result, hop.s_result.const) hop.exception_is_here() return hop.gendirectcall(self.ll.ll_str2bytearray, hop.inputarg(hop.args_r[0].repr, 0)) diff --git a/rpython/rtyper/test/test_rbytearray.py b/rpython/rtyper/test/test_rbytearray.py --- a/rpython/rtyper/test/test_rbytearray.py +++ b/rpython/rtyper/test/test_rbytearray.py @@ -53,7 +53,24 @@ def test_getslice(self): def f(x): - return str(bytearray(str(x))[1:2]) + b = bytearray(str(x)) + b = b[1:3] + b[0] += 5 + return str(b) - ll_res = self.interpret(f, [123]) - assert hlstr(ll_res) == "2" + ll_res = self.interpret(f, [12345]) + assert hlstr(ll_res) == f(12345) == "73" + + def test_bytearray_not_constant(self): + for constant in ['f', 'foo']: + def f(x): + i = 0 + total = 0 + while i < x: + b = bytearray(constant) + b[0] = b[0] + 1 + total += b[0] + i += 1 + return total + ll_res = self.interpret(f, [5]) + assert ll_res == f(5) From noreply at buildbot.pypy.org Thu Dec 26 09:46:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 26 Dec 2013 09:46:57 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add Johan Message-ID: <20131226084657.CF10F1C02C7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5119:0dccb76b72ee Date: 2013-12-26 09:45 +0100 http://bitbucket.org/pypy/extradoc/changeset/0dccb76b72ee/ Log: Add Johan diff 
--git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -15,6 +15,7 @@ Christian Clauss 11-12 & 18-19 I live nearby Maciej Fijalkowski 11-18 Ermina Remi Meier 11-19 Ermina +Johan R�de 11-19 Ermina ==================== ============== ======================= From noreply at buildbot.pypy.org Thu Dec 26 09:46:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 26 Dec 2013 09:46:59 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Use utf8 instead of latin1 Message-ID: <20131226084659.014A81C02C7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5120:652d06b58383 Date: 2013-12-26 09:46 +0100 http://bitbucket.org/pypy/extradoc/changeset/652d06b58383/ Log: Use utf8 instead of latin1 diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -15,7 +15,7 @@ Christian Clauss 11-12 & 18-19 I live nearby Maciej Fijalkowski 11-18 Ermina Remi Meier 11-19 Ermina -Johan R�de 11-19 Ermina +Johan Råde 11-19 Ermina ==================== ============== ======================= @@ -58,5 +58,5 @@ Guido Wesdorp ? ? Leonardo Santagada ? ? Alexandre Fayolle ? ? -Sylvain Th�nault ? ? +Sylvain Thénault ? ? ==================== ============== ===================== From noreply at buildbot.pypy.org Thu Dec 26 15:18:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 26 Dec 2013 15:18:05 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Change of plans Message-ID: <20131226141805.9FE6C1C02C7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5121:998eceef5671 Date: 2013-12-26 15:17 +0100 http://bitbucket.org/pypy/extradoc/changeset/998eceef5671/ Log: Change of plans diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -15,7 +15,7 @@ Christian Clauss 11-12 & 18-19 I live nearby Maciej Fijalkowski 11-18 Ermina Remi Meier 11-19 Ermina -Johan Råde 11-19 Ermina +Johan Råde 11-18 Ermina ==================== ============== ======================= From noreply at buildbot.pypy.org Fri Dec 27 10:52:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 27 Dec 2013 10:52:20 +0100 (CET) Subject: [pypy-commit] pypy default: issue1665: rematch.group('nonexistent'): convert the KeyError to IndexError Message-ID: <20131227095220.8D62A1C02A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68558:180796aa92c5 Date: 2013-12-27 11:51 +0100 http://bitbucket.org/pypy/pypy/changeset/180796aa92c5/ Log: issue1665: rematch.group('nonexistent'): convert the KeyError to IndexError diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -415,7 +415,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git 
a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -172,6 +172,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re From noreply at buildbot.pypy.org Fri Dec 27 23:30:42 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 27 Dec 2013 23:30:42 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20131227223042.0BA7C1C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68559:0d0486dedd6f Date: 2013-12-27 14:29 -0800 http://bitbucket.org/pypy/pypy/changeset/0d0486dedd6f/ Log: merge default diff too long, truncating to 2000 out of 2284 lines diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! 
extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -415,7 +415,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -172,6 +172,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -131,7 +131,11 @@ if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() - if space.is_none(w_idx): + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + w_val = self.value.descr_getitem(space, w_idx) + return convert_to_array(space, w_val) + elif space.is_none(w_idx): new_shape = [1] arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) @@ -145,6 +149,12 @@ space.wrap("0-d arrays can't be indexed")) def descr_setitem(self, space, _, w_idx, w_val): + if space.isinstance_w(w_idx, space.w_tuple): + if space.len_w(w_idx) == 0: + return self.set_scalar_value(self.dtype.coerce(space, w_val)) + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) @@ -176,7 +186,7 @@ s = self.dtype.itemtype.bool(self.value) w_res = W_NDimArray.from_shape(space, [s], index_type) if s == 1: - w_res.implementation.setitem(0, index_type.itemtype.box(0)) + w_res.implementation.setitem(0, index_type.itemtype.box(0)) return space.newtuple([w_res]) def fill(self, space, w_value): diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -123,7 +123,8 @@ if w_axis is space.w_None: # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) - arr = arr.reshape(space, None, [arr.get_size()]) + if arr.get_size() > 0: + arr = arr.reshape(space, None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -38,7 +38,7 @@ from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: - w_val = dtype.base.coerce(space, space.wrap(0)) + w_val = 
dtype.base.coerce(space, None) impl = scalar.Scalar(dtype.base, w_val) else: strides, backstrides = calc_strides(shape, dtype.base, order) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -11,6 +11,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype @@ -275,14 +276,25 @@ def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype - dtype = space.interp_w(W_Dtype, - space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: - raise OperationError(space.w_TypeError, space.wrap( - "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): - raise OperationError(space.w_ValueError, space.wrap( - "new type not compatible with array.")) + try: + subclass = space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))) + except OperationError, e: + if e.match(space, space.w_TypeError): + subclass = False + else: + raise + if subclass: + dtype = self.get_dtype(space) + else: + dtype = space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) + if dtype.get_size() != self.get_dtype(space).get_size(): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) if dtype.is_str_or_unicode(): return dtype.coerce(space, space.wrap(self.raw_str())) elif dtype.is_record_type(): @@ -350,28 +362,22 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("long") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("ulong") - -class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int64") - -class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('longlong') - -class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint64") - -class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('ulonglong') + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") class 
W_InexactBox(W_NumberBox): pass @@ -427,7 +433,7 @@ self.dtype = dtype def get_dtype(self, space): - return self.arr.dtype + return self.dtype def raw_str(self): return self.arr.dtype.itemtype.to_str(self) @@ -464,13 +470,17 @@ return space.wrap(dtype.itemtype.to_str(read_val)) return read_val - @unwrap_spec(item=str) - def descr_setitem(self, space, item, w_value): + def descr_setitem(self, space, w_item, w_value): + if space.isinstance_w(w_item, space.w_basestring): + item = space.str_w(w_item) + else: + raise OperationError(space.w_IndexError, space.wrap( + "invalid index")) try: ofs, dtype = self.dtype.fields[item] except KeyError: - raise OperationError(space.w_IndexError, - space.wrap("Field %s does not exist" % item)) + raise OperationError(space.w_ValueError, + space.wrap("field named %s not found" % item)) dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) @@ -663,13 +673,6 @@ __reduce__ = interp2app(W_Int64Box.descr_reduce), ) -if LONG_BIT == 32: - W_LongBox = W_Int32Box - W_ULongBox = W_UInt32Box -elif LONG_BIT == 64: - W_LongBox = W_Int64Box - W_ULongBox = W_UInt64Box - W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), @@ -677,6 +680,21 @@ __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) +W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, + (W_SignedIntegerBox.typedef, int_typedef), + __module__ = "numpy", + __new__ = interp2app(W_LongBox.descr__new__.im_func), + __index__ = interp2app(W_LongBox.descr_index), + __reduce__ = interp2app(W_LongBox.descr_reduce), +) + +W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, + __module__ = "numpy", + __new__ = interp2app(W_ULongBox.descr__new__.im_func), + __index__ = interp2app(W_ULongBox.descr_index), + __reduce__ = interp2app(W_ULongBox.descr_reduce), +) + W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, __module__ = "numpy", ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,4 +1,3 @@ -import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -137,6 +136,8 @@ return space.wrap(self.itemtype.alignment) def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) def descr_get_str(self, space): @@ -158,8 +159,20 @@ return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: - raise OperationError(space.w_NotImplementedError, space.wrap( - "descr not implemented for record types")) + descr = [] + for name in self.fieldnames: + subdtype = self.fields[name][1] + subdescr = [space.wrap(name)] + if subdtype.is_record_type(): + subdescr.append(subdtype.descr_get_descr(space)) + elif subdtype.subdtype is not None: + subdescr.append(subdtype.subdtype.descr_get_str(space)) + else: + subdescr.append(subdtype.descr_get_str(space)) + if subdtype.shape != []: + subdescr.append(subdtype.descr_get_shape(space)) + descr.append(space.newtuple(subdescr[:])) + return space.newlist(descr) def descr_get_base(self, space): return space.wrap(self.base) @@ -651,6 +664,7 @@ w_box_type = space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, 
space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox), ], aliases=["float", "double"], ) @@ -680,7 +694,8 @@ name="complex128", char=NPY_CDOUBLELTR, w_box_type = space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex], + alternate_constructors=[space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], float_type = self.w_float64dtype, ) @@ -702,7 +717,8 @@ name='string', char=NPY_STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], + alternate_constructors=[space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( @@ -736,38 +752,21 @@ char=NPY_HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) - ptr_size = rffi.sizeof(rffi.CCHARP) - if ptr_size == 4: - intp_box = interp_boxes.W_Int32Box - intp_type = types.Int32() - intp_num = NPY_INT - uintp_box = interp_boxes.W_UInt32Box - uintp_type = types.UInt32() - uintp_num = NPY_UINT - elif ptr_size == 8: - intp_box = interp_boxes.W_Int64Box - intp_type = types.Int64() - intp_num = NPY_LONG - uintp_box = interp_boxes.W_UInt64Box - uintp_type = types.UInt64() - uintp_num = NPY_ULONG - else: - raise ValueError('unknown point size %d' % ptr_size) self.w_intpdtype = W_Dtype( - intp_type, - num=intp_num, - kind=NPY_INTPLTR, + types.Long(), + num=NPY_LONG, + kind=NPY_SIGNEDLTR, name='intp', char=NPY_INTPLTR, - w_box_type = space.gettypefor(intp_box), + w_box_type = space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( - uintp_type, - num=uintp_num, - kind=NPY_UINTPLTR, + types.ULong(), + num=NPY_ULONG, + kind=NPY_UNSIGNEDLTR, name='uintp', char=NPY_UINTPLTR, - w_box_type = space.gettypefor(uintp_box), + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), ) float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -731,11 +731,15 @@ def descr_view(self, space, w_dtype=None, w_type=None): if not w_type and w_dtype: try: - if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + if space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))): w_type = w_dtype w_dtype = None - except (OperationError, TypeError): - pass + except OperationError, e: + if e.match(space, space.w_TypeError): + pass + else: + raise if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -1185,12 +1189,15 @@ def take(a, indices, axis, out, mode): assert mode == 'raise' if axis is None: - res = a.ravel()[indices] + from numpy import array + indices = array(indices) + res = a.ravel()[indices.ravel()].reshape(indices.shape) else: + from operator import mul if axis < 0: axis += len(a.shape) s0, s1 = a.shape[:axis], a.shape[axis+1:] - l0 = prod(s0) if s0 else 1 - l1 = prod(s1) if s1 else 1 + l0 = reduce(mul, s0) if s0 else 1 + l1 = reduce(mul, s1) if s1 else 1 res = a.reshape((l0, -1, l1))[:,indices,:].reshape(s0 + (-1,) + s1) if out is not None: out[:] = res @@ -1439,12 +1446,11 @@ arr_iter.next() return w_arr - at unwrap_spec(order=str) -def zeros(space, w_shape, w_dtype=None, order='C'): +def zeros(space, w_shape, w_dtype=None, 
w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) + return W_NDimArray.from_shape(space, shape, dtype=dtype) @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -33,6 +33,9 @@ self.allow_complex = allow_complex self.complex_to_float = complex_to_float + def descr_get_name(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("" % self.name) @@ -373,14 +376,19 @@ w_rdtype = w_ldtype elif w_lhs.is_scalar() and not w_rhs.is_scalar(): w_ldtype = w_rdtype - if (self.int_only and (not w_ldtype.is_int_type() or not w_rdtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or w_rdtype.is_complex_type())): - raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) calc_dtype = find_binop_result_dtype(space, w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) + if (self.int_only and (not w_ldtype.is_int_type() or + not w_rdtype.is_int_type() or + not calc_dtype.is_int_type()) or + not self.allow_bool and (w_ldtype.is_bool_type() or + w_rdtype.is_bool_type()) or + not self.allow_complex and (w_ldtype.is_complex_type() or + w_rdtype.is_complex_type())): + raise OperationError(space.w_TypeError, space.wrap( + "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -417,6 +425,7 @@ __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), + __name__ = GetSetProperty(W_Ufunc.descr_get_name), identity = GetSetProperty(W_Ufunc.descr_get_identity), accumulate = interp2app(W_Ufunc.descr_accumulate), @@ -428,6 +437,8 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): + if dt2 is None: + return dt1 # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 @@ -498,13 +509,14 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_int64dtype + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY_UNSIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_uint64dtype - elif dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: - return dt + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_ulongdtype else: - assert False + assert dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR + return dt if promote_bools and (dt.kind == NPY_GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: @@ -522,36 +534,32 @@ bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype - complex_type = interp_dtype.get_dtype_cache(space).w_complex128dtype - float_type = interp_dtype.get_dtype_cache(space).w_float64dtype + uint64_dtype = 
interp_dtype.get_dtype_cache(space).w_uint64dtype + complex_dtype = interp_dtype.get_dtype_cache(space).w_complex128dtype + float_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype if isinstance(w_obj, interp_boxes.W_GenericBox): dtype = w_obj.get_dtype(space) - if current_guess is None: - return dtype return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): - if current_guess is None or current_guess is bool_dtype: - return bool_dtype - return current_guess + return find_binop_result_dtype(space, bool_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_int): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype): - return long_dtype - return current_guess + return find_binop_result_dtype(space, long_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_long): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype): - return int64_dtype - return current_guess + try: + space.int_w(w_obj) + except OperationError, e: + if e.match(space, space.w_OverflowError): + return find_binop_result_dtype(space, uint64_dtype, + current_guess) + raise + return find_binop_result_dtype(space, int64_dtype, current_guess) + elif space.isinstance_w(w_obj, space.w_float): + return find_binop_result_dtype(space, float_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_complex): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype or - current_guess is complex_type or current_guess is float_type): - return complex_type - return current_guess + return complex_dtype elif space.isinstance_w(w_obj, space.w_str): - if (current_guess is None): + if current_guess is None: return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY_STRING: @@ -559,12 +567,6 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess - if current_guess is complex_type: - return complex_type - if space.isinstance_w(w_obj, space.w_float): - return float_type - elif space.isinstance_w(w_obj, space.w_slice): - return long_dtype raise operationerrfmt(space.w_NotImplementedError, 'unable to create dtype from objects, ' '"%T" instance not supported', w_obj) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,9 +69,11 @@ return True def find_shape_and_elems(space, w_iterable, dtype): + is_rec_type = dtype is not None and dtype.is_record_type() + if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) - is_rec_type = dtype is not None and dtype.is_record_type() while True: if not batch: return shape[:], [] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -33,6 +33,11 @@ assert typeinfo['CFLOAT'] == ('F', 14, 64, 8, np.complex64) assert typeinfo['CDOUBLE'] == ('D', 15, 128, 16, np.complex128) assert typeinfo['HALF'] == ('e', 23, 16, 2, np.float16) + assert typeinfo['INTP'] == ('p', np.dtype('int').num, + self.ptr_size*8, self.ptr_size, + 2**(self.ptr_size*8 - 1) - 1, + -2**(self.ptr_size*8 - 1), + np.dtype('int').type) def test_dtype_basic(self): from 
numpypy import dtype @@ -49,6 +54,7 @@ assert dtype(int).fields is None assert dtype(int).names is None assert dtype(int).hasobject is False + assert dtype(int).subdtype is None assert dtype(None) is dtype(float) @@ -109,15 +115,11 @@ assert dtype(bool).num == 0 if self.ptr_size == 4: - assert dtype('intp').num == 5 - assert dtype('uintp').num == 6 assert dtype('int32').num == 7 assert dtype('uint32').num == 8 assert dtype('int64').num == 9 assert dtype('uint64').num == 10 else: - assert dtype('intp').num == 7 - assert dtype('uintp').num == 8 assert dtype('int32').num == 5 assert dtype('uint32').num == 6 assert dtype('int64').num == 7 @@ -125,6 +127,8 @@ assert dtype(int).num == 7 assert dtype('int').num == 7 assert dtype('uint').num == 8 + assert dtype('intp').num == 7 + assert dtype('uintp').num == 8 assert dtype(long).num == 9 assert dtype(float).num == 12 assert dtype('float').num == 12 @@ -366,16 +370,22 @@ # numpy allows abstract types in array creation a_n = numpy.array([4,4], numpy.number) + a_f = numpy.array([4,4], numpy.floating) + a_c = numpy.array([4,4], numpy.complexfloating) a_i = numpy.array([4,4], numpy.integer) a_s = numpy.array([4,4], numpy.signedinteger) a_u = numpy.array([4,4], numpy.unsignedinteger) assert a_n.dtype.num == 12 + assert a_f.dtype.num == 12 + assert a_c.dtype.num == 15 assert a_i.dtype.num == 7 assert a_s.dtype.num == 7 assert a_u.dtype.num == 8 assert a_n.dtype is numpy.dtype('float64') + assert a_f.dtype is numpy.dtype('float64') + assert a_c.dtype is numpy.dtype('complex128') if self.ptr_size == 4: assert a_i.dtype is numpy.dtype('int32') assert a_s.dtype is numpy.dtype('int32') @@ -473,8 +483,7 @@ assert numpy.int16('32768') == -32768 def test_uint16(self): - import numpypy as numpy - + import numpy assert numpy.uint16(65535) == 65535 assert numpy.uint16(65536) == 0 assert numpy.uint16('65535') == 65535 @@ -482,8 +491,7 @@ def test_int32(self): import sys - import numpypy as numpy - + import numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 @@ -498,10 +506,8 @@ def test_uint32(self): import sys - import numpypy as numpy - + import numpy assert numpy.uint32(10) == 10 - if sys.maxint > 2 ** 31 - 1: assert numpy.uint32(4294967295) == 4294967295 assert numpy.uint32(4294967296) == 0 @@ -518,8 +524,7 @@ def test_int64(self): import sys - import numpypy as numpy - + import numpy if sys.maxint == 2 ** 63 -1: assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, @@ -534,30 +539,30 @@ assert numpy.int64(9223372036854775807) == 9223372036854775807 assert numpy.int64(9223372036854775807) == 9223372036854775807 - raises(OverflowError, numpy.int64, 9223372036854775808) raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): - import sys - import numpypy as numpy - + import numpy + assert numpy.dtype(numpy.uint64).type is numpy.uint64 assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] - - assert numpy.dtype(numpy.uint64).type is numpy.uint64 - skip("see comment") - # These tests pass "by chance" on numpy, things that are larger than - # platform long (i.e. a python int), don't get put in a normal box, - # instead they become an object array containing a long, we don't have - # yet, so these can't pass. 
- assert numpy.uint64(9223372036854775808) == 9223372036854775808 - assert numpy.uint64(18446744073709551615) == 18446744073709551615 - raises(OverflowError, numpy.uint64(18446744073709551616)) + import sys + if '__pypy__' not in sys.builtin_module_names: + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. + assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + else: + raises(OverflowError, numpy.int64, 9223372036854775808) + raises(OverflowError, numpy.int64, 18446744073709551615) + raises(OverflowError, numpy.uint64, 18446744073709551616) def test_float16(self): - import numpypy as numpy + import numpy assert numpy.float16.mro() == [numpy.float16, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -568,8 +573,7 @@ def test_float32(self): - import numpypy as numpy - + import numpy assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -579,8 +583,7 @@ raises(ValueError, numpy.float32, '23.2df') def test_float64(self): - import numpypy as numpy - + import numpy assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] @@ -596,14 +599,14 @@ raises(ValueError, numpy.float64, '23.2df') def test_float_None(self): - import numpypy as numpy + import numpy from math import isnan assert isnan(numpy.float32(None)) assert isnan(numpy.float64(None)) assert isnan(numpy.longdouble(None)) def test_longfloat(self): - import numpypy as numpy + import numpy # it can be float96 or float128 if numpy.longfloat != numpy.float64: assert numpy.longfloat.mro()[1:] == [numpy.floating, @@ -616,8 +619,7 @@ raises(ValueError, numpy.longfloat, '23.2df') def test_complex_floating(self): - import numpypy as numpy - + import numpy assert numpy.complexfloating.__mro__ == (numpy.complexfloating, numpy.inexact, numpy.number, numpy.generic, object) @@ -715,10 +717,14 @@ assert numpy.int16 is numpy.short assert numpy.int8 is numpy.byte assert numpy.bool_ is numpy.bool8 + assert numpy.intp().dtype.num == 7 + assert numpy.intp().dtype.char == 'l' if self.ptr_size == 4: + assert numpy.intp().dtype.name == 'int32' assert numpy.intp is numpy.int32 assert numpy.uintp is numpy.uint32 elif self.ptr_size == 8: + assert numpy.intp().dtype.name == 'int64' assert numpy.intp is numpy.int64 assert numpy.uintp is numpy.uint64 @@ -787,8 +793,22 @@ def test_intp(self): from numpypy import dtype - assert dtype('p') == dtype('intp') - assert dtype('P') == dtype('uintp') + assert dtype('p') is dtype('intp') + assert dtype('P') is dtype('uintp') + #assert dtype('p') is dtype('int') + #assert dtype('P') is dtype('uint') + assert dtype('p').num == 7 + assert dtype('P').num == 8 + #assert dtype('p').char == 'l' + #assert dtype('P').char == 'L' + assert dtype('p').kind == 'i' + assert dtype('P').kind == 'u' + #if self.ptr_size == 4: + # assert dtype('p').name == 'int32' + # assert dtype('P').name == 'uint32' + #else: + # assert dtype('p').name == 'int64' + # assert dtype('P').name == 'uint64' def test_alignment(self): from numpypy import dtype @@ -836,12 +856,12 @@ import numpy as np assert np.dtype('> 2 == [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]).all() - a = array([True, False]) + a = np.array([True, False]) assert (a >> 1 == [0, 0]).all() - a = 
arange(3, dtype=float) + a = np.arange(3, dtype=float) raises(TypeError, lambda: a >> 1) + a = np.array([123], dtype='uint64') + b = a >> 1 + assert b == 61 + assert b.dtype.type is np.uint64 + a = np.array(123, dtype='uint64') + exc = raises(TypeError, "a >> 1") + assert 'not supported for the input types' in exc.value.message def test_rrshift(self): from numpypy import arange @@ -1400,16 +1412,18 @@ assert (array([[1,2],[3,4]]).prod(1) == [2, 12]).all() def test_prod(self): - from numpypy import array, int_, dtype + from numpypy import array, dtype a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 - a = array([True, False]) - assert a.prod() == 0 - assert type(a.prod()) is int_ - a = array([True, False], dtype='uint') - assert a.prod() == 0 - assert type(a.prod()) is dtype('uint').type + for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype is dtype('uint' if dt[0] == 'u' else 'int') + for dt in ['l', 'L', 'q', 'Q', 'e', 'f', 'd', 'F', 'D']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype is dtype(dt) def test_max(self): from numpypy import array, zeros @@ -1492,12 +1506,12 @@ def test_dtype_guessing(self): from numpypy import array, dtype - + import sys assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - #assert array([1L, 2, 3]).dtype is dtype(long) + assert array([1L, 2, 3]).dtype is dtype('q') assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1508,6 +1522,12 @@ assert array([int8(3)]).dtype is dtype("int8") assert array([bool_(True)]).dtype is dtype(bool) assert array([bool_(True), 3.0]).dtype is dtype(float) + assert array(sys.maxint + 42).dtype is dtype('Q') + assert array([sys.maxint + 42] * 2).dtype is dtype('Q') + assert array([sys.maxint + 42, 123]).dtype is dtype(float) + assert array([sys.maxint + 42, 123L]).dtype is dtype(float) + assert array([1+2j, 123]).dtype is dtype(complex) + assert array([1+2j, 123L]).dtype is dtype(complex) def test_comparison(self): import operator @@ -2183,12 +2203,6 @@ a[b] = 1. 
assert (a == [[1., 1., 1.]]).all() - @py.test.mark.xfail - def test_boolean_array(self): - import numpypy as np - a = np.ndarray([1], dtype=bool) - assert a[0] == True - class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) @@ -2251,7 +2265,6 @@ f.close() - class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -2725,7 +2738,12 @@ assert (arange(10).take([1, 2, 1, 1]) == [1, 2, 1, 1]).all() raises(IndexError, "arange(3).take([15])") a = arange(6).reshape(2, 3) + assert a.take(3) == 3 + assert a.take(3).shape == () assert (a.take([1, 0, 3]) == [1, 0, 3]).all() + assert (a.take([[1, 0], [2, 3]]) == [[1, 0], [2, 3]]).all() + assert (a.take([1], axis=0) == [[3, 4, 5]]).all() + assert (a.take([1], axis=1) == [[1], [4]]).all() assert ((a + a).take([3]) == [6]).all() a = arange(12).reshape(2, 6) assert (a[:,::2].take([3, 2, 1]) == [6, 4, 2]).all() @@ -2822,7 +2840,11 @@ assert b[35] == 200 b[[slice(25, 30)]] = range(5) assert all(a[:5] == range(5)) - raises(TypeError, 'b[[[slice(25, 125)]]]') + import sys + if '__pypy__' not in sys.builtin_module_names: + raises(TypeError, 'b[[[slice(25, 125)]]]') + else: + raises(NotImplementedError, 'b[[[slice(25, 125)]]]') def test_cumsum(self): from numpypy import arange @@ -2976,17 +2998,18 @@ assert j[0] == 12 k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) - dt = array([5],dtype='longfloat').dtype - if dt.itemsize == 12: + dt = array([5], dtype='longfloat').dtype + if dt.itemsize == 8: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') + elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') elif dt.itemsize == 16: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00' \ '\x00\x00\x00\x00', dtype='float128') - elif dt.itemsize == 8: - skip('longfloat is float64') else: - skip('unknown itemsize for longfloat') + assert False, 'unknown itemsize for longfloat' assert m[0] == dtype('longfloat').type(5.) 
def test_fromstring_invalid(self): @@ -3046,7 +3069,13 @@ spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def test_zeros(self): - from numpypy import zeros + from numpypy import zeros, void + a = zeros((), dtype=[('x', int), ('y', float)]) + assert type(a[()]) is void + assert type(a.item()) is tuple + assert a[()]['x'] == 0 + assert a[()]['y'] == 0 + assert a.shape == () a = zeros(2, dtype=[('x', int), ('y', float)]) raises(IndexError, 'a[0]["xyz"]') assert a[0]['x'] == 0 @@ -3061,7 +3090,12 @@ assert a[1]['y'] == 2 def test_views(self): - from numpypy import array + from numpypy import array, zeros, ndarray + a = zeros((), dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]') + assert type(a['x']) is ndarray + assert a['x'] == 0 + assert a['y'] == 0 a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) raises((IndexError, ValueError), 'array([1])["x"]') raises((IndexError, ValueError), 'a["z"]') @@ -3082,14 +3116,44 @@ def test_creation_and_repr(self): from numpypy import array + a = array((1, 2), dtype=[('x', int), ('y', float)]) + assert a.shape == () + assert repr(a[()]) == '(1, 2.0)' a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) assert repr(a[0]) == '(1, 2.0)' + def test_void_copyswap(self): + import numpy as np + dt = np.dtype([('one', ' 0 and x['two'] > 2 + else: + assert x['one'] == 1 and x['two'] == 2 + def test_nested_dtype(self): - from numpypy import zeros + import numpy as np a = [('x', int), ('y', float)] b = [('x', int), ('y', a)] - arr = zeros(3, dtype=b) + arr = np.zeros((), dtype=b) + assert arr['x'] == 0 + arr['x'] = 2 + assert arr['x'] == 2 + exc = raises(IndexError, "arr[3L]") + assert exc.value.message == "0-d arrays can't be indexed" + exc = raises(ValueError, "arr['xx'] = 2") + assert exc.value.message == "field named xx not found" + assert arr['y'].dtype == a + assert arr['y'].shape == () + assert arr['y'][()]['x'] == 0 + assert arr['y'][()]['y'] == 0 + arr['y'][()]['x'] = 2 + arr['y'][()]['y'] = 3 + assert arr['y'][()]['x'] == 2 + assert arr['y'][()]['y'] == 3 + arr = np.zeros(3, dtype=b) arr[1]['x'] = 15 assert arr[1]['x'] == 15 arr[1]['y']['y'] = 3.5 @@ -3214,11 +3278,15 @@ def test_subarrays(self): from numpypy import dtype, array, zeros - d = dtype([("x", "int", 3), ("y", "float", 5)]) + + a = zeros((), dtype=d) + #assert a['x'].dtype == int + #assert a['x'].shape == (3,) + #assert (a['x'] == [0, 0, 0]).all() + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) - for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() assert (a[1][v] == [4, 5, 6]).all() @@ -3236,6 +3304,13 @@ a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 + a[1]["x"][2] = 123 + assert (a[1]["x"] == [4, 5, 123]).all() + a[1]['y'][3] = 4 + assert a[1]['y'][3] == 4 + assert a['y'][1][3] == 4 + a['y'][1][4] = 5 + assert a[1]['y'][4] == 5 d = dtype([("x", "int64", (2, 3))]) a = array([([[1, 2, 3], [4, 5, 6]],)], dtype=d) @@ -3309,14 +3384,16 @@ a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], dtype=dt) - s = str(a) i = a.item() assert isinstance(i, tuple) assert len(i) == 4 - skip('incorrect formatting via dump_data') - assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " - "[[7, 8, 9], [10, 11, 12]]])]") - + import sys + if '__pypy__' not in sys.builtin_module_names: + assert str(a) == "[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " \ + "[[7, 8, 9], [10, 11, 12]]])]" + else: + assert str(a) == "array([('aaaa', 1.0, 8.0, [1, 2, 3, 4, 5, 6, 
" \ + "7, 8, 9, 10, 11, 12])])" def test_issue_1589(self): import numpypy as numpy @@ -3329,6 +3406,7 @@ a = np.array([1,2,3], dtype='int16') assert (a * 2).dtype == np.dtype('int16') + class AppTestPyPy(BaseNumpyAppTest): def setup_class(cls): if option.runappdirect and '__pypy__' not in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -4,8 +4,9 @@ spaceconfig = dict(usemodules=["micronumpy", "binascii", "struct"]) def test_init(self): - import numpypy as np + import numpy as np import math + import sys assert np.intp() == np.intp(0) assert np.intp('123') == np.intp(123) raises(TypeError, np.intp, None) @@ -17,6 +18,9 @@ assert np.complex_() == np.complex_(0) #raises(TypeError, np.complex_, '1+2j') assert math.isnan(np.complex_(None)) + for c in ['i', 'I', 'l', 'L', 'q', 'Q']: + assert np.dtype(c).type().dtype.char == c + assert np.dtype('L').type(sys.maxint + 42) == sys.maxint + 42 def test_builtin(self): import numpy as np @@ -37,7 +41,7 @@ assert len(np.string_('123')) == 3 def test_pickle(self): - from numpypy import dtype, zeros + from numpy import dtype, zeros try: from numpy.core.multiarray import scalar except ImportError: @@ -111,8 +115,17 @@ assert a.squeeze() is a raises(TypeError, a.squeeze, 2) + def test_bitshift(self): + import numpy as np + assert np.int32(123) >> 1 == 61 + assert type(np.int32(123) >> 1) is np.int_ + assert np.int64(123) << 1 == 246 + assert type(np.int64(123) << 1) is np.int64 + exc = raises(TypeError, "np.uint64(123) >> 1") + assert 'not supported for the input types' in exc.value.message + def test_attributes(self): - import numpypy as np + import numpy as np value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') assert value.size == 1 diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -45,6 +45,9 @@ def test_argsort_axis(self): from numpypy import array + a = array([]) + for axis in [None, -1, 0]: + assert a.argsort(axis=axis).shape == (0,) a = array([[4, 2], [1, 3]]) assert (a.argsort(axis=None) == [2, 1, 3, 0]).all() assert (a.argsort(axis=-1) == [[1, 0], [0, 1]]).all() @@ -306,9 +309,8 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): - skip('not implemented yet') - from numpypy import array, dtype - a = array(range(11),dtype='float64') + from numpy import array, dtype + a = array(range(11), dtype='float64') c = a.astype(dtype('" assert repr(ufunc) == "" + assert add.__name__ == 'add' def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -390,23 +391,17 @@ assert (a == ref).all() def test_signbit(self): - from numpypy import signbit, add - + from numpy import signbit, add, copysign, nan + assert signbit(add.identity) == False assert (signbit([0, 0.0, 1, 1.0, float('inf')]) == - [False, False, False, False, False]).all() + [False, False, False, False, False]).all() assert (signbit([-0, -0.0, -1, -1.0, float('-inf')]) == - [False, True, True, True, True]).all() - - a = add.identity - assert signbit(a) == False - - skip('sign of nan is non-determinant') - assert (signbit([float('nan'), float('-nan'), -float('nan')]) == - [False, True, True]).all() + [False, True, True, True, True]).all() + assert (signbit([copysign(nan, 1), copysign(nan, -1)]) == + 
[False, True]).all() def test_reciprocal(self): - from numpypy import array, reciprocal - + from numpy import array, reciprocal inf = float('inf') nan = float('nan') reference = [-0.2, inf, -inf, 2.0, nan] diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -12,7 +12,7 @@ from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, raw_storage_getitem) from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of +from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of, LONG_BIT from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.rstruct.nativefmttable import native_is_bigendian @@ -568,16 +568,6 @@ BoxType = interp_boxes.W_UInt32Box format_code = "I" -class Long(BaseType, Integer): - T = rffi.LONG - BoxType = interp_boxes.W_LongBox - format_code = "l" - -class ULong(BaseType, Integer): - T = rffi.ULONG - BoxType = interp_boxes.W_ULongBox - format_code = "L" - def _int64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) @@ -618,6 +608,22 @@ _coerce = func_with_new_name(_uint64_coerce, '_coerce') +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + format_code = "l" + + if LONG_BIT == 64: + _coerce = func_with_new_name(_int64_coerce, '_coerce') + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + format_code = "L" + + if LONG_BIT == 64: + _coerce = func_with_new_name(_uint64_coerce, '_coerce') + class Float(Primitive): _mixin_ = True @@ -1620,6 +1626,8 @@ from pypy.module.micronumpy.interp_dtype import new_string_dtype if isinstance(w_item, interp_boxes.W_StringBox): return w_item + if w_item is None: + w_item = space.wrap('') arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1733,13 +1741,16 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match from interp_dtype import W_Dtype - items_w = space.fixedview(w_items) + if w_items is not None: + items_w = space.fixedview(w_items) + else: + items_w = [None] * shape[0] subdtype = dtype.subdtype assert isinstance(subdtype, W_Dtype) itemtype = subdtype.itemtype if len(shape) <= 1: for i in range(len(items_w)): - w_box = itemtype.coerce(space, dtype.subdtype, items_w[i]) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) ofs += itemtype.get_element_size() else: @@ -1758,7 +1769,9 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): + assert i == 0 assert isinstance(box, interp_boxes.W_VoidBox) + assert box.dtype is box.arr.dtype for k in range(box.arr.dtype.get_size()): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] @@ -1819,29 +1832,35 @@ def coerce(self, space, dtype, w_item): if isinstance(w_item, interp_boxes.W_VoidBox): return w_item - # we treat every sequence as sequence, no special support - # for arrays - if not space.issequence_w(w_item): - raise OperationError(space.w_TypeError, space.wrap( - "expected sequence")) - if len(dtype.fields) != space.len_w(w_item): - raise OperationError(space.w_ValueError, space.wrap( - "wrong length")) - items_w = space.fixedview(w_item) + if w_item is not None: + # we treat every sequence as sequence, no special support + # for arrays + if not 
space.issequence_w(w_item): + raise OperationError(space.w_TypeError, space.wrap( + "expected sequence")) + if len(dtype.fields) != space.len_w(w_item): + raise OperationError(space.w_ValueError, space.wrap( + "wrong length")) + items_w = space.fixedview(w_item) + else: + items_w = [None] * len(dtype.fields) arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): ofs, subdtype = dtype.fields[dtype.fieldnames[i]] itemtype = subdtype.itemtype - w_item = items_w[i] - w_box = itemtype.coerce(space, subdtype, w_item) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) return interp_boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(box.arr.dtype.get_size()): - arr.storage[k + i] = box.arr.storage[k + box.ofs] + for k in range(box.dtype.get_size()): + arr.storage[k + i + ofs] = box.arr.storage[k + box.ofs] + + def byteswap(self, w_v): + # XXX implement + return w_v def to_builtin_type(self, space, box): assert isinstance(box, interp_boxes.W_VoidBox) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -51,6 +51,13 @@ return w_iter list_iter._annspecialcase_ = 'specialize:memo' +def tuple_iter(space): + "Utility that returns the app-level descriptor tuple.__iter__." + w_src, w_iter = space.lookup_in_type_where(space.w_tuple, + '__iter__') + return w_iter +tuple_iter._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, name, w_descr=None): if w_descr is None: raise operationerrfmt(space.w_AttributeError, diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -945,7 +945,8 @@ def _extend_from_iterable(self, w_list, w_iterable): space = self.space - if isinstance(w_iterable, W_AbstractTupleObject): + if (isinstance(w_iterable, W_AbstractTupleObject) + and space._uses_tuple_iter(w_iterable)): w_list.__init__(space, w_iterable.getitems_copy()) return diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -382,7 +382,7 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() @@ -396,7 +396,7 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.tolist() elif type(w_obj) is W_ListObject: if unroll: @@ -421,7 +421,7 @@ def listview(self, w_obj, expected_length=-1): if type(w_obj) is W_ListObject: t = w_obj.getitems() - elif isinstance(w_obj, W_AbstractTupleObject): + elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): t = w_obj.getitems() @@ -440,7 +440,7 @@ return w_obj.listview_str() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_str() - if isinstance(w_obj, W_StringObject): + if isinstance(w_obj, W_StringObject) and self._uses_no_iter(w_obj): 
return w_obj.listview_str() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_str() @@ -455,7 +455,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject): + if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -490,6 +490,13 @@ from pypy.objspace.descroperation import list_iter return self.lookup(w_obj, '__iter__') is list_iter(self) + def _uses_tuple_iter(self, w_obj): + from pypy.objspace.descroperation import tuple_iter + return self.lookup(w_obj, '__iter__') is tuple_iter(self) + + def _uses_no_iter(self, w_obj): + return self.lookup(w_obj, '__iter__') is None + def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): a, b, c = w_slice.indices3(self, self.int_w(w_length)) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1314,6 +1314,57 @@ non_list = NonList() assert [] != non_list + def test_extend_from_empty_list_with_subclasses(self): + # some of these tests used to fail by ignoring the + # custom __iter__() --- but only if the list has so + # far the empty strategy, as opposed to .extend()ing + # a non-empty list. + class T(tuple): + def __iter__(self): + yield "ok" + assert list(T([5, 6])) == ["ok"] + # + class L(list): + def __iter__(self): + yield "ok" + assert list(L([5, 6])) == ["ok"] + assert list(L([5.2, 6.3])) == ["ok"] + # + class S(str): + def __iter__(self): + yield "ok" + assert list(S("don't see me")) == ["ok"] + # + class U(unicode): + def __iter__(self): + yield "ok" + assert list(U(u"don't see me")) == ["ok"] + + def test_extend_from_nonempty_list_with_subclasses(self): + l = ["hi!"] + class T(tuple): + def __iter__(self): + yield "okT" + l.extend(T([5, 6])) + # + class L(list): + def __iter__(self): + yield "okL" + l.extend(L([5, 6])) + l.extend(L([5.2, 6.3])) + # + class S(str): + def __iter__(self): + yield "okS" + l.extend(S("don't see me")) + # + class U(unicode): + def __iter__(self): + yield "okU" + l.extend(U(u"don't see me")) + # + assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + class AppTestForRangeLists(AppTestW_ListObject): spaceconfig = {"objspace.std.withrangelist": True} diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -412,10 +412,7 @@ return SomeByteArray(can_be_None=can_be_None) def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeByteArray, SomeInteger)): def getitem((s_b, s_i)): @@ -429,10 +426,7 @@ pairtype(SomeChar, SomeByteArray), pairtype(SomeByteArray, SomeChar)): def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeChar, SomeChar)): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -122,7 +122,7 @@ return constpropagate(unicode, 
[s_unicode], SomeUnicodeString()) def builtin_bytearray(s_str): - return constpropagate(bytearray, [s_str], SomeByteArray()) + return SomeByteArray() def our_issubclass(cls1, cls2): """ we're going to try to be less silly in the face of old-style classes""" diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -215,7 +215,8 @@ class SomeStringOrUnicode(SomeObject): - """Base class for shared implementation of SomeString and SomeUnicodeString. + """Base class for shared implementation of SomeString, + SomeUnicodeString and SomeByteArray. Cannot be an annotation.""" @@ -228,6 +229,7 @@ if can_be_None: self.can_be_None = True if no_nul: + assert self.immutable #'no_nul' cannot be used with SomeByteArray self.no_nul = True def can_be_none(self): @@ -263,6 +265,7 @@ class SomeByteArray(SomeStringOrUnicode): + immutable = False knowntype = bytearray diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3987,7 +3987,9 @@ return bytearray("xyz") a = self.RPythonAnnotator() - assert isinstance(a.build_types(f, []), annmodel.SomeByteArray) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeByteArray) + assert not s.is_constant() # never a constant! def test_bytearray_add(self): def f(a): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -125,10 +125,12 @@ class ArrayDescr(AbstractDescr): def __init__(self, A): - self.A = A + self.A = self.OUTERA = A + if isinstance(A, lltype.Struct): + self.A = A._flds[A._arrayfld] def __repr__(self): - return 'ArrayDescr(%r)' % (self.A,) + return 'ArrayDescr(%r)' % (self.OUTERA,) def is_array_of_pointers(self): return getkind(self.A.OF) == 'ref' @@ -424,6 +426,8 @@ def bh_arraylen_gc(self, a, descr): array = a._obj.container + if descr.A is not descr.OUTERA: + array = getattr(array, descr.OUTERA._arrayfld) return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -424,3 +424,11 @@ " > >") # caching: assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) + +def test_bytearray_descr(): + c0 = GcCache(False) + descr = get_array_descr(c0, rstr.STR) # for bytearray + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == struct.calcsize("PP") # hash, length + assert descr.lendescr.offset == struct.calcsize("P") # hash + assert not descr.is_array_of_pointers() diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -13,6 +13,7 @@ from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rclass, rffi +from rpython.rtyper.lltypesystem import rbytearray from rpython.rtyper.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY from rpython.translator.unsimplify import varoftype @@ -643,6 +644,12 @@ return SpaceOperation('arraylen_gc', [op.args[0], arraydescr], op.result) + def rewrite_op_getarraysubstruct(self, op): + ARRAY = 
op.args[0].concretetype.TO + assert ARRAY._gckind == 'raw' + assert ARRAY._hints.get('nolength') is True + return self.rewrite_op_direct_ptradd(op) + def _array_of_voids(self, ARRAY): return ARRAY.OF == lltype.Void @@ -836,9 +843,14 @@ optype = op.args[0].concretetype if optype == lltype.Ptr(rstr.STR): opname = "strlen" + elif optype == lltype.Ptr(rstr.UNICODE): + opname = "unicodelen" + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + return SpaceOperation('arraylen_gc', [op.args[0], bytearraydescr], + op.result) else: - assert optype == lltype.Ptr(rstr.UNICODE) - opname = "unicodelen" + assert 0, "supported type %r" % (optype,) return SpaceOperation(opname, [op.args[0]], op.result) def rewrite_op_getinteriorfield(self, op): @@ -850,6 +862,12 @@ elif optype == lltype.Ptr(rstr.UNICODE): opname = "unicodegetitem" return SpaceOperation(opname, [op.args[0], op.args[2]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + v_index = op.args[2] + return SpaceOperation('getarrayitem_gc_i', + [op.args[0], v_index, bytearraydescr], + op.result) else: v_inst, v_index, c_field = op.args if op.result.concretetype is lltype.Void: @@ -876,6 +894,11 @@ opname = "unicodesetitem" return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + opname = "setarrayitem_gc_i" + return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3], + bytearraydescr], op.result) else: v_inst, v_index, c_field, v_value = op.args if v_value.concretetype is lltype.Void: @@ -1709,6 +1732,8 @@ "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar + elif SoU.TO == rbytearray.BYTEARRAY: + raise NotSupported("bytearray operation") else: assert 0, "args[0].concretetype must be STR or UNICODE" # diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -14,6 +14,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory, rstr as ll_rstr, rdict as ll_rdict +from rpython.rtyper.lltypesystem import rordereddict from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.module import ll_math from rpython.translator.translator import TranslationContext @@ -492,11 +493,6 @@ # ---------- dict ---------- - def _ll_0_newdict(DICT): - return ll_rdict.ll_newdict(DICT) - _ll_0_newdict.need_result_type = True - - _ll_2_dict_delitem = ll_rdict.ll_dict_delitem _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update @@ -524,6 +520,33 @@ _ll_1_dict_resize = ll_rdict.ll_dict_resize + # ---------- ordered dict ---------- + + _ll_1_odict_copy = rordereddict.ll_dict_copy + _ll_1_odict_clear = rordereddict.ll_dict_clear + _ll_2_odict_update = rordereddict.ll_dict_update + + _ll_1_odict_keys = rordereddict.ll_dict_keys + _ll_1_odict_values = rordereddict.ll_dict_values + _ll_1_odict_items = rordereddict.ll_dict_items + _ll_1_odict_keys .need_result_type = True + _ll_1_odict_values.need_result_type = True + _ll_1_odict_items .need_result_type = True + + _odictnext_keys = staticmethod(rordereddict.ll_dictnext_group['keys']) + _odictnext_values = 
staticmethod(rordereddict.ll_dictnext_group['values']) + _odictnext_items = staticmethod(rordereddict.ll_dictnext_group['items']) + + def _ll_1_odictiter_nextkeys(iter): + return LLtypeHelpers._odictnext_keys(None, iter) + def _ll_1_odictiter_nextvalues(iter): + return LLtypeHelpers._odictnext_values(None, iter) + def _ll_1_odictiter_nextitems(RES, iter): + return LLtypeHelpers._odictnext_items(lltype.Ptr(RES), iter) + _ll_1_odictiter_nextitems.need_result_type = True + + _ll_1_odict_resize = rordereddict.ll_dict_resize + # ---------- strings and unicode ---------- _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode diff --git a/rpython/jit/metainterp/test/test_bytearray.py b/rpython/jit/metainterp/test/test_bytearray.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_bytearray.py @@ -0,0 +1,82 @@ +import py +from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.rlib.jit import JitDriver, dont_look_inside + +class TestByteArray(LLJitMixin): + + def test_getitem(self): + x = bytearray("foobar") + def fn(n): + assert n >= 0 + return x[n] + res = self.interp_operations(fn, [3]) + assert res == ord('b') + + def test_getitem_negative(self): + x = bytearray("foobar") + def fn(n): + return x[n] + res = self.interp_operations(fn, [-2]) + assert res == ord('a') + + def test_len(self): + x = bytearray("foobar") + def fn(n): + return len(x) + res = self.interp_operations(fn, [3]) + assert res == 6 + + def test_setitem(self): + @dont_look_inside + def make_me(): + return bytearray("foobar") + def fn(n): + assert n >= 0 + x = make_me() + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [3]) + assert res == 3 + 1000 * ord('a') + + def test_setitem_negative(self): + @dont_look_inside + def make_me(): + return bytearray("foobar") + def fn(n): + x = make_me() + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [-2]) + assert res == ord('b') + 1000 * 3 + + def test_new_bytearray(self): + def fn(n, m): + x = bytearray(str(n)) + x[m] = 0x34 + return int(str(x)) + + assert fn(610978, 3) == 610478 + res = self.interp_operations(fn, [610978, 3]) + assert res == 610478 + + def test_slice(self): + py.test.skip("XXX later") + def fn(n, m): + x = bytearray(str(n)) + x = x[1:5] + x[m] = 0x35 + return int(str(x)) + res = self.interp_operations(fn, [610978, 1]) + assert res == 1597 + + def test_bytearray_from_bytearray(self): + def fn(n, m): + x = bytearray(str(n)) + y = bytearray(x) + x[m] = 0x34 + return int(str(x)) + int(str(y)) + + res = self.interp_operations(fn, [610978, 3]) + assert res == 610478 + 610978 diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -2,12 +2,15 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver from rpython.rlib import objectmodel +from collections import OrderedDict class DictTests: + def _freeze_(self): + return True def test_dict_set_none(self): def fn(n): - d = {} + d = self.newdict() d[0] = None return bool(d[n]) res = self.interp_operations(fn, [0]) @@ -21,7 +24,7 @@ ]: myjitdriver = JitDriver(greens = [], reds = ['n', 'dct']) def f(n): - dct = {} + dct = self.newdict() From noreply at buildbot.pypy.org Sat Dec 28 01:59:02 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 28 Dec 2013 01:59:02 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: lame and mysterious workaround for 
-Ojit & --objspace-std-withsmalllong failing Message-ID: <20131228005902.1DA801C02A7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68561:4fa4c6b93a84 Date: 2013-12-27 16:58 -0800 http://bitbucket.org/pypy/pypy/changeset/4fa4c6b93a84/ Log: lame and mysterious workaround for -Ojit & --objspace-std- withsmalllong failing translation diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -14,7 +14,8 @@ from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject -LONGLONG_MIN = r_longlong(-1 << (LONGLONG_BIT - 1)) +# XXX: breaks translation +#LONGLONG_MIN = r_longlong(-1 << (LONGLONG_BIT - 1)) class W_SmallLongObject(W_AbstractLongObject): @@ -175,7 +176,7 @@ x = self.longlong y = w_other.longlong try: - if y == -1 and x == LONGLONG_MIN: + if y == -1 and x == r_longlong(-1 << (LONGLONG_BIT-1)): raise OverflowError z = x // y except ZeroDivisionError: @@ -191,7 +192,7 @@ x = self.longlong y = w_other.longlong try: - if y == -1 and x == LONGLONG_MIN: + if y == -1 and x == r_longlong(-1 << (LONGLONG_BIT-1)): raise OverflowError z = x % y except ZeroDivisionError: @@ -204,7 +205,7 @@ x = self.longlong y = w_other.longlong try: - if y == -1 and x == LONGLONG_MIN: + if y == -1 and x == r_longlong(-1 << (LONGLONG_BIT-1)): raise OverflowError z = x // y except ZeroDivisionError: @@ -225,7 +226,8 @@ if space.is_none(w_modulus): try: - return _pow_impl(space, self.longlong, w_exponent) + return _pow_impl(space, self.longlong, w_exponent, + r_longlong(0)) except ValueError: self = self.descr_float(space) return space.pow(self, w_exponent, space.w_None) @@ -323,7 +325,7 @@ def descr_neg(self, space): a = self.longlong try: - if a == LONGLONG_MIN: + if a == r_longlong(-1 << (LONGLONG_BIT-1)): raise OverflowError x = -a except OverflowError: @@ -413,7 +415,7 @@ return space.newtuple([div_ovr(space, w_int1, w_int2), mod_ovr(space, w_int1, w_int2)]) -def _pow_impl(space, iv, w_int2, iz=r_longlong(0)): +def _pow_impl(space, iv, w_int2, iz): iw = space.int_w(w_int2) if iw < 0: if iz != 0: From noreply at buildbot.pypy.org Sat Dec 28 01:59:00 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 28 Dec 2013 01:59:00 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: fully handle int overflowing to smalllong and much cleanup/progress Message-ID: <20131228005900.EBBCF1C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68560:e677ccdf7fe2 Date: 2013-12-27 16:56 -0800 http://bitbucket.org/pypy/pypy/changeset/e677ccdf7fe2/ Log: fully handle int overflowing to smalllong and much cleanup/progress diff too long, truncating to 2000 out of 2240 lines diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -2,7 +2,7 @@ from rpython.rlib.rbigint import rbigint from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec -from pypy.objspace.std.intobject import W_AbstractIntObject, W_IntObject +from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.stdtypedef import StdTypeDef @@ -41,7 +41,6 @@ def descr_repr(self, space): return space.wrap('True' if self.boolval else 'False') - descr_str = descr_repr def descr_nonzero(self, space): @@ -68,11 +67,11 @@ 
@unwrap_spec(w_obj=WrappedDefault(False)) def descr__new__(space, w_booltype, w_obj): space.w_bool.check_user_subclass(w_booltype) - return space.newbool(space.is_true(w_obj)) # XXX: method call? + return space.newbool(space.is_true(w_obj)) # ____________________________________________________________ -W_BoolObject.typedef = StdTypeDef("bool", W_IntObject.typedef, +W_BoolObject.typedef = StdTypeDef("bool", W_AbstractIntObject.typedef, __doc__ = """bool(x) -> bool Returns True when the argument x is true, False otherwise. @@ -81,6 +80,13 @@ __new__ = interp2app(descr__new__), __repr__ = interp2app(W_BoolObject.descr_repr), __str__ = interp2app(W_BoolObject.descr_str), - # XXX: might as well declare interp2app directly here for nonzero/and/etc + __nonzero__ = interp2app(W_BoolObject.descr_nonzero), + # XXX: rsides + __and__ = interp2app(W_BoolObject.descr_and), + #__rand__ = interp2app(W_BoolObject.descr_rand), + __or__ = interp2app(W_BoolObject.descr_or), + #__ror__ = interp2app(W_BoolObject.descr_ror), + __xor__ = interp2app(W_BoolObject.descr_xor), + #__rxor__ = interp2app(W_BoolObject.descr_rxor), ) W_BoolObject.typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -62,7 +62,6 @@ # long-to-float delegation def delegate_Long2Float(space, w_longobj): - # XXX: tofloat is not abstract (SmallLongs) return W_FloatObject(w_longobj.tofloat(space)) @@ -563,20 +562,3 @@ from pypy.objspace.std import floattype register_all(vars(), floattype) - -# pow delegation for negative 2nd arg -def pow_neg__Long_Long_None(space, w_int1, w_int2, thirdarg): - w_float1 = delegate_Long2Float(space, w_int1) - w_float2 = delegate_Long2Float(space, w_int2) - return pow__Float_Float_ANY(space, w_float1, w_float2, thirdarg) - -model.MM.pow.register(pow_neg__Long_Long_None, W_LongObject, W_LongObject, - W_NoneObject, order=1) - -def pow_neg__Int_Int_None(space, w_int1, w_int2, thirdarg): - w_float1 = delegate_Int2Float(space, w_int1) - w_float2 = delegate_Int2Float(space, w_int2) - return pow__Float_Float_ANY(space, w_float1, w_float2, thirdarg) - -model.MM.pow.register(pow_neg__Int_Int_None, W_IntObject, W_IntObject, - W_NoneObject, order=2) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -6,40 +6,251 @@ """ import operator +import sys from rpython.rlib import jit +from rpython.rlib.objectmodel import instantiate, specialize from rpython.rlib.rarithmetic import ( - LONG_BIT, is_valid_int, ovfcheck, string_to_int, r_uint) -from rpython.rlib.objectmodel import instantiate + LONG_BIT, is_valid_int, ovfcheck, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint from rpython.rlib.rstring import ParseStringError, ParseStringOverflowError -from rpython.tool.sourcetools import func_with_new_name +from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat -from pypy.objspace.std.model import W_Object from pypy.objspace.std.stdtypedef import StdTypeDef -class W_AbstractIntObject(W_Object): +class W_AbstractIntObject(W_Root): 
+ __slots__ = () def int(self, space): raise NotImplementedError + def _make_descr_cmp(opname): + op = getattr(operator, opname) + @func_renamer('descr_' + opname) + def descr_cmp(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + i = space.int_w(self) + j = space.int_w(w_other) + return space.newbool(op(i, j)) + return descr_cmp + + descr_lt = _make_descr_cmp('lt') + descr_le = _make_descr_cmp('le') + descr_eq = _make_descr_cmp('eq') + descr_ne = _make_descr_cmp('ne') + descr_gt = _make_descr_cmp('gt') + descr_ge = _make_descr_cmp('ge') + + def _make_generic_descr_binop(opname, ovf=True): + op = getattr(operator, + opname + '_' if opname in ('and', 'or') else opname) + commutative = opname in ('add', 'mul', 'and', 'or', 'xor') + + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + + x = space.int_w(self) + y = space.int_w(w_other) + if ovf: + try: + z = ovfcheck(op(x, y)) + except OverflowError: + return ovf2long(space, opname, self, w_other) + else: + z = op(x, y) + return wrapint(space, z) + + if commutative: + return descr_binop, func_with_new_name(descr_binop, + 'descr_r' + opname) + + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + + x = space.int_w(self) + y = space.int_w(w_other) + if ovf: + try: + z = ovfcheck(op(y, x)) + except OverflowError: + return ovf2long(space, opname, w_other, self) + else: + z = op(y, x) + return wrapint(space, z) + + return descr_binop, descr_rbinop + + descr_add, descr_radd = _make_generic_descr_binop('add') + descr_sub, descr_rsub = _make_generic_descr_binop('sub') + descr_mul, descr_rmul = _make_generic_descr_binop('mul') + + descr_and, descr_rand = _make_generic_descr_binop('and', ovf=False) + descr_or, descr_ror = _make_generic_descr_binop('or', ovf=False) + descr_xor, descr_rxor = _make_generic_descr_binop('xor', ovf=False) + + def _make_descr_binop(func): + opname = func.__name__[1:] + + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + try: + return func(self, space, w_other) + except OverflowError: + return ovf2long(space, opname, self, w_other) + + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + try: + return func(w_other, space, self) + except OverflowError: + return ovf2long(space, opname, w_other, self) + + return descr_binop, descr_rbinop + + def _floordiv(self, space, w_other): + x = space.int_w(self) + y = space.int_w(w_other) + try: + z = ovfcheck(x // y) + except ZeroDivisionError: + raise operationerrfmt(space.w_ZeroDivisionError, + "integer division by zero") + return wrapint(space, z) + descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) + + _div = func_with_new_name(_floordiv, '_div') + descr_div, descr_rdiv = _make_descr_binop(_div) + + def _truediv(self, space, w_other): + x = float(space.int_w(self)) + y = float(space.int_w(w_other)) + if y == 0.0: + raise operationerrfmt(space.w_ZeroDivisionError, + "division by zero") + return space.wrap(x / y) + descr_truediv, descr_rtruediv = _make_descr_binop(_truediv) + + def _mod(self, space, w_other): + x = space.int_w(self) + y = space.int_w(w_other) + try: + z = ovfcheck(x % y) + 
except ZeroDivisionError: + raise operationerrfmt(space.w_ZeroDivisionError, + "integer modulo by zero") + return wrapint(space, z) + descr_mod, descr_rmod = _make_descr_binop(_mod) + + def _divmod(self, space, w_other): + x = space.int_w(self) + y = space.int_w(w_other) + try: + z = ovfcheck(x // y) + except ZeroDivisionError: + raise operationerrfmt(space.w_ZeroDivisionError, + "integer divmod by zero") + # no overflow possible + m = x % y + w = space.wrap + return space.newtuple([w(z), w(m)]) + descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) + + def _lshift(self, space, w_other): + a = space.int_w(self) + b = space.int_w(w_other) + if r_uint(b) < LONG_BIT: # 0 <= b < LONG_BIT + c = ovfcheck(a << b) + return wrapint(space, c) + if b < 0: + raise operationerrfmt(space.w_ValueError, "negative shift count") + else: # b >= LONG_BIT + if a == 0: + return self.int(space) + raise OverflowError + descr_lshift, descr_rlshift = _make_descr_binop(_lshift) + + def _rshift(self, space, w_other): + a = space.int_w(self) + b = space.int_w(w_other) + if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) + if b < 0: + raise operationerrfmt(space.w_ValueError, + "negative shift count") + # b >= LONG_BIT + if a == 0: + return self.int(space) + a = -1 if a < 0 else 0 + else: + a = a >> b + return wrapint(space, a) + descr_rshift, descr_rrshift = _make_descr_binop(_rshift) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_pow(self, space, w_exponent, w_modulus=None): + if not isinstance(w_exponent, W_AbstractIntObject): + return space.w_NotImplemented + + if space.is_none(w_modulus): + z = 0 + elif isinstance(w_modulus, W_AbstractIntObject): + z = space.int_w(w_modulus) + if z == 0: + raise operationerrfmt(space.w_ValueError, + "pow() 3rd argument cannot be 0") + else: + # can't return NotImplemented (space.pow doesn't do full + # ternary, i.e. w_modulus.__zpow__(self, w_exponent)), so + # handle it ourselves + return self._ovfpow2long(space, w_exponent, w_modulus) + + x = space.int_w(self) + y = space.int_w(w_exponent) + try: + result = _pow_impl(space, x, y, z) + except (OverflowError, ValueError): + return self._ovfpow2long(space, w_exponent, w_modulus) + return space.wrap(result) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_rpow(self, space, w_base, w_modulus=None): + if not isinstance(w_base, W_AbstractIntObject): + return space.w_NotImplemented + return w_base.descr_pow(space, self, w_modulus) + + def _ovfpow2long(self, space, w_exponent, w_modulus): + if space.is_none(w_modulus) and recover_with_smalllong(space): + from pypy.objspace.std.smalllongobject import pow_ovr + return pow_ovr(space, self, w_exponent) + self = self.descr_long(space) + return self.descr_pow(space, w_exponent, w_modulus) + + def descr_coerce(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + return space.newtuple([self, w_other]) + def descr_long(self, space): from pypy.objspace.std.longobject import W_LongObject return W_LongObject.fromint(space, self.int_w(space)) - def descr_format(self, space, w_format_spec): - return newformat.run_formatter(space, w_format_spec, - "format_int_or_long", self, - newformat.INT_KIND) - def descr_hash(self, space): # unlike CPython, we don't special-case the value -1 in most of # our hash functions, so there is not much sense special-casing @@ -47,283 +258,30 @@ # floats and longs. 
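# Illustrative sketch (an editor's aside, not part of the changeset above): the
# new binop descriptors rely on the RPython pattern "do the machine-word
# operation under ovfcheck() and, on OverflowError, redo it on longs" (see
# ovf2long further down).  A plain-Python emulation of that control flow, with
# an explicit word-size check standing in for ovfcheck(), looks roughly like:
import sys

def _checked_add(x, y):
    # stand-in for rpython.rlib.rarithmetic.ovfcheck(x + y)
    z = x + y
    if not (-sys.maxint - 1 <= z <= sys.maxint):
        raise OverflowError
    return z

def add_with_fallback(x, y):
    try:
        return _checked_add(x, y)     # fast path: result fits a machine word
    except OverflowError:
        return long(x) + long(y)      # slow path: arbitrary-precision longs

# e.g. add_with_fallback(sys.maxint, 1) == sys.maxint + 1 (as a long)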
return self.int(space) - def descr_coerce(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - return space.newtuple([self, w_other]) + def descr_nonzero(self, space): + return space.newbool(space.int_w(self) != 0) - def _make_descr_binop(opname): - # XXX: func_renamer or func_with_new_name? - from rpython.tool.sourcetools import func_renamer - op = getattr(operator, opname) + def descr_invert(self, space): + return wrapint(space, ~space.int_w(self)) - @func_renamer('descr_' + opname) - def descr_binop(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(op(x, y)) - except OverflowError: - w_long1 = _delegate_Int2Long(space, self) - # XXX: this needs to be _delegate_Int2Long(space, - # space.int(w_other)) to support bools. so maybe delegation - # should work against space.int_w(w_other) - w_long2 = _delegate_Int2Long(space, w_other) - return getattr(space, opname)(w_long1, w_long2) - return wrapint(space, z) - - @func_renamer('descr_r' + opname) - def descr_rbinop(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(op(y, x)) - except OverflowError: - w_long1 = _delegate_Int2Long(space, self) - # XXX: this needs to be _delegate_Int2Long(space, - # space.int(w_other)) to support bools. so maybe delegation - # should work against space.int_w(w_other) - w_long2 = _delegate_Int2Long(space, w_other) - return getattr(space, opname)(w_long2, w_long1) - return wrapint(space, z) - - return descr_binop, descr_rbinop - - descr_add, descr_radd = _make_descr_binop('add') - descr_sub, descr_rsub = _make_descr_binop('sub') - descr_mul, descr_rmul = _make_descr_binop('mul') - - def _make_descr_cmp(opname): - op = getattr(operator, opname) - def f(self, space, w_other): - # XXX: this doesn't belong here, regardless of how we originally set this up. 
blargh - #if isinstance(w_other, W_SmallLongObject): - # return space.newbool(op(space.int_w(self), w_other.longlong)) - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - i = space.int_w(self) - j = space.int_w(w_other) - return space.newbool(op(i, j)) - return func_with_new_name(f, "descr_" + opname) - - descr_lt = _make_descr_cmp('lt') - descr_le = _make_descr_cmp('le') - descr_eq = _make_descr_cmp('eq') - descr_ne = _make_descr_cmp('ne') - descr_gt = _make_descr_cmp('gt') - descr_ge = _make_descr_cmp('ge') - - def descr_floordiv(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(x // y) - except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer division by zero") - except OverflowError: - w_long1 = _delegate_Int2Long(space, self) - w_long2 = _delegate_Int2Long(space, w_other) - return space.floordiv(w_long1, w_long2) - return wrapint(space, z) - - descr_div = func_with_new_name(descr_floordiv, 'descr_div') - - def descr_truediv(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - x = float(space.int_w(self)) - y = float(space.int_w(w_other)) - if y == 0.0: - raise operationerrfmt(space.w_ZeroDivisionError, - "division by zero") - return space.wrap(x / y) - - def descr_mod(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(x % y) - except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer modulo by zero") - except OverflowError: - w_long1 = _delegate_Int2Long(space, self) - w_long2 = _delegate_Int2Long(space, w_other) - return space.mod(w_long1, w_long2) - return wrapint(space, z) - - def descr_divmod(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(x // y) - except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer divmod by zero") - except OverflowError: - w_long1 = _delegate_Int2Long(space, self) - w_long2 = _delegate_Int2Long(space, w_other) - return space.divmod(w_long1, w_long2) - - # no overflow possible - m = x % y - w = space.wrap - return space.newtuple([w(z), w(m)]) - - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_pow(self, space, w_exponent, w_modulus=None): - if not space.isinstance_w(w_exponent, space.w_int): - return space.w_NotImplemented - if space.is_none(w_modulus): - z = 0 - elif space.isinstance_w(w_modulus, space.w_int): - # XXX: handle long... overflow? 
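# Illustrative sketch (an editor's aside, not taken from the diff): the helper
# _pow_impl in this changeset computes iv ** iw, optionally modulo iz, by
# binary exponentiation (square-and-multiply).  The same loop in plain Python:
def int_pow(base, exp, mod=0):
    if exp < 0:
        raise ValueError("negative exponents are bounced to float pow")
    result = 1
    while exp > 0:
        if exp & 1:
            result *= base
        exp >>= 1                  # shift exponent down by 1 bit
        if exp == 0:
            break
        base *= base               # square the value
        if mod:                    # after a multiplication, reduce modulo mod
            result %= mod
            base %= mod
    if mod:
        result %= mod
    return result

# int_pow(3, 5) == 243 and int_pow(3, 5, 7) == 243 % 7 == 5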
- z = space.int_w(w_modulus) - if z == 0: - raise operationerrfmt(space.w_ValueError, - "pow() 3rd argument cannot be 0") - else: - return self._delegate2longpow(space, w_exponent, w_modulus) - #return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_exponent) - try: - return space.wrap(_pow_impl(space, x, y, z)) - except ValueError: - return self._delegate2longpow(space, w_exponent, w_modulus) - - def _delegate2longpow(self, space, w_exponent, w_modulus): - # XXX: gross - w_long1 = _delegate_Int2Long(space, self) - w_exponent = _delegate_Int2Long(space, w_exponent) - if not space.is_none(w_modulus): - w_modulus = _delegate_Int2Long(space, w_modulus) - return space.pow(w_long1, w_exponent, w_modulus) - - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_rpow(self, space, w_base, w_modulus=None): - if not space.isinstance_w(w_base, space.w_int): - return space.w_NotImplemented - # XXX: this seems like trouble? very likely trouble with int - # subclasses implementing __pow__ - return space.pow(w_base, self, w_modulus) + def descr_pos(self, space): + return self.int(space) + descr_trunc = func_with_new_name(descr_pos, 'descr_trunc') def descr_neg(self, space): a = space.int_w(self) try: x = ovfcheck(-a) except OverflowError: - w_long1 = _delegate_Int2Long(space, self) - return space.neg(w_long1) + if recover_with_smalllong(space): + from pypy.objspace.std.smalllongobject import neg_ovr + return neg_ovr(space, self) + return self.descr_long(space).descr_neg(space) return wrapint(space, x) def descr_abs(self, space): - return self.int(space) if space.int_w(self) >= 0 else self.descr_neg(space) - - def descr_nonzero(self, space): - return space.newbool(space.int_w(self) != 0) - - def descr_invert(self, space): - return wrapint(space, ~space.int_w(self)) - - def descr_lshift(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - a = space.int_w(self) - b = space.int_w(w_other) - if r_uint(b) < LONG_BIT: # 0 <= b < LONG_BIT - try: - c = ovfcheck(a << b) - except OverflowError: - w_long1 = _delegate_Int2Long(space, self) - w_long2 = _delegate_Int2Long(space, w_other) - return space.lshift(w_long1, w_long2) - return wrapint(space, c) - if b < 0: - raise operationerrfmt(space.w_ValueError, "negative shift count") - else: # b >= LONG_BIT - if a == 0: - return self.int(space) - w_long1 = _delegate_Int2Long(space, self) - w_long2 = _delegate_Int2Long(space, w_other) - return space.lshift(w_long1, w_long2) - - def descr_rshift(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - a = space.int_w(self) - b = space.int_w(w_other) - if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) - if b < 0: - raise operationerrfmt(space.w_ValueError, - "negative shift count") - else: # b >= LONG_BIT - if a == 0: - return self.int(space) - if a < 0: - a = -1 - else: - a = 0 - else: - a = a >> b - return wrapint(space, a) - - def descr_and(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - a = space.int_w(self) - b = space.int_w(w_other) - res = a & b - return wrapint(space, res) - - def descr_or(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - a = space.int_w(self) - b = space.int_w(w_other) - res = a | b - return wrapint(space, res) - - def descr_xor(self, space, w_other): - if not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - a = 
space.int_w(self) - b = space.int_w(w_other) - res = a ^ b - return wrapint(space, res) - - descr_rand = func_with_new_name(descr_and, 'descr_rand') - descr_ror = func_with_new_name(descr_or, 'descr_ror') - descr_rxor = func_with_new_name(descr_xor, 'descr_rxor') - - def descr_pos(self, space): - return self.int(space) - - descr_trunc = func_with_new_name(descr_pos, 'descr_trunc') + pos = space.int_w(self) >= 0 + return self.int(space) if pos else self.descr_neg(space) def descr_index(self, space): return self.int(space) @@ -342,13 +300,8 @@ def descr_getnewargs(self, space): return space.newtuple([wrapint(space, space.int_w(self))]) - def descr_repr(self, space): - res = str(self.int_w(space)) - return space.wrap(res) - descr_str = func_with_new_name(descr_repr, 'descr_str') - def descr_conjugate(self, space): - "Returns self, the complex conjugate of any int." + """Returns self, the complex conjugate of any int.""" return space.int(self) def descr_bit_length(self, space): @@ -369,9 +322,15 @@ val >>= 1 return space.wrap(bits) - def descr_get_numerator(self, space): - return space.int(self) - descr_get_real = func_with_new_name(descr_get_numerator, 'descr_get_real') + def descr_repr(self, space): + res = str(self.int_w(space)) + return space.wrap(res) + descr_str = descr_repr + + def descr_format(self, space, w_format_spec): + return newformat.run_formatter(space, w_format_spec, + "format_int_or_long", self, + newformat.INT_KIND) def descr_get_denominator(self, space): return space.wrap(1) @@ -379,8 +338,11 @@ def descr_get_imag(self, space): return space.wrap(0) + descr_get_numerator = descr_get_real = descr_conjugate + class W_IntObject(W_AbstractIntObject): + __slots__ = 'intval' _immutable_fields_ = ['intval'] @@ -393,7 +355,7 @@ return "%s(%d)" % (self.__class__.__name__, self.intval) def is_w(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): + if not isinstance(w_other, W_IntObject): return False if self.user_overridden_class or w_other.user_overridden_class: return self is w_other @@ -414,11 +376,10 @@ def uint_w(self, space): intval = self.intval if intval < 0: - raise OperationError( - space.w_ValueError, - space.wrap("cannot convert negative integer to unsigned")) - else: - return r_uint(intval) + raise operationerrfmt(space.w_ValueError, + "cannot convert negative integer to " + "unsigned") + return r_uint(intval) def bigint_w(self, space): return rbigint.fromint(self.intval) @@ -429,15 +390,29 @@ def int(self, space): if (type(self) is not W_IntObject and space.is_overloaded(self, space.w_int, '__int__')): - return W_Object.int(self, space) + return W_Root.int(self, space) if space.is_w(space.type(self), space.w_int): return self a = self.intval return space.newint(a) -def _delegate_Int2Long(space, w_intobj): - from pypy.objspace.std.longobject import W_LongObject - return W_LongObject.fromint(space, w_intobj.int_w(space)) + +def recover_with_smalllong(space): + # True if there is a chance that a SmallLong would fit when an Int + # does not + return (space.config.objspace.std.withsmalllong and + sys.maxint == 2147483647) + + + at specialize.arg(1) +def ovf2long(space, opname, self, w_other): + if recover_with_smalllong(space) and opname != 'truediv': + from pypy.objspace.std import smalllongobject + op = getattr(smalllongobject, opname + '_ovr') + return op(space, self, w_other) + self = self.descr_long(space) + w_other = w_other.descr_long(space) + return getattr(self, 'descr_' + opname)(space, w_other) # helper for pow() @@ -446,53 +421,49 @@ def 
_pow_impl(space, iv, iw, iz): if iw < 0: if iz != 0: - msg = ("pow() 2nd argument cannot be negative when 3rd argument " - "specified") - raise operationerrfmt(space.w_TypeError, msg) - ## bounce it, since it always returns float + raise operationerrfmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when " + "3rd argument specified") + # bounce it, since it always returns float raise ValueError temp = iv ix = 1 - try: - while iw > 0: - if iw & 1: - ix = ovfcheck(ix*temp) - iw >>= 1 #/* Shift exponent down by 1 bit */ - if iw==0: - break - temp = ovfcheck(temp*temp) #/* Square the value of temp */ - if iz: - #/* If we did a multiplication, perform a modulo */ - ix = ix % iz; - temp = temp % iz; + while iw > 0: + if iw & 1: + ix = ovfcheck(ix * temp) + iw >>= 1 # Shift exponent down by 1 bit + if iw == 0: + break + temp = ovfcheck(temp * temp) # Square the value of temp if iz: - ix = ix % iz - except OverflowError: - raise ValueError + # If we did a multiplication, perform a modulo + ix %= iz + temp %= iz + if iz: + ix %= iz return ix # ____________________________________________________________ def wrapint(space, x): - if space.config.objspace.std.withprebuiltint: - lower = space.config.objspace.std.prebuiltintfrom - upper = space.config.objspace.std.prebuiltintto - # use r_uint to perform a single comparison (this whole function - # is getting inlined into every caller so keeping the branching - # to a minimum is a good idea) - index = r_uint(x - lower) - if index >= r_uint(upper - lower): - w_res = instantiate(W_IntObject) - else: - w_res = W_IntObject.PREBUILT[index] - # obscure hack to help the CPU cache: we store 'x' even into - # a prebuilt integer's intval. This makes sure that the intval - # field is present in the cache in the common case where it is - # quickly reused. (we could use a prefetch hint if we had that) - w_res.intval = x - return w_res + if not space.config.objspace.std.withprebuiltint: + return W_IntObject(x) + lower = space.config.objspace.std.prebuiltintfrom + upper = space.config.objspace.std.prebuiltintto + # use r_uint to perform a single comparison (this whole function is + # getting inlined into every caller so keeping the branching to a + # minimum is a good idea) + index = r_uint(x - lower) + if index >= r_uint(upper - lower): + w_res = instantiate(W_IntObject) else: - return W_IntObject(x) + w_res = W_IntObject.PREBUILT[index] + # obscure hack to help the CPU cache: we store 'x' even into a + # prebuilt integer's intval. This makes sure that the intval field + # is present in the cache in the common case where it is quickly + # reused. 
(we could use a prefetch hint if we had that) + w_res.intval = x + return w_res # ____________________________________________________________ @@ -502,10 +473,9 @@ value = 0 try: value = string_to_int(string, base) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) - except ParseStringOverflowError, e: + except ParseStringError as e: + raise OperationError(space.w_ValueError, space.wrap(e.msg)) + except ParseStringOverflowError as e: w_longval = retry_to_w_long(space, e.parser) return value, w_longval @@ -513,9 +483,8 @@ parser.rewind() try: bigint = rbigint._from_numberstring_parser(parser) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + except ParseStringError as e: + raise OperationError(space.w_ValueError, space.wrap(e.msg)) return space.newlong_from_rbigint(bigint) @unwrap_spec(w_x=WrappedDefault(0)) @@ -543,7 +512,8 @@ # an overflowing long value = space.int_w(w_obj) elif space.isinstance_w(w_value, space.w_str): - value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) + value, w_longval = string_to_int_or_long(space, + space.str_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w string = unicode_to_decimal_w(space, w_value) @@ -552,7 +522,7 @@ # If object supports the buffer interface try: w_buffer = space.buffer(w_value) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_TypeError): raise raise operationerrfmt(space.w_TypeError, @@ -571,18 +541,17 @@ else: try: s = space.str_w(w_value) - except OperationError, e: - raise OperationError(space.w_TypeError, - space.wrap("int() can't convert non-string " - "with explicit base")) + except OperationError as e: + raise operationerrfmt(space.w_TypeError, + "int() can't convert non-string with " + "explicit base") value, w_longval = string_to_int_or_long(space, s, base) if w_longval is not None: if not space.is_w(w_inttype, space.w_int): - raise OperationError(space.w_OverflowError, - space.wrap( - "long int too large to convert to int")) + raise operationerrfmt(space.w_OverflowError, + "long int too large to convert to int") return w_longval elif space.is_w(w_inttype, space.w_int): # common case @@ -596,14 +565,14 @@ W_AbstractIntObject.typedef = StdTypeDef("int", - __doc__ = '''int(x[, base]) -> integer + __doc__ = """int(x[, base]) -> integer Convert a string or number to an integer, if possible. A floating point argument will be truncated towards zero (this does not include a string representation of a floating point number!) When converting a string, use the optional base. It is an error to supply a base when converting a non-string. 
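# Illustrative sketch (an editor's aside; the bounds below are made up and
# merely stand in for the prebuiltintfrom/prebuiltintto config options):
# wrapint above reuses a prebuilt table of wrapper objects for small integers,
# selected with a single unsigned comparison (the r_uint trick).  The idea in
# plain Python:
class Box(object):
    def __init__(self, value):
        self.value = value

_LOWER, _UPPER = -5, 257                   # hypothetical cache range
_PREBUILT = [Box(i) for i in range(_LOWER, _UPPER)]

def box_int(x):
    index = x - _LOWER
    if 0 <= index < len(_PREBUILT):        # plays the role of the r_uint test
        return _PREBUILT[index]            # shared, cached wrapper
    return Box(x)                          # out of range: allocate a fresh one

# box_int(42) is box_int(42)            -> True (same cached object)
# box_int(10 ** 6) is box_int(10 ** 6)  -> False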
If the argument is outside the integer range a long object -will be returned instead.''', +will be returned instead.""", __new__ = interp2app(descr__new__), numerator = typedef.GetSetProperty( @@ -635,12 +604,21 @@ __gt__ = interpindirect2app(W_AbstractIntObject.descr_gt), __ge__ = interpindirect2app(W_AbstractIntObject.descr_ge), - # XXX: rtruediv __floordiv__ = interpindirect2app(W_AbstractIntObject.descr_floordiv), + __rfloordiv__ = interpindirect2app(W_AbstractIntObject.descr_rfloordiv), __div__ = interpindirect2app(W_AbstractIntObject.descr_div), + __rdiv__ = interpindirect2app(W_AbstractIntObject.descr_rdiv), __truediv__ = interpindirect2app(W_AbstractIntObject.descr_truediv), + __rtruediv__ = interpindirect2app(W_AbstractIntObject.descr_rtruediv), __mod__ = interpindirect2app(W_AbstractIntObject.descr_mod), + __rmod__ = interpindirect2app(W_AbstractIntObject.descr_rmod), __divmod__ = interpindirect2app(W_AbstractIntObject.descr_divmod), + __rdivmod__ = interpindirect2app(W_AbstractIntObject.descr_rdivmod), + + __lshift__ = interpindirect2app(W_AbstractIntObject.descr_lshift), + __rlshift__ = interpindirect2app(W_AbstractIntObject.descr_rlshift), + __rshift__ = interpindirect2app(W_AbstractIntObject.descr_rshift), + __rrshift__ = interpindirect2app(W_AbstractIntObject.descr_rrshift), __pow__ = interpindirect2app(W_AbstractIntObject.descr_pow), __rpow__ = interpindirect2app(W_AbstractIntObject.descr_rpow), @@ -648,14 +626,13 @@ __abs__ = interpindirect2app(W_AbstractIntObject.descr_abs), __nonzero__ = interpindirect2app(W_AbstractIntObject.descr_nonzero), __invert__ = interpindirect2app(W_AbstractIntObject.descr_invert), - __lshift__ = interpindirect2app(W_AbstractIntObject.descr_lshift), - __rshift__ = interpindirect2app(W_AbstractIntObject.descr_rshift), __and__ = interpindirect2app(W_AbstractIntObject.descr_and), __rand__ = interpindirect2app(W_AbstractIntObject.descr_rand), __xor__ = interpindirect2app(W_AbstractIntObject.descr_xor), __rxor__ = interpindirect2app(W_AbstractIntObject.descr_rxor), __or__ = interpindirect2app(W_AbstractIntObject.descr_or), __ror__ = interpindirect2app(W_AbstractIntObject.descr_ror), + __pos__ = interpindirect2app(W_AbstractIntObject.descr_pos), __trunc__ = interpindirect2app(W_AbstractIntObject.descr_trunc), __index__ = interpindirect2app(W_AbstractIntObject.descr_index), diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -1,41 +1,35 @@ """The builtin long implementation""" -import sys +import functools from rpython.rlib.rbigint import rbigint from rpython.rlib.rstring import ParseStringError from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) -from pypy.objspace.std import model, newformat -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.model import W_Object -from pypy.objspace.std.noneobject import W_NoneObject +from pypy.objspace.std import newformat +from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.stdtypedef import StdTypeDef def delegate_other(func): - @func_renamer(func.__name__) + @functools.wraps(func) def delegated(self, space, w_other): - if 
space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): + if isinstance(w_other, W_AbstractIntObject): + w_other = w_other.descr_long(space) + elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented - # XXX: if a smalllong, delegate to Long? - assert isinstance(w_other, W_AbstractLongObject) return func(self, space, w_other) return delegated -def _delegate_Int2Long(space, w_intobj): - """int-to-long delegation""" - return W_LongObject.fromint(space, w_intobj.int_w(space)) +class W_AbstractLongObject(W_Root): -class W_AbstractLongObject(W_Object): __slots__ = () def is_w(self, space, w_other): @@ -53,8 +47,8 @@ b = b.lshift(3).or_(rbigint.fromint(tag)) return space.newlong_from_rbigint(b) - def unwrap(w_self, space): #YYYYYY - return w_self.longval() + def unwrap(self, space): + return self.longval() def int(self, space): raise NotImplementedError @@ -62,15 +56,14 @@ def asbigint(self): raise NotImplementedError + # XXX: cleanup, docstrings etc def descr_long(self, space): raise NotImplementedError descr_index = func_with_new_name(descr_long, 'descr_index') descr_trunc = func_with_new_name(descr_long, 'descr_trunc') descr_pos = func_with_new_name(descr_long, 'descr_pos') - # XXX: - def descr_float(self, space): - raise NotImplementedError + descr_float = func_with_new_name(descr_long, 'descr_float') descr_neg = func_with_new_name(descr_long, 'descr_neg') descr_pos = func_with_new_name(descr_long, 'descr_pos') descr_abs = func_with_new_name(descr_long, 'descr_abs') @@ -100,7 +93,9 @@ descr_rxor = func_with_new_name(descr_lt, 'descr_rxor') descr_lshift = func_with_new_name(descr_lt, 'descr_lshift') + descr_rlshift = func_with_new_name(descr_lt, 'descr_rlshift') descr_rshift = func_with_new_name(descr_lt, 'descr_rshift') + descr_rrshift = func_with_new_name(descr_lt, 'descr_rrshift') descr_floordiv = func_with_new_name(descr_lt, 'descr_floordiv') descr_rfloordiv = func_with_new_name(descr_lt, 'descr_rfloordiv') @@ -119,20 +114,19 @@ return newformat.run_formatter(space, w_format_spec, "format_int_or_long", self, newformat.LONG_KIND) - def descr_repr(self, space): - return space.wrap(self.asbigint().repr()) - def descr_str(self, space): - return space.wrap(self.asbigint().str()) + def _make_descr_unaryop(opname): + op = getattr(rbigint, opname) + @func_renamer('descr_' + opname) + def descr_unaryop(self, space): + return space.wrap(op(self.asbigint())) + return descr_unaryop - def descr_hash(self, space): - return space.wrap(self.asbigint().hash()) - - def descr_oct(self, space): - return space.wrap(self.asbigint().oct()) - - def descr_hex(self, space): - return space.wrap(self.asbigint().hex()) + descr_repr = _make_descr_unaryop('repr') + descr_str = _make_descr_unaryop('str') + descr_hash = _make_descr_unaryop('hash') + descr_oct = _make_descr_unaryop('oct') + descr_hex = _make_descr_unaryop('hex') def descr_getnewargs(self, space): return space.newtuple([W_LongObject(self.asbigint())]) @@ -145,14 +139,11 @@ try: return space.wrap(bigint.bit_length()) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("too many digits in integer")) + raise operationerrfmt(space.w_OverflowError, + "too many digits in integer") - # XXX: need rtruediv - @delegate_other - def descr_truediv(self, space, w_other): + def _truediv(self, space, w_other): try: - #f = self.num.truediv(w_other.num) f = self.asbigint().truediv(w_other.asbigint()) except ZeroDivisionError: raise 
operationerrfmt(space.w_ZeroDivisionError, @@ -163,8 +154,15 @@ return space.newfloat(f) @delegate_other + def descr_truediv(self, space, w_other): + return W_AbstractLongObject._truediv(self, space, w_other) + + @delegate_other + def descr_rtruediv(self, space, w_other): + return W_AbstractLongObject._truediv(w_other, space, self) + + @delegate_other def descr_coerce(self, space, w_other): - # XXX: consider stian's branch where he optimizes long + ints return space.newtuple([self, w_other]) def descr_get_numerator(self, space): @@ -180,10 +178,11 @@ class W_LongObject(W_AbstractLongObject): """This is a wrapper of rbigint.""" + _immutable_fields_ = ['num'] - def __init__(self, l): - self.num = l # instance of rbigint + def __init__(self, num): + self.num = num # instance of rbigint def fromint(space, intval): return W_LongObject(rbigint.fromint(intval)) @@ -219,18 +218,20 @@ try: return self.num.toint() except OverflowError: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to int")) + raise operationerrfmt(space.w_OverflowError, + "long int too large to convert to int") def uint_w(self, space): try: return self.num.touint() except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "cannot convert negative integer to unsigned int")) + raise operationerrfmt(space.w_ValueError, + "cannot convert negative integer to " + "unsigned int") except OverflowError: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to unsigned int")) + raise operationerrfmt(space.w_OverflowError, + "long int too large to convert to unsigned " + "int") def bigint_w(self, space): return self.num @@ -241,7 +242,7 @@ def int(self, space): if (type(self) is not W_LongObject and space.is_overloaded(self, space.w_long, '__int__')): - return W_Object.int(self, space) + return W_Root.int(self, space) try: return space.newint(self.num.toint()) except OverflowError: @@ -254,35 +255,22 @@ return '' % self.num.tolong() def descr_long(self, space): - # long__Long is supposed to do nothing, unless it has a derived + # __long__ is supposed to do nothing, unless it has a derived # long object, where it should return an exact one. if space.is_w(space.type(self), space.w_long): return self - l = self.num - return W_LongObject(l) - descr_index = func_with_new_name(descr_long, 'descr_index') - descr_trunc = func_with_new_name(descr_long, 'descr_trunc') - descr_pos = func_with_new_name(descr_long, 'descr_pos') + return W_LongObject(self.num) + descr_index = descr_trunc = descr_pos = descr_long def descr_float(self, space): return space.newfloat(self.tofloat(space)) def _make_descr_cmp(opname): - #from pypy.objspace.std.smalllongobject import W_SmallLongObject op = getattr(rbigint, opname) @delegate_other def descr_impl(self, space, w_other): - ## XXX: these only need explicit SmallLong support whereas - ## everything else would delegate2Long. blah blah - #if isinstance(w_other, W_SmallLongObject): - # result = op(self.num, w_other.asbigint()) - #else: - # result = op(self.num, w_other.num) - #return space.newbool(result) - - # XXX: if we use self.asbigint then can this live on - # AbstractLong? eek not really, a '_cmp' (_lt) could live on - # it that just did this (without the checks..) 
+ # XXX: previous impl had all kinds of shortcuts between + # smalllong and int/long return space.newbool(op(self.num, w_other.asbigint())) return func_with_new_name(descr_impl, "descr_" + opname) @@ -293,8 +281,7 @@ descr_gt = _make_descr_cmp('gt') descr_ge = _make_descr_cmp('ge') - def _make_descr_binop(opname): - from rpython.tool.sourcetools import func_renamer + def _make_generic_descr_binop(opname): methname = opname + '_' if opname in ('and', 'or') else opname op = getattr(rbigint, methname) @@ -306,19 +293,19 @@ @func_renamer('descr_r' + opname) @delegate_other def descr_rbinop(self, space, w_other): + # XXX: delegate, for --objspace-std-withsmalllong return W_LongObject(op(w_other.asbigint(), self.num)) return descr_binop, descr_rbinop - descr_add, descr_radd = _make_descr_binop('add') - descr_sub, descr_rsub = _make_descr_binop('sub') - descr_mul, descr_rmul = _make_descr_binop('mul') - descr_and, descr_rand = _make_descr_binop('and') - descr_or, descr_ror = _make_descr_binop('or') - descr_xor, descr_rxor = _make_descr_binop('xor') + descr_add, descr_radd = _make_generic_descr_binop('add') + descr_sub, descr_rsub = _make_generic_descr_binop('sub') + descr_mul, descr_rmul = _make_generic_descr_binop('mul') + descr_and, descr_rand = _make_generic_descr_binop('and') + descr_or, descr_ror = _make_generic_descr_binop('or') + descr_xor, descr_rxor = _make_generic_descr_binop('xor') def _make_descr_unaryop(opname): - from rpython.tool.sourcetools import func_renamer op = getattr(rbigint, opname) @func_renamer('descr_' + opname) def descr_unaryop(self, space): @@ -332,9 +319,25 @@ def descr_nonzero(self, space): return space.newbool(self.num.tobool()) - @delegate_other - def descr_lshift(self, space, w_other): - # XXX need to replicate some of the logic, to get the errors right + def _make_descr_binop(func): + opname = func.__name__[1:] + + @delegate_other + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + return func(self, space, w_other) + + @delegate_other + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + if not isinstance(w_other, W_LongObject): + # coerce other W_AbstractLongObjects + w_other = W_LongObject(w_other.asbigint()) + return func(w_other, space, self) + + return descr_binop, descr_rbinop + + def _lshift(self, space, w_other): if w_other.asbigint().sign < 0: raise operationerrfmt(space.w_ValueError, "negative shift count") try: @@ -343,10 +346,9 @@ raise operationerrfmt(space.w_OverflowError, "shift count too large") return W_LongObject(self.num.lshift(shift)) + descr_lshift, descr_rlshift = _make_descr_binop(_lshift) - @delegate_other - def descr_rshift(self, space, w_other): - # XXX need to replicate some of the logic, to get the errors right + def _rshift(self, space, w_other): if w_other.asbigint().sign < 0: raise operationerrfmt(space.w_ValueError, "negative shift count") try: @@ -355,102 +357,79 @@ raise operationerrfmt(space.w_OverflowError, "shift count too large") return newlong(space, self.num.rshift(shift)) + descr_rshift, descr_rrshift = _make_descr_binop(_rshift) - @delegate_other - def descr_floordiv(self, space, w_other): + def _floordiv(self, space, w_other): try: z = self.num.floordiv(w_other.asbigint()) except ZeroDivisionError: raise operationerrfmt(space.w_ZeroDivisionError, "long division or modulo by zero") return newlong(space, z) - descr_div = func_with_new_name(descr_floordiv, 'descr_div') + descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) - @delegate_other - def descr_mod(self, 
space, w_other): + _div = func_with_new_name(_floordiv, '_div') + descr_div, descr_rdiv = _make_descr_binop(_div) + + def _mod(self, space, w_other): try: z = self.num.mod(w_other.asbigint()) except ZeroDivisionError: raise operationerrfmt(space.w_ZeroDivisionError, "long division or modulo by zero") return newlong(space, z) + descr_mod, descr_rmod = _make_descr_binop(_mod) - @delegate_other - def descr_divmod(self, space, w_other): + def _divmod(self, space, w_other): try: div, mod = self.num.divmod(w_other.asbigint()) except ZeroDivisionError: raise operationerrfmt(space.w_ZeroDivisionError, "long division or modulo by zero") return space.newtuple([newlong(space, div), newlong(space, mod)]) + descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) - #@delegate_other # XXX: @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_modulus=None): - if space.isinstance_w(w_exponent, space.w_int): - w_exponent = _delegate_Int2Long(space, w_exponent) - elif not space.isinstance_w(w_exponent, space.w_long): + if isinstance(w_exponent, W_AbstractIntObject): + w_exponent = w_exponent.descr_long(space) + elif not isinstance(w_exponent, W_AbstractLongObject): return space.w_NotImplemented - assert isinstance(w_exponent, W_AbstractLongObject) - #if space.is_none(w_modulus): - # from pypy.objspace.std.floatobject import delegate_Long2Float - # self = delegate_Long2Float(space, self) - # w_exponent = delegate_Long2Float(space, w_exponent) - # return space.pow(self, w_exponent, w_modulus) - #elif space.isinstance_w(w_modulus, space.w_int): if space.is_none(w_modulus): - # XXX need to replicate some of the logic, to get the errors right if w_exponent.asbigint().sign < 0: - from pypy.objspace.std.floatobject import delegate_Long2Float - w_exponent = delegate_Long2Float(space, w_exponent) - # XXX: hack around multimethod annoyances for now (when - # w_modulus=None) - return space.pow(self.descr_float(space), w_exponent, space.w_None if w_modulus is None else w_modulus) - return W_LongObject(self.num.pow(w_exponent.asbigint(), None)) - elif space.isinstance_w(w_modulus, space.w_int): - w_modulus = _delegate_Int2Long(space, w_modulus) - #elif space.is_none(w_modulus): - # # XXX need to replicate some of the logic, to get the errors right - # if w_exponent.num.sign < 0: - # return space.pow(self.descr_float(space), w_exponent, w_modulus) - # return W_LongObject(self.num.pow(w_exponent.num, None)) - elif not space.isinstance_w(w_modulus, space.w_long): + self = self.descr_float(space) + w_exponent = w_exponent.descr_float(space) + return space.pow(self, w_exponent, space.w_None) + return W_LongObject(self.num.pow(w_exponent.asbigint())) + elif isinstance(w_modulus, W_AbstractIntObject): + w_modulus = w_modulus.descr_long(space) + elif not isinstance(w_modulus, W_AbstractLongObject): return space.w_NotImplemented - assert isinstance(w_modulus, W_AbstractLongObject) if w_exponent.asbigint().sign < 0: - raise OperationError( - space.w_TypeError, - space.wrap( - "pow() 2nd argument " - "cannot be negative when 3rd argument specified")) + raise operationerrfmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when " + "3rd argument specified") try: - return W_LongObject(self.num.pow(w_exponent.asbigint(), - w_modulus.asbigint())) + result = self.num.pow(w_exponent.asbigint(), w_modulus.asbigint()) except ValueError: - raise OperationError(space.w_ValueError, - space.wrap("pow 3rd argument cannot be 0")) + raise operationerrfmt(space.w_ValueError, + "pow 3rd argument cannot 
be 0") + return W_LongObject(result) @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_rpow(self, space, w_exponent, w_modulus=None): - if space.isinstance_w(w_exponent, space.w_int): - w_exponent = _delegate_Int2Long(space, w_exponent) - elif not space.isinstance_w(w_exponent, space.w_long): + def descr_rpow(self, space, w_base, w_modulus=None): + if isinstance(w_base, W_AbstractIntObject): + w_base = w_base.descr_long(space) + elif not isinstance(w_base, W_AbstractLongObject): return space.w_NotImplemented - ### XXX: these may needs all the checks above has. annoying - #if not space.isinstance_w(w_exponent, space.w_long): - # return space.w_NotImplemented - # XXX: - return space.pow(w_exponent, self, w_modulus) - - def descr_getnewargs(self, space): - return space.newtuple([W_LongObject(self.num)]) + return w_base.descr_pow(space, self, w_modulus) def newlong(space, bigint): - """Turn the bigint into a W_LongObject. If withsmalllong is enabled, - check if the bigint would fit in a smalllong, and return a + """Turn the bigint into a W_LongObject. If withsmalllong is + enabled, check if the bigint would fit in a smalllong, and return a W_SmallLongObject instead if it does. """ if space.config.objspace.std.withsmalllong: @@ -464,67 +443,6 @@ return W_LongObject(bigint) -# register implementations of ops that recover int op overflows -def recover_with_smalllong(space): - # True if there is a chance that a SmallLong would fit when an Int does not - return (space.config.objspace.std.withsmalllong and - sys.maxint == 2147483647) - -# XXX: -# binary ops -for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', - 'divmod', 'lshift']: - exec compile(""" -def %(opname)s_ovr__Int_Int(space, w_int1, w_int2): - if recover_with_smalllong(space) and %(opname)r != 'truediv': - from pypy.objspace.std.smalllongobject import %(opname)s_ovr - return %(opname)s_ovr(space, w_int1, w_int2) - w_long1 = _delegate_Int2Long(space, w_int1) - w_long2 = _delegate_Int2Long(space, w_int2) - #return %(opname)s__Long_Long(space, w_long1, w_long2) - return w_long1.descr_%(opname)s(space, w_long2) -""" % {'opname': opname}, '', 'exec') - - getattr(model.MM, opname).register(globals()['%s_ovr__Int_Int' % opname], - W_IntObject, W_IntObject, order=1) - -# unary ops -for opname in ['neg', 'abs']: - exec """ -def %(opname)s_ovr__Int(space, w_int1): - if recover_with_smalllong(space): - from pypy.objspace.std.smalllongobject import %(opname)s_ovr - return %(opname)s_ovr(space, w_int1) - w_long1 = _delegate_Int2Long(space, w_int1) - #return %(opname)s__Long(space, w_long1) - return w_long1.descr_%(opname)s(space) -""" % {'opname': opname} - - getattr(model.MM, opname).register(globals()['%s_ovr__Int' % opname], - W_IntObject, order=1) - -# pow -def pow_ovr__Int_Int_None(space, w_int1, w_int2, w_none3): - if recover_with_smalllong(space): - from pypy.objspace.std.smalllongobject import pow_ovr - return pow_ovr(space, w_int1, w_int2) - w_long1 = _delegate_Int2Long(space, w_int1) - w_long2 = _delegate_Int2Long(space, w_int2) - #return pow__Long_Long_None(space, w_long1, w_long2, w_none3) - return w_long1.descr_pow(space, w_long2, w_none3) - -def pow_ovr__Int_Int_Long(space, w_int1, w_int2, w_long3): - w_long1 = _delegate_Int2Long(space, w_int1) - w_long2 = _delegate_Int2Long(space, w_int2) - #return pow__Long_Long_Long(space, w_long1, w_long2, w_long3) - return w_long1.descr_pow(space, w_long2, w_long3) - -model.MM.pow.register(pow_ovr__Int_Int_None, W_IntObject, W_IntObject, - W_NoneObject, order=1) 
-model.MM.pow.register(pow_ovr__Int_Int_Long, W_IntObject, W_IntObject, - W_LongObject, order=1) - - @unwrap_spec(w_x=WrappedDefault(0)) def descr__new__(space, w_longtype, w_x, w_base=None): if space.config.objspace.std.withsmalllong: @@ -580,8 +498,9 @@ try: s = space.str_w(w_value) except OperationError: - msg = "long() can't convert non-string with explicit base" - raise operationerrfmt(space.w_TypeError, msg) + raise operationerrfmt(space.w_TypeError, + "long() can't convert non-string with " + "explicit base") return string_to_w_long(space, w_longtype, s, base) @@ -633,7 +552,6 @@ conjugate = interp2app(W_AbstractLongObject.descr_conjugate), bit_length = interp2app(W_AbstractLongObject.descr_bit_length), - # XXX: likely need indirect everything for SmallLong __int__ = interpindirect2app(W_AbstractLongObject.int), __long__ = interpindirect2app(W_AbstractLongObject.descr_long), __index__ = interpindirect2app(W_AbstractLongObject.descr_index), @@ -678,14 +596,20 @@ __hex__ = interp2app(W_AbstractLongObject.descr_hex), __lshift__ = interpindirect2app(W_AbstractLongObject.descr_lshift), + __rlshift__ = interpindirect2app(W_AbstractLongObject.descr_rlshift), __rshift__ = interpindirect2app(W_AbstractLongObject.descr_rshift), + __rrshift__ = interpindirect2app(W_AbstractLongObject.descr_rrshift), - # XXX: all these need r sides __truediv__ = interp2app(W_AbstractLongObject.descr_truediv), + __rtruediv__ = interp2app(W_AbstractLongObject.descr_rtruediv), __floordiv__ = interpindirect2app(W_AbstractLongObject.descr_floordiv), + __rfloordiv__ = interpindirect2app(W_AbstractLongObject.descr_rfloordiv), __div__ = interpindirect2app(W_AbstractLongObject.descr_div), + __rdiv__ = interpindirect2app(W_AbstractLongObject.descr_rdiv), __mod__ = interpindirect2app(W_AbstractLongObject.descr_mod), + __rmod__ = interpindirect2app(W_AbstractLongObject.descr_rmod), __divmod__ = interpindirect2app(W_AbstractLongObject.descr_divmod), + __rdivmod__ = interpindirect2app(W_AbstractLongObject.descr_rdivmod), __pow__ = interpindirect2app(W_AbstractLongObject.descr_pow), __rpow__ = interpindirect2app(W_AbstractLongObject.descr_rpow), diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -28,6 +28,7 @@ from pypy.objspace.std.stringobject import W_StringObject from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.longobject import W_LongObject, newlong +from pypy.objspace.std.smalllongobject import W_SmallLongObject from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.unicodeobject import W_UnicodeObject @@ -211,7 +212,7 @@ m.start(TYPE_LONG) SHIFT = 15 MASK = (1 << SHIFT) - 1 - num = w_long.num + num = w_long.asbigint() sign = num.sign num = num.abs() total_length = (num.bit_length() + (SHIFT - 1)) / SHIFT @@ -221,6 +222,7 @@ next = num.abs_rshift_and_mask(bigshiftcount, MASK) m.put_short(next) bigshiftcount += SHIFT +marshal_w__SmallLong = marshal_w__Long def unmarshal_Long(space, u, tc): from rpython.rlib.rbigint import rbigint diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -39,7 +39,6 @@ from pypy.objspace.std.bytearraytype import bytearray_typedef from pypy.objspace.std.typeobject import type_typedef from pypy.objspace.std.slicetype import slice_typedef - #from pypy.objspace.std.longtype import long_typedef from pypy.objspace.std.unicodetype import unicode_typedef 
from pypy.objspace.std.nonetype import none_typedef self.pythontypes = [value for key, value in result.__dict__.items() @@ -86,8 +85,10 @@ # the set of implementation types self.typeorder = { objectobject.W_ObjectObject: [], + # XXX: Bool/Int/Long are pythontypes but still included here + # for delegation to Float/Complex boolobject.W_BoolObject: [], - intobject.W_IntObject: [], # XXX: (And self.typeorder[intobject] below) + intobject.W_IntObject: [], floatobject.W_FloatObject: [], stringobject.W_StringObject: [], bytearrayobject.W_BytearrayObject: [], @@ -108,7 +109,6 @@ if option.startswith("with") and option in option_to_typename: for classname in option_to_typename[option]: modname = classname[:classname.index('.')] - if modname == 'smalllongobject': continue # XXX: classname = classname[classname.index('.')+1:] d = {} exec "from pypy.objspace.std.%s import %s" % ( @@ -136,26 +136,16 @@ # XXX build these lists a bit more automatically later self.typeorder[boolobject.W_BoolObject] += [ -# (intobject.W_IntObject, boolobject.delegate_Bool2IntObject), (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), -# (longobject.W_LongObject, longobject.delegate_Bool2Long), (complexobject.W_ComplexObject, complexobject.delegate_Bool2Complex), ] self.typeorder[intobject.W_IntObject] += [ (floatobject.W_FloatObject, floatobject.delegate_Int2Float), -# (longobject.W_LongObject, longobject.delegate_Int2Long), (complexobject.W_ComplexObject, complexobject.delegate_Int2Complex), ] - if False and config.objspace.std.withsmalllong: + if config.objspace.std.withsmalllong: from pypy.objspace.std import smalllongobject - self.typeorder[boolobject.W_BoolObject] += [ - (smalllongobject.W_SmallLongObject, smalllongobject.delegate_Bool2SmallLong), - ] - self.typeorder[intobject.W_IntObject] += [ - (smalllongobject.W_SmallLongObject, smalllongobject.delegate_Int2SmallLong), - ] self.typeorder[smalllongobject.W_SmallLongObject] += [ - (longobject.W_LongObject, smalllongobject.delegate_SmallLong2Long), (floatobject.W_FloatObject, smalllongobject.delegate_SmallLong2Float), (complexobject.W_ComplexObject, smalllongobject.delegate_SmallLong2Complex), ] diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -7,15 +7,14 @@ from rpython.rlib.rarithmetic import LONGLONG_BIT, intmask, r_longlong, r_uint from rpython.rlib.rbigint import rbigint -from rpython.tool.sourcetools import func_with_new_name +from rpython.tool.sourcetools import func_renamer, func_with_new_name -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import operationerrfmt from pypy.interpreter.gateway import WrappedDefault, unwrap_spec -from pypy.objspace.std.multimethod import FailedToImplementArgs +from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject -from pypy.objspace.std.intobject import _delegate_Int2Long -LONGLONG_MIN = r_longlong((-1) << (LONGLONG_BIT-1)) +LONGLONG_MIN = r_longlong(-1 << (LONGLONG_BIT - 1)) class W_SmallLongObject(W_AbstractLongObject): @@ -48,21 +47,20 @@ b = intmask(a) if b == a: return b - else: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to int")) + raise operationerrfmt(space.w_OverflowError, + "long int too large to convert to int") def uint_w(self, space): a = self.longlong if a < 0: - raise OperationError(space.w_ValueError, space.wrap( - 
"cannot convert negative integer to unsigned int")) + raise operationerrfmt(space.w_ValueError, + "cannot convert negative integer to " + "unsigned int") b = r_uint(a) if r_longlong(b) == a: return b - else: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to unsigned int")) + raise operationerrfmt(space.w_OverflowError, + "long int too large to convert to unsigned int") def bigint_w(self, space): return self.asbigint() @@ -73,37 +71,32 @@ def int(self, space): a = self.longlong b = intmask(a) - if b == a: - return space.newint(b) - else: - return self + return space.newint(b) if b == a else self def descr_long(self, space): - # XXX: do subclasses never apply here? - return self - descr_index = func_with_new_name(descr_long, 'descr_index') - descr_trunc = func_with_new_name(descr_long, 'descr_trunc') - descr_pos = func_with_new_name(descr_long, 'descr_pos') - - def descr_index(self, space): - return self + if space.is_w(space.type(self), space.w_long): + return self + return W_SmallLongObject(self.longlong) + descr_index = descr_trunc = descr_pos = descr_long def descr_float(self, space): return space.newfloat(float(self.longlong)) def _make_descr_cmp(opname): op = getattr(operator, opname) - def descr_impl(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): + bigint_op = getattr(rbigint, opname) + @func_renamer('descr_' + opname) + def descr_cmp(self, space, w_other): + if isinstance(w_other, W_AbstractIntObject): result = op(self.longlong, w_other.int_w(space)) - elif not space.isinstance_w(w_other, space.w_long): + elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented elif isinstance(w_other, W_SmallLongObject): result = op(self.longlong, w_other.longlong) else: - result = getattr(self.asbigint(), opname)(w_other.num) + result = bigint_op(self.asbigint(), w_other.asbigint()) return space.newbool(result) - return func_with_new_name(descr_impl, "descr_" + opname) + return descr_cmp descr_lt = _make_descr_cmp('lt') descr_le = _make_descr_cmp('le') @@ -113,50 +106,51 @@ descr_ge = _make_descr_cmp('ge') def _make_descr_binop(func): - # XXX: so if w_other is Long, what do we do? sigh - # how to handle delegation with descr_add on longobject? 
opname = func.__name__[1:] - methname = opname + '_' if opname in ('and', 'or') else opname + descr_name = 'descr_' + opname + descr_rname = 'descr_r' + opname - def descr_impl(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): + @func_renamer(descr_name) + def descr_binop(self, space, w_other): + if isinstance(w_other, W_AbstractIntObject): w_other = delegate_Int2SmallLong(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): + elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented elif not isinstance(w_other, W_SmallLongObject): self = delegate_SmallLong2Long(space, self) - return getattr(space, methname)(self, w_other) + return getattr(self, descr_name)(space, w_other) try: return func(self, space, w_other) except OverflowError: self = delegate_SmallLong2Long(space, self) w_other = delegate_SmallLong2Long(space, w_other) - return getattr(space, methname)(self, w_other) + return getattr(self, descr_name)(space, w_other) - def descr_rimpl(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): + @func_renamer(descr_rname) + def descr_rbinop(self, space, w_other): + if isinstance(w_other, W_AbstractIntObject): w_other = delegate_Int2SmallLong(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): + elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented elif not isinstance(w_other, W_SmallLongObject): self = delegate_SmallLong2Long(space, self) - return getattr(space, methname)(w_other, self) + return getattr(self, descr_rname)(space, w_other) try: return func(w_other, space, self) except OverflowError: self = delegate_SmallLong2Long(space, self) w_other = delegate_SmallLong2Long(space, w_other) - return getattr(space, methname)(w_other, self) + return getattr(self, descr_rname)(space, w_other) - return descr_impl, descr_rimpl + return descr_binop, descr_rbinop def _add(self, space, w_other): x = self.longlong y = w_other.longlong z = x + y - if ((z^x)&(z^y)) < 0: + if ((z ^ x) & (z ^ y)) < 0: raise OverflowError return W_SmallLongObject(z) descr_add, descr_radd = _make_descr_binop(_add) @@ -165,7 +159,7 @@ x = self.longlong y = w_other.longlong z = x - y - if ((z^x)&(z^~y)) < 0: + if ((z ^ x) & (z ^ ~y)) < 0: raise OverflowError return W_SmallLongObject(z) descr_sub, descr_rsub = _make_descr_binop(_sub) @@ -185,11 +179,8 @@ raise OverflowError z = x // y except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer division by zero")) - #except OverflowError: - # raise FailedToImplementArgs(space.w_OverflowError, - # space.wrap("integer division")) + raise operationerrfmt(space.w_ZeroDivisionError, + "integer division by zero") return W_SmallLongObject(z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) @@ -204,11 +195,8 @@ raise OverflowError z = x % y except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer modulo by zero")) - #except OverflowError: - # raise FailedToImplementArgs(space.w_OverflowError, - # space.wrap("integer modulo")) + raise operationerrfmt(space.w_ZeroDivisionError, + "integer modulo by zero") return W_SmallLongObject(z) descr_mod, descr_rmod = _make_descr_binop(_mod) @@ -220,136 +208,96 @@ raise OverflowError z = x // y except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer divmod by zero")) - #except OverflowError: - # raise FailedToImplementArgs(space.w_OverflowError, - # space.wrap("integer modulo")) + raise 
operationerrfmt(space.w_ZeroDivisionError, + "integer divmod by zero") # no overflow possible m = x % y return space.newtuple([W_SmallLongObject(z), W_SmallLongObject(m)]) descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) - # XXX: @unwrap_spec(w_modulus=WrappedDefault(None)) - #def descr_pow__SmallLong_Int_SmallLong(self, space, w_exponent, def descr_pow(self, space, w_exponent, w_modulus=None): - if space.isinstance_w(w_exponent, space.w_long): + if isinstance(w_exponent, W_AbstractLongObject): self = delegate_SmallLong2Long(space, self) - return space.pow(self, w_exponent, w_modulus) - elif not space.isinstance_w(w_exponent, space.w_int): + return self.descr_pow(space, w_exponent, w_modulus) + elif not isinstance(w_exponent, W_AbstractIntObject): return space.w_NotImplemented - - # XXX: this expects w_exponent as an int o_O - """ - if space.isinstance_w(w_exponent, space.w_int): - w_exponent = delegate_Int2SmallLong(space, w_exponent) - elif not space.isinstance_w(w_exponent, space.w_long): - return space.w_NotImplemented - elif not isinstance(w_exponent, W_SmallLongObject): - self = delegate_SmallLong2Long(space, self) - return space.pow(self, w_exponent, w_modulus) - """ if space.is_none(w_modulus): - #return _impl_pow(space, self.longlong, w_exponent) try: - return _impl_pow(space, self.longlong, w_exponent) + return _pow_impl(space, self.longlong, w_exponent) except ValueError: - self = delegate_SmallLong2Float(space, self) + self = self.descr_float(space) + return space.pow(self, w_exponent, space.w_None) except OverflowError: self = delegate_SmallLong2Long(space, self) - return space.pow(self, w_exponent, w_modulus) - elif space.isinstance_w(w_modulus, space.w_int): + return self.descr_pow(space, w_exponent, w_modulus) + elif isinstance(w_modulus, W_AbstractIntObject): w_modulus = delegate_Int2SmallLong(space, w_modulus) - elif not space.isinstance_w(w_modulus, space.w_long): + elif not isinstance(w_modulus, W_AbstractLongObject): return space.w_NotImplemented elif not isinstance(w_modulus, W_SmallLongObject): self = delegate_SmallLong2Long(space, self) - #return space.pow(self, w_modulus, w_modulus) - return space.pow(self, w_exponent, w_modulus) + return self.descr_pow(space, w_exponent, w_modulus) z = w_modulus.longlong if z == 0: - raise OperationError(space.w_ValueError, - space.wrap("pow() 3rd argument cannot be 0")) + raise operationerrfmt(space.w_ValueError, + "pow() 3rd argument cannot be 0") try: - return _impl_pow(space, self.longlong, w_exponent, z) + return _pow_impl(space, self.longlong, w_exponent, z) except ValueError: - self = delegate_SmallLong2Float(space, self) + self = self.descr_float(space) + return space.pow(self, w_exponent, w_modulus) except OverflowError: self = delegate_SmallLong2Long(space, self) - return space.pow(self, w_exponent, w_modulus) + return self.descr_pow(space, w_exponent, w_modulus) - # XXX: @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_rpow(self, space, w_exponent, w_modulus=None): - # XXX: blargh - if space.isinstance_w(w_exponent, space.w_int): - w_exponent = _delegate_Int2Long(space, w_exponent) - elif not space.isinstance_w(w_exponent, space.w_long): + def descr_rpow(self, space, w_base, w_modulus=None): + if isinstance(w_base, W_AbstractIntObject): + # Defer to w_base.descr_pow + # XXX: W_AbstractIntObject.descr_long could return + # SmallLongs then it could used instead of + # delegate_Int2SmallLong + w_base = delegate_Int2SmallLong(space, w_base) + elif not isinstance(w_base, W_AbstractLongObject): return 
space.w_NotImplemented - return space.pow(w_exponent, self, w_modulus) + return w_base.descr_pow(space, self, w_modulus) - #def descr_lshift__SmallLong_Int(space, w_small1, w_int2): - def descr_lshift(self, space, w_other): - if space.isinstance_w(w_other, space.w_long): - self = delegate_SmallLong2Long(space, self) - w_other = delegate_SmallLong2Long(space, w_other) - return space.lshift(self, w_other) - elif not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - + def _lshift(self, space, w_other): a = self.longlong - b = w_other.intval + # May overflow + b = space.int_w(w_other) if r_uint(b) < LONGLONG_BIT: # 0 <= b < LONGLONG_BIT - try: - c = a << b - if a != (c >> b): - raise OverflowError - except OverflowError: - #raise FailedToImplementArgs(space.w_OverflowError, - # space.wrap("integer left shift")) - self = delegate_SmallLong2Long(space, self) - w_other = _delegate_Int2Long(space, w_other) - return space.lshift(self, w_other) + c = a << b + if a != (c >> b): + raise OverflowError return W_SmallLongObject(c) if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - else: #b >= LONGLONG_BIT + raise operationerrfmt(space.w_ValueError, "negative shift count") + # b >= LONGLONG_BIT + if a == 0: + return self + raise OverflowError + descr_lshift, descr_rlshift = _make_descr_binop(_lshift) + + def _rshift(self, space, w_other): + a = self.longlong + # May overflow + b = space.int_w(w_other) + if r_uint(b) >= LONGLONG_BIT: # not (0 <= b < LONGLONG_BIT) + if b < 0: + raise operationerrfmt(space.w_ValueError, + "negative shift count") + # b >= LONGLONG_BIT if a == 0: return self - #raise FailedToImplementArgs(space.w_OverflowError, - # space.wrap("integer left shift")) - self = delegate_SmallLong2Long(space, self) - w_other = _delegate_Int2Long(space, w_other) - return space.lshift(self, w_other) - - def descr_rshift(self, space, w_other): - if space.isinstance_w(w_other, space.w_long): - self = delegate_SmallLong2Long(space, self) - w_other = delegate_SmallLong2Long(space, w_other) - return space.rshift(self, w_other) - elif not space.isinstance_w(w_other, space.w_int): - return space.w_NotImplemented - - a = self.longlong - b = w_other.intval From noreply at buildbot.pypy.org Sat Dec 28 02:02:40 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 28 Dec 2013 02:02:40 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: int's typedef is now here Message-ID: <20131228010240.286231C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68562:e0cd2c82cef7 Date: 2013-12-27 17:02 -0800 http://bitbucket.org/pypy/pypy/changeset/e0cd2c82cef7/ Log: int's typedef is now here diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -681,7 +681,7 @@ ) W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, - (W_SignedIntegerBox.typedef, int_typedef), + (W_SignedIntegerBox.typedef, W_IntObject.typedef), __module__ = "numpy", __new__ = interp2app(W_LongBox.descr__new__.im_func), __index__ = interp2app(W_LongBox.descr_index), From noreply at buildbot.pypy.org Sat Dec 28 19:35:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 28 Dec 2013 19:35:51 +0100 (CET) Subject: [pypy-commit] stmgc c5: in-progress Message-ID: <20131228183551.2D3221C116A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r584:595f211e4f53 Date: 2013-12-23 12:06 +0100 
http://bitbucket.org/pypy/stmgc/changeset/595f211e4f53/ Log: in-progress diff --git a/c5/Makefile b/c5/Makefile --- a/c5/Makefile +++ b/c5/Makefile @@ -2,11 +2,11 @@ H_FILES = core.h pagecopy.h C_FILES = core.c pagecopy.c -CLANG = clang -Wall -ferror-limit=3 -fno-color-diagnostics +CLANG = clang -Wall -ferror-limit=3 -fno-color-diagnostics -fno-exceptions demo1: demo1.c $(C_FILES) $(H_FILES) - $(CLANG) -pthread -o $@ -O2 -g demo1.c $(C_FILES) + $(CLANG) -pthread -o $@ -O0 -g demo1.c $(C_FILES) demo2: demo2.c largemalloc.c largemalloc.h $(CLANG) -o $@ -g demo2.c largemalloc.c diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -109,7 +109,7 @@ total address space available is huge. */ -#define NB_PAGES (256*1024) // 1GB +#define NB_PAGES (64*1024) // 256MB #define NB_THREADS 16 #define MAP_PAGES_FLAGS (MAP_SHARED|MAP_ANONYMOUS|MAP_NORESERVE) @@ -132,7 +132,6 @@ struct write_history_s { struct write_history_s *previous_older_transaction; - uint16_t transaction_version; uint32_t nb_updates; struct write_entry_s updates[]; }; @@ -143,14 +142,15 @@ at the cache line level --- we don't want the following few variables to be accidentally in the same cache line. */ char pad0[CACHE_LINE_SIZE]; uint64_t volatile index_page_never_used; + char pad1[CACHE_LINE_SIZE]; uint16_t volatile next_transaction_version; char pad2[CACHE_LINE_SIZE]; struct write_history_s * volatile most_recent_committed_transaction; char pad3[CACHE_LINE_SIZE]; }; struct alloc_for_size_s { - char *next; - char *end; + GCOBJECT char *next; + GCOBJECT char *end; }; typedef GCOBJECT struct _thread_local2_s { @@ -180,7 +180,7 @@ _Bool _stm_was_written(object_t *object) { - return (object->flags & GCFLAG_WRITE_BARRIER) == 0; + return (object->modif_version == _STM_TL1.transaction_version); } @@ -238,6 +238,8 @@ page = fetch_thread_local_page(page); + object->modif_version = _STM_TL1.transaction_version; + uint32_t write_log_index = page->write_log_index_cache; struct write_history_s *log = _STM_TL2.writes_by_this_transaction; @@ -251,6 +253,7 @@ log->updates[write_log_index].bitmask[1] = 0; log->updates[write_log_index].bitmask[2] = 0; log->updates[write_log_index].bitmask[3] = 0; + page->write_log_index_cache = write_log_index; } assert(byte_ofs16 < 256); @@ -258,54 +261,38 @@ (1UL << (byte_ofs16 & 63)); } -#if 0 -char *_stm_alloc_next_page(size_t i) +GCOBJECT char *_stm_alloc_next_page(size_t i) { + /* NB. 'newpage' points to the "object pages" area, but it is casted + to a 'GCOBJECT char *', which if dereferenced will produce a %gs + segment prefix and so will address the thread-local pages area. 
*/ struct page_header_s *newpage = _stm_reserve_page(); - newpage->modif_head = 0xff; - newpage->kind = i; /* object size in words */ - newpage->version = 0; /* a completely new page doesn't need a version */ - stm_local.alloc[i].next = ((char *)(newpage + 1)) + (i * 8); - stm_local.alloc[i].end = ((char *)newpage) + 4096; - assert(stm_local.alloc[i].next <= stm_local.alloc[i].end); - return (char *)(newpage + 1); + newpage->obj_word_size = i; + newpage->thread_local_copy = 0; + _STM_TL2.alloc[i].next = ((GCOBJECT char *)(newpage + 1)) + (i * 8); + _STM_TL2.alloc[i].end = ((GCOBJECT char *)newpage) + 4096; + assert(_STM_TL2.alloc[i].next <= _STM_TL2.alloc[i].end); + return (GCOBJECT char *)(newpage + 1); } -struct object_s *stm_allocate(size_t size) +object_t *stm_allocate(size_t size) { assert(size % 8 == 0); size_t i = size / 8; assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX - struct alloc_for_size_s *alloc = &stm_local.alloc[i]; + GCOBJECT struct alloc_for_size_s *alloc = &_STM_TL2.alloc[i]; - char *p = alloc->next; + GCOBJECT char *p = alloc->next; alloc->next += size; if (alloc->next > alloc->end) p = _stm_alloc_next_page(i); - struct object_s *result = (struct object_s *)p; - result->modified = stm_transaction_version; - /*result->modif_next is uninitialized*/ - result->flags = 0x42; /* for debugging */ + object_t *result = (object_t *)p; + result->modif_version = _STM_TL1.transaction_version; return result; } - -unsigned char stm_get_read_marker_number(void) -{ - return (unsigned char)(uintptr_t)stm_current_read_markers; -} - -void stm_set_read_marker_number(uint8_t num) -{ - char *stm_pages = ((char *)stm_shared_descriptor) + 4096; - uintptr_t delta = ((uintptr_t)stm_pages) >> 4; - struct _read_marker_s *crm = (struct _read_marker_s *)stm_local.read_markers; - stm_current_read_markers = crm - delta; - assert(stm_get_read_marker_number() == 0); - stm_current_read_markers += num; -} - +#if 0 static void clear_all_read_markers(void) { /* set the largest possible read marker number, to find the last @@ -353,6 +340,7 @@ abort(); } stm_shared_descriptor.index_page_never_used = 0; + stm_shared_descriptor.next_transaction_version = 1; } void _stm_teardown(void) @@ -364,7 +352,7 @@ static void set_gs_register(uint64_t value) { - int result = syscall(SYS_arch_prctl, ARCH_SET_GS, &value); + int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value); assert(result == 0); } @@ -404,7 +392,8 @@ uint64_t nb_rm_pages = (NB_PAGES + 15) >> 4; if (mmap(local_RM_pages(gs_value), nb_rm_pages * 4096UL, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) { + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + -1, 0) == MAP_FAILED) { munmap(local_L0_pages(gs_value), 2 * 4096UL); thnum++; continue; @@ -417,6 +406,8 @@ perror("remap_file_pages in stm_setup_thread"); abort(); } + /* XXX check if the following call doesn't force all pages to be + reserved or even zero-filled eagerly */ res = remap_file_pages(stm_object_pages + gs_value, NB_PAGES * 4096UL, 0, 0, MAP_PAGES_FLAGS); if (res < 0) { @@ -426,6 +417,8 @@ set_gs_register(gs_value); _STM_TL2.gs_value = gs_value; _STM_TL1.read_marker = 1; + + fprintf(stderr, "new thread starting at %d (gs=0x%lx)\n", thnum, gs_value); return thnum; } @@ -568,36 +561,28 @@ stm_local.base_page_mapping = new; return conflict; } +#endif void stm_start_transaction(void) { - struct shared_descriptor_s *d = stm_shared_descriptor; - unsigned int v = __sync_fetch_and_add(&d->next_transaction_version, 2u); + struct shared_descriptor_s *d = &stm_shared_descriptor; 
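The thread setup above is what makes the GCOBJECT trick work: once set_gs_register() has pointed %gs at this thread's block of pages, clang emits every dereference of an __attribute__((address_space(256))) pointer as a %gs-prefixed access, so the same numeric address reaches the thread's private mapping rather than the shared one. A stand-alone sketch of just that addressing mechanism (illustrative; TL plays the role of GCOBJECT and nothing is actually dereferenced):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/prctl.h>

    #define TL __attribute__((address_space(256)))  /* clang: %gs segment */

    void gs_addressing_demo(uint64_t gs_value)
    {
        /* same system call as set_gs_register() above */
        syscall(SYS_arch_prctl, ARCH_SET_GS, gs_value);

        char    *flat = (char *)0x2000;      /* absolute address 0x2000 */
        TL char *seg  = (TL char *)0x2000;   /* %gs-relative address    */

        /* a load through 'seg' would be emitted with a %gs prefix and
           so would really read gs_value + 0x2000 -- the thread-local
           copy of that page -- while a load through 'flat' would read
           the shared underlying page. */
        (void)flat; (void)seg;
    }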
+ uint16_t v = __sync_fetch_and_add(&d->next_transaction_version, 1); assert(v <= 0xffff);//XXX - assert((v & 1) == 0); /* EVEN number */ - assert(v >= 2); - stm_transaction_version = v; + _STM_TL1.transaction_version = v; - struct write_history_s *cur = NULL; - if (stm_local.writes_by_this_transaction != NULL) { - cur = stm_local.writes_by_this_transaction; - char *next, *page_limit = (char *)cur; - page_limit += 4096 - (((uintptr_t)page_limit) & 4095); - next = (char *)(cur + 1) + 8 * cur->nb_updates; - if (page_limit - next < sizeof(struct write_history_s) + 8) - cur = NULL; - else - cur = (struct write_history_s *)next; - } - if (cur == NULL) { - cur = _reserve_page_write_history(); - } - assert(cur != d->most_recent_committed_transaction); - cur->previous_older_transaction = NULL; - cur->transaction_version = stm_transaction_version; - cur->nb_updates = 0; - stm_local.writes_by_this_transaction = cur; + assert(_STM_TL2.writes_by_this_transaction == NULL); + _STM_TL2.nb_updates_max = 4; /* XXX for now */ + + struct write_history_s *log = (struct write_history_s *) + malloc(sizeof(struct write_history_s) + + _STM_TL2.nb_updates_max * sizeof(struct write_entry_s)); + assert(log != NULL); + + log->nb_updates = 0; + _STM_TL2.writes_by_this_transaction = log; + +#if 0 struct write_history_s *hist = d->most_recent_committed_transaction; if (hist != stm_local.base_page_mapping) { history_fast_forward(hist, 1); @@ -614,43 +599,43 @@ usually it is read-only */ } } +#endif } _Bool stm_stop_transaction(void) { - struct shared_descriptor_s *d = stm_shared_descriptor; - assert(stm_local.writes_by_this_transaction != NULL); int conflict = 0; + struct shared_descriptor_s *d = &stm_shared_descriptor; + struct write_history_s *cur = _STM_TL2.writes_by_this_transaction; + assert(cur != NULL); + _STM_TL2.writes_by_this_transaction = NULL; //fprintf(stderr, "stm_stop_transaction\n"); - struct write_history_s *cur_head = stm_local.writes_by_this_transaction; - struct write_history_s *cur_tail = cur_head; - while (cur_tail->previous_older_transaction != NULL) { - cur_tail = cur_tail->previous_older_transaction; - } - while (1) { struct write_history_s *hist = d->most_recent_committed_transaction; - if (hist != stm_local.base_page_mapping) { - conflict = history_fast_forward(hist, 0); + if (hist != _STM_TL2.base_page_mapping) { + abort(); + //XXX conflict = history_fast_forward(hist, 0); if (conflict) break; else continue; /* retry from the start of the loop */ } - assert(cur_head == stm_local.writes_by_this_transaction); - cur_tail->previous_older_transaction = hist; + cur->previous_older_transaction = hist; if (__sync_bool_compare_and_swap(&d->most_recent_committed_transaction, - hist, cur_head)) + hist, cur)) break; } - if (stm_get_read_marker_number() < 0xff) { - stm_current_read_markers++; + if (conflict) { + free(cur); + } + + if (_STM_TL1.read_marker < 0xff) { + _STM_TL1.read_marker++; } else { - clear_all_read_markers(); + abort();//XXX clear_all_read_markers(); } return !conflict; } -#endif diff --git a/c5/core.h b/c5/core.h --- a/c5/core.h +++ b/c5/core.h @@ -5,11 +5,9 @@ #define GCOBJECT __attribute__((address_space(256))) -#define GCFLAG_WRITE_BARRIER 0x01 - typedef GCOBJECT struct object_s { /* Every objects starts with one such structure */ - uint8_t flags; + uint16_t modif_version; } object_t; struct _read_marker_s { @@ -21,6 +19,7 @@ typedef GCOBJECT struct _thread_local1_s { uint8_t read_marker; + uint16_t transaction_version; } _thread_local1_t; #define _STM_TL1 (((_thread_local1_t 
*)0)[-1]) @@ -46,7 +45,8 @@ static inline void stm_write(object_t *object) { - if (__builtin_expect((object->flags & GCFLAG_WRITE_BARRIER) != 0, 0)) + uint16_t tversion = _STM_TL1.transaction_version; + if (__builtin_expect(object->modif_version != tversion, 0)) _stm_write_barrier_slowpath(object); } From noreply at buildbot.pypy.org Sat Dec 28 19:35:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 28 Dec 2013 19:35:50 +0100 (CET) Subject: [pypy-commit] stmgc c5: in-progress Message-ID: <20131228183550.098751C10F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r583:89a559713dd1 Date: 2013-12-23 11:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/89a559713dd1/ Log: in-progress diff --git a/c5/Makefile b/c5/Makefile --- a/c5/Makefile +++ b/c5/Makefile @@ -2,12 +2,14 @@ H_FILES = core.h pagecopy.h C_FILES = core.c pagecopy.c +CLANG = clang -Wall -ferror-limit=3 -fno-color-diagnostics + demo1: demo1.c $(C_FILES) $(H_FILES) - gcc -pthread -o $@ -O2 -g demo1.c $(C_FILES) -Wall + $(CLANG) -pthread -o $@ -O2 -g demo1.c $(C_FILES) demo2: demo2.c largemalloc.c largemalloc.h - gcc -o $@ -g demo2.c largemalloc.c -Wall + $(CLANG) -o $@ -g demo2.c largemalloc.c clean: rm -f demo1 demo2 diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -1,7 +1,11 @@ #define _GNU_SOURCE #include #include +#include #include +#include +#include +#include #include #include #include @@ -12,7 +16,7 @@ /* This only works with clang, and on 64-bit Linux, for now. It depends on: - + * the %gs segment prefix This a hack using __attribute__((address_space(256))) on @@ -106,7 +110,7 @@ */ #define NB_PAGES (256*1024) // 1GB -#define NB_THREADS 128 +#define NB_THREADS 16 #define MAP_PAGES_FLAGS (MAP_SHARED|MAP_ANONYMOUS|MAP_NORESERVE) #define CACHE_LINE_SIZE 128 // conservatively large value to avoid aliasing @@ -115,34 +119,33 @@ struct page_header_s { /* Every page starts with one such structure */ - uint16_t version; /* when the data in the page was written */ uint8_t obj_word_size; /* size of all objects in this page, in words in range(2, LARGE_OBJECT_WORDS) */ - uint32_t write_log_index; + _Bool thread_local_copy; + uint32_t write_log_index_cache; }; -struct write_log_s { - uint32_t pgoff; - uint32_t modif[8]; /* N'th bit set if and only if object at N*16 changed */ -}; +struct write_entry_s { + uint32_t pgoff; /* the pgoff of the page that was modified */ + uint64_t bitmask[4]; /* bit N is set if object at 'N*16' was modified */ +} __attribute__((packed)); struct write_history_s { struct write_history_s *previous_older_transaction; uint16_t transaction_version; - struct write_log_s log[]; /* ends with pgoff == 0 */ + uint32_t nb_updates; + struct write_entry_s updates[]; }; struct shared_descriptor_s { /* There is a single shared descriptor. This contains global - variables, but as a structure, in order to control the sharing at - the cache line level --- we don't want the following few + variables, but as a structure, in order to control the aliasing + at the cache line level --- we don't want the following few variables to be accidentally in the same cache line. 
*/ - char _pad0[CACHE_LINE_SIZE]; uint64_t volatile index_page_never_used; - char _pad1[CACHE_LINE_SIZE]; unsigned int volatile next_transaction_version; - /* always EVEN */ - char _pad2[CACHE_LINE_SIZE]; struct write_history_s * - volatile most_recent_committed_transaction; - char _pad3[CACHE_LINE_SIZE]; + char pad0[CACHE_LINE_SIZE]; uint64_t volatile index_page_never_used; + char pad2[CACHE_LINE_SIZE]; struct write_history_s * + volatile most_recent_committed_transaction; + char pad3[CACHE_LINE_SIZE]; }; struct alloc_for_size_s { @@ -154,139 +157,108 @@ /* All the thread-local variables we need. */ struct write_history_s *base_page_mapping; struct write_history_s *writes_by_this_transaction; + uint32_t nb_updates_max; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; - char *read_markers; + uint64_t gs_value; _thread_local1_t _stm_tl1; /* space for the macro _STM_TL1 in core.h */ } _thread_local2_t; #define _STM_TL2 (((_thread_local2_t *)0)[-1]) +char *stm_object_pages; struct shared_descriptor_s stm_shared_descriptor; +volatile int stm_next_thread_index; /************************************************************/ -_Bool _stm_was_read(struct object_s *object) +_Bool _stm_was_read(object_t *object) { - return (stm_current_read_markers[((uintptr_t)object) >> 4].c == - (unsigned char)(uintptr_t)stm_current_read_markers); + return _STM_CRM[((uintptr_t)object) >> 4].c == _STM_TL1.read_marker; } -static struct _read_marker_s *get_current_read_marker(struct object_s *object) +_Bool _stm_was_written(object_t *object) { - struct _read_marker_s *crm = _STM_TL1.stm_current_read_markers; - return crm + (((uintptr_t)object) >> 4); -} - -_Bool _stm_was_written(struct object_s *object) -{ - uint16_t stv = _STM_TL1.stm_transaction_version; - return (object->modified == stv); + return (object->flags & GCFLAG_WRITE_BARRIER) == 0; } struct page_header_s *_stm_reserve_page(void) { /* Grab a free mm page, and map it into the address space. - Return a pointer to it. It has kind == PGKIND_FREED. */ + Return a pointer to it. */ // XXX look in some free list first - /* Return the index'th mm page, which is so far NEVER_USED. It - should never have been accessed so far, and be already mapped - as the index'th local page. */ - struct shared_descriptor_s *d = stm_shared_descriptor; - uint64_t index = __sync_fetch_and_add(&d->index_page_never_used, 1); + /* Return the index'th object page, which is so far never used. */ + uint64_t index = __sync_fetch_and_add( + &stm_shared_descriptor.index_page_never_used, 1); if (index >= NB_PAGES) { fprintf(stderr, "Out of mmap'ed memory!\n"); abort(); } - struct page_header_s *result = (struct page_header_s *) - (((char *)stm_shared_descriptor) + index * 4096); - assert(result->kind == PGKIND_NEVER_USED); - result->kind = PGKIND_FREED; - result->pgoff = index; - return result; + return (struct page_header_s *)(stm_object_pages + index * 4096UL); } -static struct write_history_s *_reserve_page_write_history(void) + +static struct page_header_s * +fetch_thread_local_page(struct page_header_s *page) { - struct page_header_s *newpage = _stm_reserve_page(); - newpage->kind = PGKIND_WRITE_HISTORY; - return (struct write_history_s *)(newpage + 1); + struct page_header_s *mypage = (struct page_header_s *) + (((char *)page) + _STM_TL2.gs_value); + + if (!mypage->thread_local_copy) { + /* make a thread-local copy of that page, by remapping the page + back to its underlying page and manually copying the data. 
*/ + uint64_t fileofs = ((char *)mypage) - stm_object_pages; + + if (remap_file_pages((void *)mypage, 4096, 0, fileofs / 4096, + MAP_PAGES_FLAGS) < 0) { + perror("remap_file_pages in write_barrier"); + abort(); + } + pagecopy(mypage, page); + mypage->thread_local_copy = 1; + } + return mypage; } - -static uint32_t get_pgoff(struct page_header_s *page) -{ - assert(page->pgoff > 0); - assert(page->pgoff < NB_PAGES); - return page->pgoff; -} - -static uint32_t get_local_index(struct page_header_s *page) -{ - uint64_t index = ((char *)page) - (char *)stm_shared_descriptor; - assert((index & 4095) == 0); - index /= 4096; - assert(0 < index && index < NB_PAGES); - return index; -} - -static struct page_header_s *get_page_by_local_index(uint32_t index) -{ - assert(0 < index && index < NB_PAGES); - uint64_t ofs = ((uint64_t)index) * 4096; - return (struct page_header_s *)(((char *)stm_shared_descriptor) + ofs); -} - -void _stm_write_slowpath(struct object_s * object) +void _stm_write_barrier_slowpath(object_t *object) { stm_read(object); struct page_header_s *page; page = (struct page_header_s *)(((uintptr_t)object) & ~4095); - assert(2 <= page->kind && page->kind < LARGE_OBJECT_WORDS); + assert(2 <= page->obj_word_size); + assert(page->obj_word_size < LARGE_OBJECT_WORDS); - if (page->version != stm_transaction_version) { - struct page_header_s *newpage = _stm_reserve_page(); - uint32_t old_pgoff = get_pgoff(page); - uint32_t new_pgoff = get_pgoff(newpage); + uint32_t byte_ofs16 = (((char *)object) - (char *)page) / 16; + uint32_t pgoff = (((char *)page) - stm_object_pages) / 4096; - pagecopy(newpage, page); - newpage->version = stm_transaction_version; - newpage->modif_head = 0xff; - newpage->pgoff = new_pgoff; - assert(page->version != stm_transaction_version); - assert(page->pgoff == old_pgoff); + page = fetch_thread_local_page(page); - remap_file_pages((void *)page, 4096, 0, new_pgoff, MAP_PAGES_FLAGS); + uint32_t write_log_index = page->write_log_index_cache; + struct write_history_s *log = _STM_TL2.writes_by_this_transaction; - assert(page->version == stm_transaction_version); - assert(page->pgoff == new_pgoff); + if (write_log_index >= log->nb_updates || + log->updates[write_log_index].pgoff != pgoff) { + /* make a new entry for this page in the write log */ + write_log_index = log->nb_updates++; + assert(log->nb_updates <= _STM_TL2.nb_updates_max); // XXX resize + log->updates[write_log_index].pgoff = pgoff; + log->updates[write_log_index].bitmask[0] = 0; + log->updates[write_log_index].bitmask[1] = 0; + log->updates[write_log_index].bitmask[2] = 0; + log->updates[write_log_index].bitmask[3] = 0; + } - struct write_history_s *cur = stm_local.writes_by_this_transaction; - size_t history_size_max = 4096 - (((uintptr_t)cur) & 4095); - if (sizeof(*cur) + (cur->nb_updates + 1) * 8 > history_size_max) { - /* The buffer would overflow its page. Allocate a new one. 
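Seen on its own, the bookkeeping done by _stm_write_barrier_slowpath() above is very small: a 4096-byte page holds at most 4096/16 = 256 sixteen-byte units, so one write_entry_s records all modifications of a page in four 64-bit words. A sketch of just the marking step, assuming the write_entry_s layout from this patch:

    #include <stdint.h>

    struct write_entry_s {
        uint32_t pgoff;       /* which page was modified             */
        uint64_t bitmask[4];  /* bit N set => object at N*16 changed */
    } __attribute__((packed));

    static void mark_modified(struct write_entry_s *e,
                              uintptr_t object, uintptr_t page)
    {
        uint32_t byte_ofs16 = (uint32_t)((object - page) / 16);  /* 0..255 */
        e->bitmask[byte_ofs16 / 64] |= 1UL << (byte_ofs16 % 64);
    }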
*/ - cur = _reserve_page_write_history(); - cur->previous_older_transaction = - stm_local.writes_by_this_transaction; - cur->transaction_version = stm_transaction_version; - cur->nb_updates = 0; - stm_local.writes_by_this_transaction = cur; - } - uint64_t i = cur->nb_updates++; - cur->updates[i * 2 + 0] = get_local_index(page); - cur->updates[i * 2 + 1] = new_pgoff; - } - object->modified = stm_transaction_version; - object->modif_next = page->modif_head; - page->modif_head = (uint8_t)(((uintptr_t)object) >> 4); - assert(page->modif_head != 0xff); + assert(byte_ofs16 < 256); + log->updates[write_log_index].bitmask[byte_ofs16 / 64] |= + (1UL << (byte_ofs16 & 63)); } +#if 0 char *_stm_alloc_next_page(size_t i) { struct page_header_s *newpage = _stm_reserve_page(); @@ -353,6 +325,7 @@ } stm_set_read_marker_number(1); } +#endif void stm_setup(void) { @@ -364,50 +337,115 @@ fprintf(stderr, "Cannot use more than 1<<32 pages of memory"); abort(); } - char *stm_pages = mmap(NULL, NB_PAGES*4096ul, PROT_READ|PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (stm_pages == MAP_FAILED) { - perror("mmap stm_pages failed"); + + /* For now, just prepare to make the layout given at the start of + this file, with the RM pages interleaved with the L-0 blocks. + The actual L-0-RM pages are allocated by each thread. */ + uint64_t addr_rm_base = (NB_PAGES + 1) * 4096UL; + uint64_t addr_object_pages = addr_rm_base << 4; + + stm_object_pages = mmap((void *)addr_object_pages, + (NB_PAGES * 4096UL) * NB_THREADS, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS | MAP_FIXED, -1, 0); + if (stm_object_pages == MAP_FAILED) { + perror("mmap stm_object_pages failed"); abort(); } - assert(sizeof(struct shared_descriptor_s) <= 4096); - stm_shared_descriptor = (struct shared_descriptor_s *)stm_pages; - stm_shared_descriptor->header.kind = PGKIND_SHARED_DESCRIPTOR; - /* the page at index 0 contains the '*stm_shared_descriptor' structure */ - /* the page at index 1 is reserved for history_fast_forward() */ - stm_shared_descriptor->index_page_never_used = 2; - stm_shared_descriptor->next_transaction_version = 2; + stm_shared_descriptor.index_page_never_used = 0; } void _stm_teardown(void) { - munmap((void *)stm_shared_descriptor, NB_PAGES*4096); - stm_shared_descriptor = NULL; + munmap((void *)stm_object_pages, (NB_PAGES * 4096UL) * NB_THREADS); + stm_object_pages = NULL; + memset(&stm_shared_descriptor, 0, sizeof(stm_shared_descriptor)); } -void stm_setup_process(void) +static void set_gs_register(uint64_t value) { - memset(&stm_local, 0, sizeof(stm_local)); - stm_local.read_markers = mmap(NULL, NB_PAGES*(4096 >> 4) + 1, - PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, - -1, 0); - if (stm_local.read_markers == MAP_FAILED) { - perror("mmap stm_read_markers failed"); + int result = syscall(SYS_arch_prctl, ARCH_SET_GS, &value); + assert(result == 0); +} + +static char *local_L0_pages(uint64_t gs_value) +{ + return (char *)(gs_value - 4096UL); +} + +static char *local_RM_pages(uint64_t gs_value) +{ + return (char*)gs_value + (((uint64_t)stm_object_pages) >> 4); +} + +int stm_setup_thread(void) +{ + int res; + int thnum = stm_next_thread_index; + int tries = 2 * NB_THREADS; + uint64_t gs_value; + while (1) { + thnum %= NB_THREADS; + stm_next_thread_index = thnum + 1; + + if (!--tries) { + fprintf(stderr, "too many threads or too many non-fitting mmap\n"); + abort(); + } + + gs_value = (thnum+1) * 4096UL * NB_PAGES; + + if (mmap(local_L0_pages(gs_value), 2 * 4096UL, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) { + 
thnum++; + continue; + } + + uint64_t nb_rm_pages = (NB_PAGES + 15) >> 4; + if (mmap(local_RM_pages(gs_value), nb_rm_pages * 4096UL, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) { + munmap(local_L0_pages(gs_value), 2 * 4096UL); + thnum++; + continue; + } + break; + } + + res = mprotect(local_L0_pages(gs_value), 4096, PROT_READ | PROT_WRITE); + if (res < 0) { + perror("remap_file_pages in stm_setup_thread"); abort(); } - - assert((stm_set_read_marker_number(42), - stm_get_read_marker_number() == 42)); - stm_set_read_marker_number(1); + res = remap_file_pages(stm_object_pages + gs_value, NB_PAGES * 4096UL, 0, + 0, MAP_PAGES_FLAGS); + if (res < 0) { + perror("remap_file_pages in stm_setup_thread"); + abort(); + } + set_gs_register(gs_value); + _STM_TL2.gs_value = gs_value; + _STM_TL1.read_marker = 1; + return thnum; } -void _stm_teardown_process(void) +void _stm_restore_state_for_thread(int thread_num) { - munmap((void *)stm_local.read_markers, NB_PAGES*(4096 >> 4) + 1); - memset(&stm_local, 0, sizeof(stm_local)); + uint64_t gs_value = (thread_num + 1) * 4096UL * NB_PAGES; + set_gs_register(gs_value); + assert(_STM_TL2.gs_value == gs_value); } +void _stm_teardown_thread(void) +{ + uint64_t gs_value = _STM_TL2.gs_value; + uint64_t nb_rm_pages = (NB_PAGES + 15) >> 4; + munmap(local_RM_pages(gs_value), nb_rm_pages * 4096UL); + munmap(local_L0_pages(gs_value), 2 * 4096UL); + /* accessing _STM_TL2 is invalid here */ +} + +#if 0 static size_t get_obj_size_in_words(struct page_header_s *page) { size_t result = page->kind; @@ -615,47 +653,4 @@ } return !conflict; } - -#ifdef STM_TESTS -struct local_data_s *_stm_save_local_state(void) -{ - uint64_t i, page_count = stm_shared_descriptor->index_page_never_used; - uint32_t *pgoffs; - struct local_data_s *p = malloc(sizeof(struct local_data_s) + - page_count * sizeof(uint32_t)); - assert(p != NULL); - memcpy(p, &stm_local, sizeof(stm_local)); - p->_current_read_markers = stm_current_read_markers; - p->_transaction_version = stm_transaction_version; - - pgoffs = (uint32_t *)(p + 1); - pgoffs[0] = page_count; - for (i = 2; i < page_count; i++) { - pgoffs[i] = get_pgoff(get_page_by_local_index(i)); - } - - return p; -} - -void _stm_restore_local_state(struct local_data_s *p) -{ - uint64_t i, page_count; - uint32_t *pgoffs; - - remap_file_pages((void *)stm_shared_descriptor, 4096 * NB_PAGES, - 0, 0, MAP_PAGES_FLAGS); - - pgoffs = (uint32_t *)(p + 1); - page_count = pgoffs[0]; - for (i = 2; i < page_count; i++) { - struct page_header_s *page = get_page_by_local_index(i); - remap_file_pages((void *)page, 4096, 0, pgoffs[i], MAP_PAGES_FLAGS); - assert(get_pgoff(page) == pgoffs[i]); - } - - memcpy(&stm_local, p, sizeof(struct local_data_s)); - stm_current_read_markers = p->_current_read_markers; - stm_transaction_version = p->_transaction_version; - free(p); -} #endif diff --git a/c5/core.h b/c5/core.h --- a/c5/core.h +++ b/c5/core.h @@ -5,11 +5,11 @@ #define GCOBJECT __attribute__((address_space(256))) +#define GCFLAG_WRITE_BARRIER 0x01 + typedef GCOBJECT struct object_s { /* Every objects starts with one such structure */ - uint16_t modified; uint8_t flags; - uint8_t reserved; } object_t; struct _read_marker_s { @@ -20,43 +20,41 @@ }; typedef GCOBJECT struct _thread_local1_s { - struct _read_marker_s *stm_current_read_markers; - uint16_t stm_transaction_version; /* always EVEN */ + uint8_t read_marker; } _thread_local1_t; #define _STM_TL1 (((_thread_local1_t *)0)[-1]) +#define _STM_CRM ((GCOBJECT struct _read_marker_s *)0) + 
/************************************************************/ void stm_setup(void); -void stm_setup_process(void); +int stm_setup_thread(void); void stm_start_transaction(void); _Bool stm_stop_transaction(void); -struct object_s *stm_allocate(size_t size); +object_t *stm_allocate(size_t size); -static inline void stm_read(struct object_s *object) +static inline void stm_read(object_t *object) { - struct _read_marker_s *crm = _STM_TL1.stm_current_read_markers; - crm[((uintptr_t)object) >> 4].c = (unsigned char)(uintptr_t)crm; + _STM_CRM[((uintptr_t)object) >> 4].c = _STM_TL1.read_marker; } -void _stm_write_slowpath(struct object_s *); +void _stm_write_barrier_slowpath(object_t *); -static inline void stm_write(struct object_s *object) +static inline void stm_write(object_t *object) { - uint16_t stv = _STM_TL1.stm_transaction_version; - if (__builtin_expect(object->modified != stv, 0)) - _stm_write_slowpath(object); + if (__builtin_expect((object->flags & GCFLAG_WRITE_BARRIER) != 0, 0)) + _stm_write_barrier_slowpath(object); } -_Bool _stm_was_read(struct object_s *object); -_Bool _stm_was_written(struct object_s *object); +_Bool _stm_was_read(object_t *object); +_Bool _stm_was_written(object_t *object); -struct local_data_s *_stm_save_local_state(void); -void _stm_restore_local_state(struct local_data_s *p); +void _stm_restore_state_for_thread(int thread_num); void _stm_teardown(void); -void _stm_teardown_process(void); +void _stm_teardown_thread(void); #endif diff --git a/c5/demo1.c b/c5/demo1.c --- a/c5/demo1.c +++ b/c5/demo1.c @@ -14,7 +14,7 @@ typedef struct { - struct object_s header; + object_t header; int val1, val2; } obj_t; @@ -66,7 +66,7 @@ static void *run_in_thread(void *arg) { - stm_setup_process(); + stm_setup_thread(); do_run_in_thread((intptr_t)arg); return NULL; } From noreply at buildbot.pypy.org Sat Dec 28 19:35:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 28 Dec 2013 19:35:52 +0100 (CET) Subject: [pypy-commit] stmgc c5: check-in before yet another change Message-ID: <20131228183552.341FD1C1177@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r585:a1b6ff2e7d25 Date: 2013-12-23 16:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/a1b6ff2e7d25/ Log: check-in before yet another change diff --git a/c5/core.c b/c5/core.c --- a/c5/core.c +++ b/c5/core.c @@ -121,8 +121,8 @@ /* Every page starts with one such structure */ uint8_t obj_word_size; /* size of all objects in this page, in words in range(2, LARGE_OBJECT_WORDS) */ - _Bool thread_local_copy; - uint32_t write_log_index_cache; + uint8_t thread_local_copy; + uint32_t reserved; }; struct write_entry_s { @@ -131,7 +131,8 @@ } __attribute__((packed)); struct write_history_s { - struct write_history_s *previous_older_transaction; + struct write_history_s *next_transaction; + uint32_t refcount; /* #threads that have this as their base */ uint32_t nb_updates; struct write_entry_s updates[]; }; @@ -331,6 +332,7 @@ uint64_t addr_rm_base = (NB_PAGES + 1) * 4096UL; uint64_t addr_object_pages = addr_rm_base << 4; + assert(stm_object_pages == NULL); stm_object_pages = mmap((void *)addr_object_pages, (NB_PAGES * 4096UL) * NB_THREADS, PROT_READ | PROT_WRITE, @@ -345,6 +347,7 @@ void _stm_teardown(void) { + assert(stm_object_pages != NULL); munmap((void *)stm_object_pages, (NB_PAGES * 4096UL) * NB_THREADS); stm_object_pages = NULL; memset(&stm_shared_descriptor, 0, sizeof(stm_shared_descriptor)); @@ -366,11 +369,16 @@ return (char*)gs_value + (((uint64_t)stm_object_pages) >> 4); } +static uint64_t 
get_gs_value(void) +{ + return _STM_TL2.gs_value; +} + int stm_setup_thread(void) { int res; int thnum = stm_next_thread_index; - int tries = 2 * NB_THREADS; + int tries = 2 * NB_THREADS + 1; uint64_t gs_value; while (1) { thnum %= NB_THREADS; @@ -418,7 +426,7 @@ _STM_TL2.gs_value = gs_value; _STM_TL1.read_marker = 1; - fprintf(stderr, "new thread starting at %d (gs=0x%lx)\n", thnum, gs_value); + fprintf(stderr, "new thread %d starting (%%gs=0x%lx)\n", thnum, gs_value); return thnum; } @@ -582,12 +590,12 @@ log->nb_updates = 0; _STM_TL2.writes_by_this_transaction = log; -#if 0 struct write_history_s *hist = d->most_recent_committed_transaction; - if (hist != stm_local.base_page_mapping) { + if (hist != _STM_TL2.base_page_mapping) { history_fast_forward(hist, 1); } +#if 0 int i; for (i = 2; i < LARGE_OBJECT_WORDS; i++) { struct page_header_s *page; @@ -614,8 +622,7 @@ while (1) { struct write_history_s *hist = d->most_recent_committed_transaction; if (hist != _STM_TL2.base_page_mapping) { - abort(); - //XXX conflict = history_fast_forward(hist, 0); + conflict = history_fast_forward(hist, 0); if (conflict) break; else From noreply at buildbot.pypy.org Sat Dec 28 19:35:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 28 Dec 2013 19:35:53 +0100 (CET) Subject: [pypy-commit] stmgc c6: Another attempt based on: Message-ID: <20131228183553.463791C12F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r586:b5ba6eb4d6dc Date: 2013-12-28 17:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/b5ba6eb4d6dc/ Log: Another attempt based on: https://bitbucket.org/arigo/arigo/raw/default/hack/stm/c6/core.c Start with a checkfence version. diff --git a/checkfence/c6/run b/checkfence/c6/run new file mode 100755 --- /dev/null +++ b/checkfence/c6/run @@ -0,0 +1,11 @@ +#!/bin/sh + +export C2LSL_HOME=./c2lsl +export CHECKFENCE_HOME=./checkfence + + +$C2LSL_HOME/bin/c2lsl.exe "$1" _run.lsl || exit 1 +shift +$CHECKFENCE_HOME/run/clean || exit 1 +echo ------------------------------------------------------------------------- +$CHECKFENCE_HOME/run/checkfence -i _run.lsl "$@" || exit 1 diff --git a/checkfence/c6/test1.c b/checkfence/c6/test1.c new file mode 100644 --- /dev/null +++ b/checkfence/c6/test1.c @@ -0,0 +1,249 @@ +#include "lsl_protos.h" + +typedef unsigned short uint16_t; +typedef boolean_t bool; + + +#define NUM_OBJECTS 2 +#define NUM_THREADS 2 +#define NUM_HISTORY 5 + +#define UNDOLOG NUM_THREADS + + +typedef void *uid_t; +#define get_uid() lsl_malloc_noreuse(1) + + +typedef struct { + uid_t read_version; + bool flag_modified; + int value1, value2; +} object_t; + +typedef struct { + int n_modified_objects; + int modified_objects[NUM_OBJECTS]; + uid_t transaction_read_version; +} thread_local_t; + +thread_local_t tl[NUM_THREADS]; +object_t obj[NUM_THREADS+1][NUM_OBJECTS]; +int global_history[NUM_HISTORY]; +int n_global_history; +int leader_thread_num; +int next_free_glob; +lsl_lock_t undo_lock; + + +void setup(void) +{ + /* initialize global state */ + next_free_glob = 0; + leader_thread_num = 0; + n_global_history = 0; + lsl_initlock(&undo_lock); + + int t = 0; + while (1) { + tl[t].n_modified_objects = 0; + t++; + if (t == NUM_THREADS) + break; + } +} + + +int fetch_and_add(volatile int *loc, int increment) +{ + int oldvalue = *loc; + lsl_assume(lsl_cas_64(loc, oldvalue, oldvalue + increment)); + return oldvalue; +} + +int stm_allocate(int t) +{ + int result = fetch_and_add(&next_free_glob, 1); + lsl_observe_output("stm_allocate", result); + + obj[t][result].flag_modified = 
true; + return result; +} + +int acquire_lock_if_leader(int t) +{ + //XXX: + //if (leader_thread_num != t) + // return 0; + lsl_lock(&undo_lock); + if (leader_thread_num == t) + return 1; + lsl_unlock(&undo_lock); + return 0; +} + +void memcpy_obj_without_header(int tdst, int objndst, int tsrc, int objnsrc) +{ + obj[tdst][objndst].value1 = obj[tsrc][objnsrc].value1; + obj[tdst][objndst].value2 = obj[tsrc][objnsrc].value2; +} + +#define stm_read(t, objnum) \ + (obj[t][objnum].read_version = tl[t].transaction_read_version) + +void stm_write(int t, int objnum) +{ + if (obj[t][objnum].flag_modified) + return; /* already modified during this transaction */ + + stm_read(t, objnum); + obj[t][objnum].flag_modified = true; + int n = tl[t].n_modified_objects; + tl[t].modified_objects[n] = objnum; + + int is_leader = acquire_lock_if_leader(t); + tl[t].n_modified_objects = n + 1; + if (is_leader) { + memcpy_obj_without_header(UNDOLOG, n, t, objnum); + lsl_unlock(&undo_lock); + } +} + +void start_transaction(int t) +{ + lsl_assert(tl[t].n_modified_objects == 0); + lsl_assert(!obj[t][0].flag_modified); + tl[t].transaction_read_version = get_uid(); +} + +int stop_transaction(int t) +{ + int nmod = tl[t].n_modified_objects; + if (nmod == 0) { + /* no modified objects in this transaction */ + return 1; + } + + lsl_lock(&undo_lock); + + int nglob = n_global_history; + int result = 1; + + if (leader_thread_num != t) { + /* becomes the leader */ + uid_t my_version = tl[t].transaction_read_version; + + if (nglob > 0) { + while (1) { + int objnum = global_history[--nglob]; + result &= (obj[t][objnum].read_version != my_version); + obj[t][objnum].flag_modified = false; + memcpy_obj_without_header(t, objnum, 1 - t, objnum); + if (nglob == 0) + break; + } + } + int nundo = tl[1 - t].n_modified_objects; + if (nundo > 0) { + while (1) { + int objnum = tl[1 - t].modified_objects[--nundo]; + result &= (obj[t][objnum].read_version != my_version); + obj[t][objnum].flag_modified = false; + memcpy_obj_without_header(t, objnum, 1 - t, objnum); + if (nundo == 0) + break; + } + } + if (result) + leader_thread_num = t; + } + + if (result) { + while (1) { + int objnum = tl[t].modified_objects[--nmod]; + global_history[nglob++] = objnum; + obj[t][objnum].flag_modified = false; + if (nmod == 0) + break; + } + } + else { + while (1) { + int objnum = tl[t].modified_objects[--nmod]; + if (obj[t][objnum].flag_modified) { + obj[t][objnum].flag_modified = false; + memcpy_obj_without_header(t, objnum, 1 - t, objnum); + } + if (nmod == 0) + break; + } + lsl_assert(nglob == 0); + } + n_global_history = nglob; + tl[t].n_modified_objects = 0; + + lsl_unlock(&undo_lock); + return result; +} + + +void A(void) +{ + int t = lsl_get_thread_id(); + int num = stm_allocate(t); +} + +void SETUP(void) +{ + setup(); +} + +void SETUP100(void) +{ + int t = 0; + setup(); + while (1) { + obj[t][0].flag_modified = false; + obj[t][0].value1 = 100; + obj[t][0].value2 = 200; + t++; + if (t == NUM_THREADS) + break; + } +} + +void R0(void) +{ + int t = lsl_get_thread_id(); + int result1, result2; + while (1) { + start_transaction(t); + stm_read(t, 0); + result1 = obj[t][0].value1; + result2 = obj[t][0].value2; + if (stop_transaction(t)) + break; + } + + lsl_observe_output("R0:value1", result1); + lsl_observe_output("R0:value2", result2); +} + +void W0INC1(void) +{ + int t = lsl_get_thread_id(); + int nvalue1, nvalue2; + while (1) { + start_transaction(t); + stm_write(t, 0); + nvalue1 = ++obj[t][0].value1; + nvalue2 = ++obj[t][0].value2; + lsl_assert(nvalue1 == 
obj[t][0].value1); + lsl_assert(nvalue2 == obj[t][0].value2); + if (stop_transaction(t)) + break; + } + + lsl_observe_output("W0INC1:nvalue1", nvalue1); + lsl_observe_output("W0INC1:nvalue2", nvalue2); +} diff --git a/checkfence/c6/test1.lsl b/checkfence/c6/test1.lsl new file mode 100644 --- /dev/null +++ b/checkfence/c6/test1.lsl @@ -0,0 +1,6 @@ + +test T0 = SETUP ( A | A ) + +test T1 = SETUP100 ( R0 | R0 ) + +test T2 = SETUP100 ( W0INC1 R0 | W0INC1 R0 ) From noreply at buildbot.pypy.org Sat Dec 28 19:35:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 28 Dec 2013 19:35:54 +0100 (CET) Subject: [pypy-commit] stmgc c6: Adding some more versions of the test Message-ID: <20131228183554.4C2EC1C1347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r587:52cf82f83a37 Date: 2013-12-28 19:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/52cf82f83a37/ Log: Adding some more versions of the test diff --git a/checkfence/c6/test1.c b/checkfence/c6/test1.c --- a/checkfence/c6/test1.c +++ b/checkfence/c6/test1.c @@ -4,15 +4,14 @@ typedef boolean_t bool; -#define NUM_OBJECTS 2 +#define NUM_OBJECTS 1 #define NUM_THREADS 2 -#define NUM_HISTORY 5 +#define NUM_HISTORY 2 #define UNDOLOG NUM_THREADS -typedef void *uid_t; -#define get_uid() lsl_malloc_noreuse(1) +typedef unsigned uid_t; typedef struct { @@ -44,13 +43,11 @@ n_global_history = 0; lsl_initlock(&undo_lock); - int t = 0; - while (1) { - tl[t].n_modified_objects = 0; - t++; - if (t == NUM_THREADS) - break; - } + /* XXX manual unrolling */ + tl[0].n_modified_objects = 0; + tl[0].transaction_read_version = 0; + tl[1].n_modified_objects = 0; + tl[1].transaction_read_version = 0; } @@ -109,11 +106,55 @@ } } +int update_to_leader(int t, int check) +{ + /* becomes the leader */ + uid_t my_version = tl[t].transaction_read_version; + int result = check; + int nglob = n_global_history; + + if (nglob > 0) { + while (1) { + int objnum = global_history[--nglob]; + if (result) + result = (obj[t][objnum].read_version != my_version); + obj[t][objnum].flag_modified = false; + memcpy_obj_without_header(t, objnum, 1 - t, objnum); + if (nglob == 0) + break; + } + n_global_history = 0; + } + int nundo = tl[1 - t].n_modified_objects; + if (nundo > 0) { + while (1) { + int objnum = tl[1 - t].modified_objects[--nundo]; + if (result) + result = (obj[t][objnum].read_version != my_version); + obj[t][objnum].flag_modified = false; + memcpy_obj_without_header(t, objnum, UNDOLOG, objnum); + if (nundo == 0) + break; + } + } + return result; +} + +void update_state(int t) +{ + lsl_lock(&undo_lock); + if (leader_thread_num != t) { + update_to_leader(t, 0); + leader_thread_num = t; + } + lsl_unlock(&undo_lock); +} + void start_transaction(int t) { lsl_assert(tl[t].n_modified_objects == 0); lsl_assert(!obj[t][0].flag_modified); - tl[t].transaction_read_version = get_uid(); + tl[t].transaction_read_version++; } int stop_transaction(int t) @@ -126,39 +167,18 @@ lsl_lock(&undo_lock); - int nglob = n_global_history; - int result = 1; - - if (leader_thread_num != t) { - /* becomes the leader */ - uid_t my_version = tl[t].transaction_read_version; - - if (nglob > 0) { - while (1) { - int objnum = global_history[--nglob]; - result &= (obj[t][objnum].read_version != my_version); - obj[t][objnum].flag_modified = false; - memcpy_obj_without_header(t, objnum, 1 - t, objnum); - if (nglob == 0) - break; - } - } - int nundo = tl[1 - t].n_modified_objects; - if (nundo > 0) { - while (1) { - int objnum = tl[1 - t].modified_objects[--nundo]; - result &= 
(obj[t][objnum].read_version != my_version); - obj[t][objnum].flag_modified = false; - memcpy_obj_without_header(t, objnum, 1 - t, objnum); - if (nundo == 0) - break; - } - } + int result; + if (leader_thread_num == t) { + result = 1; + } + else { + result = update_to_leader(t, 1); if (result) leader_thread_num = t; } if (result) { + int nglob = n_global_history; while (1) { int objnum = tl[t].modified_objects[--nmod]; global_history[nglob++] = objnum; @@ -166,6 +186,7 @@ if (nmod == 0) break; } + n_global_history = nglob; } else { while (1) { @@ -177,9 +198,7 @@ if (nmod == 0) break; } - lsl_assert(nglob == 0); } - n_global_history = nglob; tl[t].n_modified_objects = 0; lsl_unlock(&undo_lock); @@ -202,14 +221,17 @@ { int t = 0; setup(); - while (1) { - obj[t][0].flag_modified = false; - obj[t][0].value1 = 100; - obj[t][0].value2 = 200; - t++; - if (t == NUM_THREADS) - break; - } + + /* XXX manual unrolling */ + obj[0][0].flag_modified = false; + obj[0][0].read_version = 0; + obj[0][0].value1 = 100; + obj[0][0].value2 = 200; + + obj[1][0].flag_modified = false; + obj[1][0].read_version = 0; + obj[1][0].value1 = 100; + obj[1][0].value2 = 200; } void R0(void) @@ -233,15 +255,24 @@ { int t = lsl_get_thread_id(); int nvalue1, nvalue2; - while (1) { + //update_state(t); + + start_transaction(t); + stm_write(t, 0); + nvalue1 = ++obj[t][0].value1; + nvalue2 = ++obj[t][0].value2; + lsl_assert(nvalue1 == obj[t][0].value1); + lsl_assert(nvalue2 == obj[t][0].value2); + if (!stop_transaction(t)) { start_transaction(t); stm_write(t, 0); nvalue1 = ++obj[t][0].value1; nvalue2 = ++obj[t][0].value2; lsl_assert(nvalue1 == obj[t][0].value1); lsl_assert(nvalue2 == obj[t][0].value2); - if (stop_transaction(t)) - break; + if (!stop_transaction(t)) { + lsl_observe_output("XXX W0INC1 failed twice", 0); + } } lsl_observe_output("W0INC1:nvalue1", nvalue1); diff --git a/checkfence/c6/test1.lsl b/checkfence/c6/test1.lsl --- a/checkfence/c6/test1.lsl +++ b/checkfence/c6/test1.lsl @@ -1,6 +1,6 @@ -test T0 = SETUP ( A | A ) +//test T0 = SETUP ( A | A ) -test T1 = SETUP100 ( R0 | R0 ) +//test T1 = SETUP100 ( R0 | R0 ) -test T2 = SETUP100 ( W0INC1 R0 | W0INC1 R0 ) +test T2 = SETUP100 ( W0INC1 | W0INC1 ) diff --git a/checkfence/c6/test2.c b/checkfence/c6/test2.c new file mode 100644 --- /dev/null +++ b/checkfence/c6/test2.c @@ -0,0 +1,240 @@ +#include "lsl_protos.h" + +typedef unsigned short uint16_t; +typedef boolean_t bool; + + +#define NUM_THREADS 2 +#define UNDOLOG NUM_THREADS + + +typedef unsigned uid_t; + + +typedef struct { + uid_t read_version; + bool flag_modified; + int value1, value2; +} object_t; + +typedef struct { + int n_modified_objects; + uid_t transaction_read_version; +} thread_local_t; + +thread_local_t tl[NUM_THREADS]; +object_t obj[NUM_THREADS+1]; +int n_global_history; +int leader_thread_num; +lsl_lock_t undo_lock; + + +void setup(void) +{ + /* initialize global state */ + leader_thread_num = 0; + n_global_history = 0; + lsl_initlock(&undo_lock); + + /* XXX manual unrolling */ + tl[0].n_modified_objects = 0; + tl[0].transaction_read_version = 0; + tl[1].n_modified_objects = 0; + tl[1].transaction_read_version = 0; +} + + +int fetch_and_add(volatile int *loc, int increment) +{ + int oldvalue = *loc; + lsl_assume(lsl_cas_64(loc, oldvalue, oldvalue + increment)); + return oldvalue; +} + +/* int stm_allocate(int t) */ +/* { */ +/* int result = fetch_and_add(&next_free_glob, 1); */ +/* lsl_observe_output("stm_allocate", result); */ + +/* obj[t][result].flag_modified = true; */ +/* return result; */ +/* } 
*/ + +int acquire_lock_if_leader(int t) +{ + //XXX: + //if (leader_thread_num != t) + // return 0; + lsl_lock(&undo_lock); + if (leader_thread_num == t) + return 1; + lsl_unlock(&undo_lock); + return 0; +} + +void memcpy_obj_without_header(int tdst, int tsrc) +{ + obj[tdst].value1 = obj[tsrc].value1; + obj[tdst].value2 = obj[tsrc].value2; +} + +#define stm_read(t) \ + (obj[t].read_version = tl[t].transaction_read_version) + +void stm_write(int t) +{ + if (obj[t].flag_modified) + return; /* already modified during this transaction */ + + stm_read(t); + + int is_leader = acquire_lock_if_leader(t); + obj[t].flag_modified = true; + tl[t].n_modified_objects = 1; + if (is_leader) { + memcpy_obj_without_header(UNDOLOG, t); + lsl_unlock(&undo_lock); + } +} + +int update_to_leader(int t, int check) +{ + /* becomes the leader */ + uid_t my_version = tl[t].transaction_read_version; + int result = check; + + if (n_global_history > 0) { + if (result) + result = (obj[t].read_version != my_version); + obj[t].flag_modified = false; + int src = (obj[1 - t].flag_modified ? UNDOLOG : 1 - t); + memcpy_obj_without_header(t, src); + n_global_history = 0; + } + leader_thread_num = t; + return result; +} + +void update_state(int t) +{ + lsl_lock(&undo_lock); + if (leader_thread_num != t) { + update_to_leader(t, 0); + } + lsl_unlock(&undo_lock); +} + +void start_transaction(int t) +{ + lsl_assert(tl[t].n_modified_objects == 0); + lsl_assert(!obj[t].flag_modified); + tl[t].transaction_read_version++; +} + +int stop_transaction(int t) +{ + int nmod = tl[t].n_modified_objects; + if (nmod == 0) { + /* no modified objects in this transaction */ + return 1; + } + + lsl_lock(&undo_lock); + + int result; + if (leader_thread_num == t) { + result = 1; + } + else { + result = update_to_leader(t, 1); + } + + if (result) { + obj[t].flag_modified = false; + n_global_history = 1; + } + else { + if (obj[t].flag_modified) { + obj[t].flag_modified = false; + memcpy_obj_without_header(t, 1 - t); + } + } + tl[t].n_modified_objects = 0; + + lsl_unlock(&undo_lock); + return result; +} + + +/* void A(void) */ +/* { */ +/* int t = lsl_get_thread_id(); */ +/* int num = stm_allocate(t); */ +/* } */ + +void SETUP(void) +{ + setup(); +} + +void SETUP100(void) +{ + int t = 0; + setup(); + + /* XXX manual unrolling */ + obj[0].flag_modified = false; + obj[0].read_version = 0; + obj[0].value1 = 100; + obj[0].value2 = 200; + + obj[1].flag_modified = false; + obj[1].read_version = 0; + obj[1].value1 = 100; + obj[1].value2 = 200; +} + +void R0(void) +{ + int t = lsl_get_thread_id(); + int result1, result2; + while (1) { + start_transaction(t); + stm_read(t); + result1 = obj[t].value1; + result2 = obj[t].value2; + if (stop_transaction(t)) + break; + } + + lsl_observe_output("R0:value1", result1); + lsl_observe_output("R0:value2", result2); +} + +void W0INC1(void) +{ + int t = lsl_get_thread_id(); + int nvalue1, nvalue2; + //update_state(t); + + start_transaction(t); + stm_write(t); + nvalue1 = ++obj[t].value1; + nvalue2 = ++obj[t].value2; + lsl_assert(nvalue1 == obj[t].value1); + lsl_assert(nvalue2 == obj[t].value2); + if (!stop_transaction(t)) { + start_transaction(t); + stm_write(t); + nvalue1 = ++obj[t].value1; + nvalue2 = ++obj[t].value2; + lsl_assert(nvalue1 == obj[t].value1); + lsl_assert(nvalue2 == obj[t].value2); + if (!stop_transaction(t)) { + lsl_observe_output("XXX W0INC1 failed twice", 0); + } + } + + lsl_observe_output("W0INC1:nvalue1", nvalue1); + lsl_observe_output("W0INC1:nvalue2", nvalue2); +} diff --git a/checkfence/c6/test2.lsl 
b/checkfence/c6/test2.lsl new file mode 100644 --- /dev/null +++ b/checkfence/c6/test2.lsl @@ -0,0 +1,4 @@ + +//test T1 = SETUP100 ( R0 | R0 ) + +test T2 = SETUP100 ( W0INC1 | W0INC1 R0 ) diff --git a/checkfence/c6/test3.c b/checkfence/c6/test3.c new file mode 100644 --- /dev/null +++ b/checkfence/c6/test3.c @@ -0,0 +1,260 @@ +#include "lsl_protos.h" + +typedef unsigned short uint16_t; +typedef boolean_t bool; + + +#define NUM_THREADS 2 +#define UNDOLOG NUM_THREADS + + +typedef unsigned uid_t; + + +typedef struct { + uid_t read_version; + char flag_modified; + int value1, value2; +} object_t; + +typedef struct { + int n_modified_objects; + uid_t transaction_read_version; +} thread_local_t; + +thread_local_t tl[NUM_THREADS]; +object_t obj[NUM_THREADS+1]; +int n_global_history; +int leader_thread_num; +lsl_lock_t undo_lock; + + +void setup(void) +{ + /* initialize global state */ + leader_thread_num = 0; + n_global_history = 0; + lsl_initlock(&undo_lock); + + /* XXX manual unrolling */ + tl[0].n_modified_objects = 0; + tl[0].transaction_read_version = 0; + tl[1].n_modified_objects = 0; + tl[1].transaction_read_version = 0; +} + + +int fetch_and_add(volatile int *loc, int increment) +{ + int oldvalue = *loc; + lsl_assume(lsl_cas_64(loc, oldvalue, oldvalue + increment)); + return oldvalue; +} + +/* int stm_allocate(int t) */ +/* { */ +/* int result = fetch_and_add(&next_free_glob, 1); */ +/* lsl_observe_output("stm_allocate", result); */ + +/* obj[t][result].flag_modified = true; */ +/* return result; */ +/* } */ + +int acquire_lock_if_leader(int t) +{ + //XXX: + //if (leader_thread_num != t) + // return 0; + lsl_lock(&undo_lock); + if (leader_thread_num == t) + return 1; + lsl_unlock(&undo_lock); + return 0; +} + +void memcpy_obj_without_header(int tdst, int tsrc) +{ + obj[tdst].value1 = obj[tsrc].value1; + obj[tdst].value2 = obj[tsrc].value2; +} + +#define stm_read(t) \ + (obj[t].read_version = tl[t].transaction_read_version) + +void stm_write(int t) +{ + if (obj[t].flag_modified) + return; /* already modified during this transaction */ + + stm_read(t); + + int is_leader = acquire_lock_if_leader(t); + obj[t].flag_modified = true; + tl[t].n_modified_objects = 1; + if (is_leader) { + memcpy_obj_without_header(UNDOLOG, t); + lsl_unlock(&undo_lock); + } +} + +int update_to_leader(int t, int check) +{ + /* becomes the leader, and update the local copy of the objects */ + uid_t my_version = tl[t].transaction_read_version; + int result = check; + + if (n_global_history > 0) { + + /* loop over objects modified by the leader, and set their + local 'flag_modified' to '2' */ + if (tl[1 - t].n_modified_objects) { + obj[t].flag_modified = 109; + } + + /* now loop over objects in 'global_history': if they have been + read by the current transaction, the current transaction must + abort; then either copy, or mark as copy later */ + if (result) + result = (obj[t].read_version != my_version); + if (obj[t].flag_modified == 109) { + obj[t].flag_modified = 67; + } + else { + memcpy_obj_without_header(t, 1 - t); + } + + /* finally, loop again over objects modified by the leader, + and copy the marked ones out of the undo log */ + if (tl[1 - t].n_modified_objects) { + if (obj[t].flag_modified == 67) { + memcpy_obj_without_header(t, UNDOLOG); + } + obj[t].flag_modified = false; + } + + n_global_history = 0; + } + leader_thread_num = t; + return result; +} + +/* void update_state(int t) */ +/* { */ +/* lsl_lock(&undo_lock); */ +/* if (leader_thread_num != t) { */ +/* update_to_leader(t, 0); */ +/* } */ +/* 
lsl_unlock(&undo_lock); */ +/* } */ + +void start_transaction(int t) +{ + lsl_assert(tl[t].n_modified_objects == 0); + lsl_assert(!obj[t].flag_modified); + tl[t].transaction_read_version++; +} + +int stop_transaction(int t) +{ + int nmod = tl[t].n_modified_objects; + if (nmod == 0) { + /* no modified objects in this transaction */ + return 1; + } + + lsl_lock(&undo_lock); + + int result; + if (leader_thread_num == t) { + result = 1; + } + else { + result = update_to_leader(t, 1); + } + + if (result) { + obj[t].flag_modified = false; + n_global_history = 1; + } + else { + obj[t].flag_modified = false; + } + tl[t].n_modified_objects = 0; + + lsl_unlock(&undo_lock); + return result; +} + + +/* void A(void) */ +/* { */ +/* int t = lsl_get_thread_id(); */ +/* int num = stm_allocate(t); */ +/* } */ + +void SETUP(void) +{ + setup(); +} + +void SETUP100(void) +{ + int t = 0; + setup(); + + /* XXX manual unrolling */ + obj[0].flag_modified = false; + obj[0].read_version = 0; + obj[0].value1 = 100; + obj[0].value2 = 200; + + obj[1].flag_modified = false; + obj[1].read_version = 0; + obj[1].value1 = 100; + obj[1].value2 = 200; +} + +void R0(void) +{ + int t = lsl_get_thread_id(); + int result1, result2; + while (1) { + start_transaction(t); + stm_read(t); + result1 = obj[t].value1; + result2 = obj[t].value2; + if (stop_transaction(t)) + break; + } + + lsl_observe_output("R0:value1", result1); + lsl_observe_output("R0:value2", result2); +} + +void W0INC1(void) +{ + int t = lsl_get_thread_id(); + int nvalue1, nvalue2; + //update_state(t); + + start_transaction(t); + stm_write(t); + nvalue1 = ++obj[t].value1; + nvalue2 = ++obj[t].value2; + lsl_assert(nvalue1 == obj[t].value1); + lsl_assert(nvalue2 == obj[t].value2); + if (!stop_transaction(t)) { + start_transaction(t); + stm_write(t); + nvalue1 = ++obj[t].value1; + nvalue2 = ++obj[t].value2; + lsl_assert(nvalue1 == obj[t].value1); + lsl_assert(nvalue2 == obj[t].value2); + if (!stop_transaction(t)) { + lsl_observe_output("XXX W0INC1 failed twice", 0); + } + } + + lsl_observe_output("W0INC1:nvalue1", nvalue1); + lsl_observe_output("W0INC1:nvalue2", nvalue2); +} diff --git a/checkfence/c6/test3.lsl b/checkfence/c6/test3.lsl new file mode 100644 --- /dev/null +++ b/checkfence/c6/test3.lsl @@ -0,0 +1,4 @@ + +//test T1 = SETUP100 ( R0 | R0 ) + +test T2 = SETUP100 ( W0INC1 | W0INC1 ) From noreply at buildbot.pypy.org Sat Dec 28 19:59:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 28 Dec 2013 19:59:55 +0100 (CET) Subject: [pypy-commit] stmgc c6: Simplify for now, and add comments Message-ID: <20131228185955.4461B1C10F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r588:8a8b66cab158 Date: 2013-12-28 19:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/8a8b66cab158/ Log: Simplify for now, and add comments diff --git a/checkfence/c6/test3.c b/checkfence/c6/test3.c --- a/checkfence/c6/test3.c +++ b/checkfence/c6/test3.c @@ -105,31 +105,22 @@ if (n_global_history > 0) { - /* loop over objects modified by the leader, and set their - local 'flag_modified' to '2' */ - if (tl[1 - t].n_modified_objects) { - obj[t].flag_modified = 109; - } - - /* now loop over objects in 'global_history': if they have been + /* loop over objects in 'global_history': if they have been read by the current transaction, the current transaction must - abort; then either copy, or mark as copy later */ + abort; then copy them out of the leader's object space --- + which may have been modified by the leader's uncommitted + transaction; this case will be 
fixed afterwards. */ if (result) result = (obj[t].read_version != my_version); - if (obj[t].flag_modified == 109) { - obj[t].flag_modified = 67; - } - else { - memcpy_obj_without_header(t, 1 - t); - } + memcpy_obj_without_header(t, 1 - t); - /* finally, loop again over objects modified by the leader, - and copy the marked ones out of the undo log */ + /* finally, loop over objects modified by the leader, + and copy them out of the undo log. XXX We could use + a heuristic to avoid copying unneeded objects: it's not + useful to copy objects that were not also present in + the 'global_history'. */ if (tl[1 - t].n_modified_objects) { - if (obj[t].flag_modified == 67) { - memcpy_obj_without_header(t, UNDOLOG); - } - obj[t].flag_modified = false; + memcpy_obj_without_header(t, UNDOLOG); } n_global_history = 0; From noreply at buildbot.pypy.org Sat Dec 28 20:10:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 28 Dec 2013 20:10:51 +0100 (CET) Subject: [pypy-commit] stmgc c6: Another test Message-ID: <20131228191051.8A6D01C116A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r589:e0a702f234ef Date: 2013-12-28 20:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/e0a702f234ef/ Log: Another test diff --git a/checkfence/c6/test3.c b/checkfence/c6/test3.c --- a/checkfence/c6/test3.c +++ b/checkfence/c6/test3.c @@ -129,14 +129,14 @@ return result; } -/* void update_state(int t) */ -/* { */ -/* lsl_lock(&undo_lock); */ -/* if (leader_thread_num != t) { */ -/* update_to_leader(t, 0); */ -/* } */ -/* lsl_unlock(&undo_lock); */ -/* } */ +void update_state(int t) +{ + lsl_lock(&undo_lock); + if (leader_thread_num != t) { + update_to_leader(t, 0); + } + lsl_unlock(&undo_lock); +} void start_transaction(int t) { @@ -249,3 +249,9 @@ lsl_observe_output("W0INC1:nvalue1", nvalue1); lsl_observe_output("W0INC1:nvalue2", nvalue2); } + +void UPD(void) +{ + int t = lsl_get_thread_id(); + update_state(t); +} diff --git a/checkfence/c6/test3.lsl b/checkfence/c6/test3.lsl --- a/checkfence/c6/test3.lsl +++ b/checkfence/c6/test3.lsl @@ -1,4 +1,6 @@ -//test T1 = SETUP100 ( R0 | R0 ) +test T1 = SETUP100 ( R0 | R0 ) test T2 = SETUP100 ( W0INC1 | W0INC1 ) + +test T3 = SETUP100 ( UPD R0 UPD R0 | W0INC1 ) From noreply at buildbot.pypy.org Sat Dec 28 21:06:17 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sat, 28 Dec 2013 21:06:17 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: my dates Message-ID: <20131228200617.DB5B81C0C34@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5122:c11df09a0b81 Date: 2013-12-28 21:06 +0100 http://bitbucket.org/pypy/extradoc/changeset/c11df09a0b81/ Log: my dates diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -16,6 +16,7 @@ Maciej Fijalkowski 11-18 Ermina Remi Meier 11-19 Ermina Johan Råde 11-18 Ermina +Antonio Cuni 14-18 Ermina ==================== ============== ======================= @@ -25,7 +26,6 @@ Name Arrive/Depart Accomodation ==================== ============== ===================== Romain Guillebert ? ? -Antonio Cuni ? ? Michael Foord ? ? David Schneider ? ? Jacob Hallen ? ? 
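The checkfence models above (test3.c, test4.c) deliberately collapse the per-object bookkeeping to a single object slot per thread so that the model checker stays tractable. As a rough sketch only -- generalizing the single-object update_to_leader() of test3.c/test4.c back to an array of objects, in the spirit of the array-based bookkeeping of test1.c, for the two-thread case -- the step could look roughly like the plain C below. The names copy_payload, leader_modified/n_leader_modified and the my_read_version parameter are illustrative stand-ins (the actual code uses memcpy_obj_without_header, tl[1 - t].modified_objects and tl[t].transaction_read_version), and the lsl_* primitives are left out; the caller is assumed to hold the undo lock.

#include <stdbool.h>

#define NUM_OBJECTS  4
#define NUM_THREADS  2            /* two-thread model, as in the tests */
#define UNDOLOG      NUM_THREADS

typedef struct {
    unsigned read_version;
    bool flag_modified;
    int value1, value2;
} object_t;

/* per-thread object copies, plus one extra row used as the undo log */
static object_t obj[NUM_THREADS + 1][NUM_OBJECTS];

/* objects committed by other transactions since this thread last synced */
static int global_history[NUM_OBJECTS];
static int n_global_history;

/* objects modified (but not yet committed) by the previous leader */
static int leader_modified[NUM_OBJECTS];
static int n_leader_modified;

static void copy_payload(object_t *dst, const object_t *src)
{
    /* copy the object body only; read_version/flag stay per-thread */
    dst->value1 = src->value1;
    dst->value2 = src->value2;
}

/* Thread 't' takes over leadership; called with the undo lock held.
   Returns 0 if the current transaction has to abort. */
static int update_to_leader(int t, unsigned my_read_version)
{
    int ok = 1;
    int i;

    /* objects committed by the other thread: abort if this transaction
       read them, then refresh our copy from the previous leader's space */
    for (i = 0; i < n_global_history; i++) {
        int num = global_history[i];
        if (obj[t][num].read_version == my_read_version)
            ok = 0;                       /* read-write conflict */
        obj[t][num].flag_modified = false;
        copy_payload(&obj[t][num], &obj[1 - t][num]);
    }

    /* objects the previous leader modified without committing:
       overwrite them again with the clean copies from the undo log */
    for (i = 0; i < n_leader_modified; i++) {
        int num = leader_modified[i];
        copy_payload(&obj[t][num], &obj[UNDOLOG][num]);
    }

    n_global_history = 0;
    return ok;
}

The ordering is the point made by the comments added in test3.c: objects are first copied out of the previous leader's space, which may still contain its uncommitted writes, and the undo-log pass afterwards restores the committed contents for exactly those objects.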
From noreply at buildbot.pypy.org Sun Dec 29 10:10:40 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 29 Dec 2013 10:10:40 +0100 (CET) Subject: [pypy-commit] pypy default: don't mask the original variable Message-ID: <20131229091040.D66091C0162@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68563:373db40f63b5 Date: 2013-11-27 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/373db40f63b5/ Log: don't mask the original variable diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -854,10 +854,10 @@ callee_graph = graph, position_tag = object()) - def convertvar(self, v, r_from, r_to): - assert isinstance(v, (Variable, Constant)) + def convertvar(self, orig_v, r_from, r_to): + assert isinstance(orig_v, (Variable, Constant)) if r_from != r_to: - v = pair(r_from, r_to).convert_from_to(v, self) + v = pair(r_from, r_to).convert_from_to(orig_v, self) if v is NotImplemented: raise TyperError("don't know how to convert from %r to %r" % (r_from, r_to)) From noreply at buildbot.pypy.org Sun Dec 29 10:10:42 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 29 Dec 2013 10:10:42 +0100 (CET) Subject: [pypy-commit] pypy default: fix Message-ID: <20131229091042.1849A1C0162@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68564:3cb51dff4d05 Date: 2013-11-27 15:56 +0200 http://bitbucket.org/pypy/pypy/changeset/3cb51dff4d05/ Log: fix diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -865,6 +865,8 @@ raise TyperError("bug in conversion from %r to %r: " "returned a %r" % (r_from, r_to, v.concretetype)) + else: + v = orig_v return v def genop(self, opname, args_v, resulttype=None): From noreply at buildbot.pypy.org Sun Dec 29 10:10:43 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 29 Dec 2013 10:10:43 +0100 (CET) Subject: [pypy-commit] pypy default: implement multichar split for RPython Message-ID: <20131229091043.483AB1C0162@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68565:0390ddee24d3 Date: 2013-12-29 10:43 +0200 http://bitbucket.org/pypy/pypy/changeset/0390ddee24d3/ Log: implement multichar split for RPython diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -39,8 +39,8 @@ else: def rsocket_startup(): pass - - + + def ntohs(x): return rffi.cast(lltype.Signed, _c.ntohs(x)) @@ -500,7 +500,7 @@ self.type = type self.proto = proto self.timeout = defaults.timeout - + def __del__(self): fd = self.fd if fd != _c.INVALID_SOCKET: @@ -575,8 +575,8 @@ if n == 0: return 1 return 0 - - + + def error_handler(self): return last_error() @@ -696,7 +696,7 @@ if res < 0: res = errno return (res, False) - + def connect(self, address): """Connect the socket to a remote address.""" err, timeout = self._connect(address) @@ -704,7 +704,7 @@ raise SocketTimeout if err: raise CSocketError(err) - + def connect_ex(self, address): """This is like connect(address), but returns an error code (the errno value) instead of raising an exception when an error occurs.""" @@ -720,7 +720,7 @@ raise self.error_handler() return make_socket(fd, self.family, self.type, self.proto, SocketClass=SocketClass) - + def getpeername(self): """Return the address of the remote endpoint.""" address, addr_p, addrlen_p = self._addrbuf() @@ -790,7 +790,7 @@ """Return the timeout of the socket. 
A timeout < 0 means that timeouts are disabled in the socket.""" return self.timeout - + def listen(self, backlog): """Enable a server to accept connections. The backlog argument must be at least 1; it specifies the number of unaccepted connections @@ -857,7 +857,7 @@ def recvfrom_into(self, rwbuffer, nbytes, flags=0): buf, addr = self.recvfrom(nbytes, flags) rwbuffer.setslice(0, buf) - return len(buf), addr + return len(buf), addr def send_raw(self, dataptr, length, flags=0): """Send data from a CCHARP buffer.""" @@ -951,7 +951,7 @@ else: self.timeout = timeout self._setblocking(self.timeout < 0.0) - + def shutdown(self, how): """Shut down the reading side of the socket (flag == SHUT_RD), the writing side of the socket (flag == SHUT_WR), or both ends diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -624,8 +624,7 @@ i += 1 return count - @classmethod - def ll_find(cls, s1, s2, start, end): + def ll_find(s1, s2, start, end): if start < 0: start = 0 if end > len(s1.chars): @@ -635,9 +634,9 @@ m = len(s2.chars) if m == 1: - return cls.ll_find_char(s1, s2.chars[0], start, end) + return LLHelpers.ll_find_char(s1, s2.chars[0], start, end) - return cls.ll_search(s1, s2, start, end, FAST_FIND) + return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) @classmethod def ll_rfind(cls, s1, s2, start, end): @@ -881,6 +880,37 @@ item.copy_contents(s, item, i, 0, j - i) return res + def ll_split(LIST, s, c, max): + count = 1 + if max == -1: + max = len(s.chars) + pos = 0 + last = len(s.chars) + markerlen = len(c.chars) + pos = s.find(c, 0, last) + while pos >= 0 and count <= max: + pos = s.find(c, pos + markerlen, last) + count += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + pos = 0 + count = 0 + pos = s.find(c, 0, last) + prev_pos = 0 + if pos < 0: + items[0] = s + return items + while pos >= 0 and count < max: + item = items[count] = s.malloc(pos - prev_pos) + item.copy_contents(s, item, prev_pos, 0, pos - + prev_pos) + count += 1 + prev_pos = pos + markerlen + pos = s.find(c, pos + markerlen, last) + item = items[count] = s.malloc(last - prev_pos) + item.copy_contents(s, item, prev_pos, 0, last - prev_pos) + return items + def ll_rsplit_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -1094,7 +1124,8 @@ 'copy_contents' : staticAdtMethod(copy_string_contents), 'copy_contents_from_str' : staticAdtMethod(copy_string_contents), 'gethash': LLHelpers.ll_strhash, - 'length': LLHelpers.ll_length})) + 'length': LLHelpers.ll_length, + 'find': LLHelpers.ll_find})) UNICODE.become(GcStruct('rpy_unicode', ('hash', Signed), ('chars', Array(UniChar, hints={'immutable': True})), adtmeths={'malloc' : staticAdtMethod(mallocunicode), diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -336,10 +336,16 @@ def rtype_method_split(self, hop): rstr = hop.args_r[0].repr + v_str = hop.inputarg(rstr.repr, 0) + if isinstance(hop.args_s[1], annmodel.SomeString): + v_chr = hop.inputarg(rstr.repr, 1) + fn = self.ll.ll_split + else: + v_chr = hop.inputarg(rstr.char_repr, 1) + fn = self.ll.ll_split_chr if hop.nb_args == 3: - v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + v_max = hop.inputarg(Signed, 2) else: - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO @@ -347,7 +353,7 @@ list_type = 
hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr, v_max) + return hop.gendirectcall(fn, cLIST, v_str, v_chr, v_max) def rtype_method_rsplit(self, hop): rstr = hop.args_r[0].repr diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -731,6 +731,19 @@ res = self.interpret(fn, [i]) assert res == fn(i) + def test_split_multichar(self): + l = ["abc::z", "abc", "abc::def:::x"] + exp = [["abc", "z"], ["abc"], ["abc", "def", ":x"]] + exp2 = [["abc", "z"], ["abc"], ["abc", "def:::x"]] + + def f(i): + s = l[i] + return s.split("::") == exp[i] and s.split("::", 1) == exp2[i] + + for i in range(3): + res = self.interpret(f, [i]) + assert res == True + def test_rsplit(self): fn = self._make_split_test('rsplit') for i in range(5): From noreply at buildbot.pypy.org Sun Dec 29 10:10:44 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 29 Dec 2013 10:10:44 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20131229091044.717C41C0162@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68566:376eb896755b Date: 2013-12-29 10:44 +0200 http://bitbucket.org/pypy/pypy/changeset/376eb896755b/ Log: merge diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -854,10 +854,10 @@ callee_graph = graph, position_tag = object()) - def convertvar(self, v, r_from, r_to): - assert isinstance(v, (Variable, Constant)) + def convertvar(self, orig_v, r_from, r_to): + assert isinstance(orig_v, (Variable, Constant)) if r_from != r_to: - v = pair(r_from, r_to).convert_from_to(v, self) + v = pair(r_from, r_to).convert_from_to(orig_v, self) if v is NotImplemented: raise TyperError("don't know how to convert from %r to %r" % (r_from, r_to)) @@ -865,6 +865,8 @@ raise TyperError("bug in conversion from %r to %r: " "returned a %r" % (r_from, r_to, v.concretetype)) + else: + v = orig_v return v def genop(self, opname, args_v, resulttype=None): From noreply at buildbot.pypy.org Sun Dec 29 10:10:53 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 29 Dec 2013 10:10:53 +0100 (CET) Subject: [pypy-commit] pypy default: implement rsplit and fix the annotation Message-ID: <20131229091053.3A31C1C0162@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68567:2f143b453556 Date: 2013-12-29 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/2f143b453556/ Log: implement rsplit and fix the annotation diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -638,8 +638,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) - @classmethod - def ll_rfind(cls, s1, s2, start, end): + def ll_rfind(s1, s2, start, end): if start < 0: start = 0 if end > len(s1.chars): @@ -649,9 +648,9 @@ m = len(s2.chars) if m == 1: - return cls.ll_rfind_char(s1, s2.chars[0], start, end) + return LLHelpers.ll_rfind_char(s1, s2.chars[0], start, end) - return cls.ll_search(s1, s2, start, end, FAST_RFIND) + return LLHelpers.ll_search(s1, s2, start, end, FAST_RFIND) @classmethod def ll_count(cls, s1, s2, start, end): @@ -899,7 +898,7 @@ prev_pos = 0 if pos < 0: items[0] = s - return items + return res while pos >= 0 and count < max: item = items[count] = s.malloc(pos - prev_pos) 
item.copy_contents(s, item, prev_pos, 0, pos - @@ -909,7 +908,7 @@ pos = s.find(c, pos + markerlen, last) item = items[count] = s.malloc(last - prev_pos) item.copy_contents(s, item, prev_pos, 0, last - prev_pos) - return items + return res def ll_rsplit_chr(LIST, s, c, max): chars = s.chars @@ -946,6 +945,37 @@ item.copy_contents(s, item, j, 0, i - j) return res + def ll_rsplit(LIST, s, c, max): + count = 1 + if max == -1: + max = len(s.chars) + pos = len(s.chars) + markerlen = len(c.chars) + pos = s.rfind(c, 0, pos) + while pos >= 0 and count <= max: + pos = s.rfind(c, 0, pos - markerlen) + count += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + pos = 0 + pos = len(s.chars) + prev_pos = pos + pos = s.rfind(c, 0, pos) + if pos < 0: + items[0] = s + return res + count -= 1 + while pos >= 0 and count > 0: + item = items[count] = s.malloc(prev_pos - pos - markerlen) + item.copy_contents(s, item, pos + markerlen, 0, + prev_pos - pos - markerlen) + count -= 1 + prev_pos = pos + pos = s.rfind(c, 0, pos) + item = items[count] = s.malloc(prev_pos) + item.copy_contents(s, item, 0, 0, prev_pos) + return res + @jit.elidable def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) @@ -1125,7 +1155,8 @@ 'copy_contents_from_str' : staticAdtMethod(copy_string_contents), 'gethash': LLHelpers.ll_strhash, 'length': LLHelpers.ll_length, - 'find': LLHelpers.ll_find})) + 'find': LLHelpers.ll_find, + 'rfind': LLHelpers.ll_rfind})) UNICODE.become(GcStruct('rpy_unicode', ('hash', Signed), ('chars', Array(UniChar, hints={'immutable': True})), adtmeths={'malloc' : staticAdtMethod(mallocunicode), diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -357,10 +357,16 @@ def rtype_method_rsplit(self, hop): rstr = hop.args_r[0].repr + v_str = hop.inputarg(rstr.repr, 0) + if isinstance(hop.args_s[1], annmodel.SomeString): + v_chr = hop.inputarg(rstr.repr, 1) + fn = self.ll.ll_rsplit + else: + v_chr = hop.inputarg(rstr.char_repr, 1) + fn = self.ll.ll_rsplit_chr if hop.nb_args == 3: - v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + v_max = hop.inputarg(Signed, 2) else: - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO @@ -368,7 +374,7 @@ list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_rsplit_chr, cLIST, v_str, v_chr, v_max) + return hop.gendirectcall(fn, cLIST, v_str, v_chr, v_max) def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -744,6 +744,19 @@ res = self.interpret(f, [i]) assert res == True + def test_rsplit_multichar(self): + l = ["abc::z", "abc", "abc::def:::x"] + exp = [["abc", "z"], ["abc"], ["abc", "def:", "x"]] + exp2 = [["abc", "z"], ["abc"], ["abc::def:", "x"]] + + def f(i): + s = l[i] + return s.rsplit("::") == exp[i] and s.rsplit("::", 1) == exp2[i] + + for i in range(3): + res = self.interpret(f, [i]) + assert res == True + def test_rsplit(self): fn = self._make_split_test('rsplit') for i in range(5): From noreply at buildbot.pypy.org Sun Dec 29 10:44:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 29 Dec 2013 10:44:33 +0100 (CET) Subject: [pypy-commit] stmgc c6: Adapt update_to_leader() to release the lock when copying the objects 
Message-ID: <20131229094433.D1ECB1C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r590:262fa1563851 Date: 2013-12-29 10:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/262fa1563851/ Log: Adapt update_to_leader() to release the lock when copying the objects diff --git a/checkfence/c6/test4.c b/checkfence/c6/test4.c new file mode 100644 --- /dev/null +++ b/checkfence/c6/test4.c @@ -0,0 +1,266 @@ +#include "lsl_protos.h" + +typedef unsigned short uint16_t; +typedef boolean_t bool; + + +#define NUM_THREADS 2 +#define UNDOLOG NUM_THREADS + + +typedef unsigned uid_t; + + +typedef struct { + uid_t read_version; + char flag_modified; + int value1, value2; +} object_t; + +typedef struct { + int n_modified_objects; + uid_t transaction_read_version; +} thread_local_t; + +thread_local_t tl[NUM_THREADS]; +object_t obj[NUM_THREADS+1]; +int n_global_history; +int leader_thread_num; +lsl_lock_t undo_lock; + + +void setup(void) +{ + /* initialize global state */ + leader_thread_num = 0; + n_global_history = 0; + lsl_initlock(&undo_lock); + + /* XXX manual unrolling */ + tl[0].n_modified_objects = 0; + tl[0].transaction_read_version = 0; + tl[1].n_modified_objects = 0; + tl[1].transaction_read_version = 0; +} + + +int fetch_and_add(volatile int *loc, int increment) +{ + int oldvalue = *loc; + lsl_assume(lsl_cas_64(loc, oldvalue, oldvalue + increment)); + return oldvalue; +} + +/* int stm_allocate(int t) */ +/* { */ +/* int result = fetch_and_add(&next_free_glob, 1); */ +/* lsl_observe_output("stm_allocate", result); */ + +/* obj[t][result].flag_modified = true; */ +/* return result; */ +/* } */ + +int acquire_lock_if_leader(int t) +{ + //XXX: + //if (leader_thread_num != t) + // return 0; + lsl_lock(&undo_lock); + if (leader_thread_num == t) + return 1; + lsl_unlock(&undo_lock); + return 0; +} + +void memcpy_obj_without_header(int tdst, int tsrc) +{ + obj[tdst].value1 = obj[tsrc].value1; + obj[tdst].value2 = obj[tsrc].value2; +} + +#define stm_read(t) \ + (obj[t].read_version = tl[t].transaction_read_version) + +void stm_write(int t) +{ + if (obj[t].flag_modified) + return; /* already modified during this transaction */ + + stm_read(t); + + int is_leader = acquire_lock_if_leader(t); + obj[t].flag_modified = true; + tl[t].n_modified_objects = 1; + if (is_leader) { + memcpy_obj_without_header(UNDOLOG, t); + lsl_unlock(&undo_lock); + } +} + +int update_to_leader(int t, int check) +{ + /* becomes the leader, and update the local copy of the objects */ + uid_t my_version = tl[t].transaction_read_version; + int result = check; + + if (n_global_history > 0) { + int nmod = tl[1 - t].n_modified_objects; + lsl_unlock(&undo_lock); + + /* loop over objects in 'global_history': if they have been + read by the current transaction, the current transaction must + abort; then copy them out of the leader's object space --- + which may have been modified by the leader's uncommitted + transaction; this case will be fixed afterwards. */ + if (result) + result = (obj[t].read_version != my_version); + memcpy_obj_without_header(t, 1 - t); + + /* finally, loop over objects modified by the leader, + and copy them out of the undo log. XXX We could use + a heuristic to avoid copying unneeded objects: it's not + useful to copy objects that were not also present in + the 'global_history'. 
*/ + if (!nmod) { + lsl_lock(&undo_lock); + nmod = tl[1 - t].n_modified_objects; + if (nmod) + lsl_unlock(&undo_lock); + } + if (nmod) { + memcpy_obj_without_header(t, UNDOLOG); + lsl_lock(&undo_lock); + } + + n_global_history = 0; + } + leader_thread_num = t; + return result; +} + +void update_state(int t) +{ + lsl_lock(&undo_lock); + if (leader_thread_num != t) { + update_to_leader(t, 0); + } + lsl_unlock(&undo_lock); +} + +void start_transaction(int t) +{ + lsl_assert(tl[t].n_modified_objects == 0); + lsl_assert(!obj[t].flag_modified); + tl[t].transaction_read_version++; +} + +int stop_transaction(int t) +{ + int nmod = tl[t].n_modified_objects; + if (nmod == 0) { + /* no modified objects in this transaction */ + return 1; + } + + lsl_lock(&undo_lock); + + int result; + if (leader_thread_num == t) { + result = 1; + } + else { + result = update_to_leader(t, 1); + } + + if (result) { + obj[t].flag_modified = false; + n_global_history = 1; + } + else { + obj[t].flag_modified = false; + } + tl[t].n_modified_objects = 0; + + lsl_unlock(&undo_lock); + return result; +} + + +/* void A(void) */ +/* { */ +/* int t = lsl_get_thread_id(); */ +/* int num = stm_allocate(t); */ +/* } */ + +void SETUP(void) +{ + setup(); +} + +void SETUP100(void) +{ + int t = 0; + setup(); + + /* XXX manual unrolling */ + obj[0].flag_modified = false; + obj[0].read_version = 0; + obj[0].value1 = 100; + obj[0].value2 = 200; + + obj[1].flag_modified = false; + obj[1].read_version = 0; + obj[1].value1 = 100; + obj[1].value2 = 200; +} + +void R0(void) +{ + int t = lsl_get_thread_id(); + int result1, result2; + while (1) { + start_transaction(t); + stm_read(t); + result1 = obj[t].value1; + result2 = obj[t].value2; + if (stop_transaction(t)) + break; + } + + lsl_observe_output("R0:value1", result1); + lsl_observe_output("R0:value2", result2); +} + +void W0INC1(void) +{ + int t = lsl_get_thread_id(); + int nvalue1, nvalue2; + //update_state(t); + + start_transaction(t); + stm_write(t); + nvalue1 = ++obj[t].value1; + nvalue2 = ++obj[t].value2; + lsl_assert(nvalue1 == obj[t].value1); + lsl_assert(nvalue2 == obj[t].value2); + if (!stop_transaction(t)) { + start_transaction(t); + stm_write(t); + nvalue1 = ++obj[t].value1; + nvalue2 = ++obj[t].value2; + lsl_assert(nvalue1 == obj[t].value1); + lsl_assert(nvalue2 == obj[t].value2); + if (!stop_transaction(t)) { + lsl_observe_output("XXX W0INC1 failed twice", 0); + } + } + + lsl_observe_output("W0INC1:nvalue1", nvalue1); + lsl_observe_output("W0INC1:nvalue2", nvalue2); +} + +void UPD(void) +{ + int t = lsl_get_thread_id(); + update_state(t); +} diff --git a/checkfence/c6/test4.lsl b/checkfence/c6/test4.lsl new file mode 100644 --- /dev/null +++ b/checkfence/c6/test4.lsl @@ -0,0 +1,6 @@ + +test T1 = SETUP100 ( R0 | R0 ) + +test T2 = SETUP100 ( W0INC1 | W0INC1 ) + +test T3 = SETUP100 ( UPD R0 UPD R0 | W0INC1 ) From noreply at buildbot.pypy.org Mon Dec 30 22:39:10 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 30 Dec 2013 22:39:10 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: consistent naming Message-ID: <20131230213910.ED4291C0162@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68568:14419f6b23e9 Date: 2013-12-29 17:44 -0800 http://bitbucket.org/pypy/pypy/changeset/14419f6b23e9/ Log: consistent naming diff --git a/pypy/objspace/std/test/test_smalllongobject.py b/pypy/objspace/std/test/test_smalllongobject.py --- a/pypy/objspace/std/test/test_smalllongobject.py +++ b/pypy/objspace/std/test/test_smalllongobject.py @@ 
-125,7 +125,7 @@ assert (x << 32) << 32 == 18446744073709551616L -class Test_W_IntObjectWithSmallLong(TestW_IntObject): +class TestW_IntObjectWithSmallLong(TestW_IntObject): spaceconfig = {"objspace.std.withsmalllong": True} From noreply at buildbot.pypy.org Mon Dec 30 22:39:12 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 30 Dec 2013 22:39:12 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: hook in ints manually now that they no longer delegate 2 longs Message-ID: <20131230213912.1602B1C021C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68569:1bf470bb200c Date: 2013-12-30 13:34 -0800 http://bitbucket.org/pypy/pypy/changeset/1bf470bb200c/ Log: hook in ints manually now that they no longer delegate 2 longs diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -1,6 +1,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat +from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all from pypy.objspace.std.floatobject import W_FloatObject, _hash_float @@ -216,17 +217,21 @@ if w_complex1.imagval: return space.w_False return space.eq(space.newfloat(w_complex1.realval), w_long2) +eq__Complex_Int = eq__Complex_Long def eq__Long_Complex(space, w_long1, w_complex2): return eq__Complex_Long(space, w_complex2, w_long1) +eq__Int_Complex = eq__Long_Complex def ne__Complex_Long(space, w_complex1, w_long2): if w_complex1.imagval: return space.w_True return space.ne(space.newfloat(w_complex1.realval), w_long2) +ne__Complex_Int = ne__Complex_Long def ne__Long_Complex(space, w_long1, w_complex2): return ne__Complex_Long(space, w_complex2, w_long1) +ne__Int_Complex = ne__Long_Complex def lt__Complex_Complex(space, w_complex1, w_complex2): raise OperationError(space.w_TypeError, space.wrap('cannot compare complex numbers using <, <=, >, >=')) diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -185,6 +185,14 @@ assert (5+0j) != large assert large != (5+0j) + def test_richcompare_boundaries(self): + z = 9007199254740992+0j + i = 9007199254740993 + assert not complex.__eq__(z, i) + assert not complex.__eq__(z, long(i)) + assert complex.__ne__(z, i) + assert complex.__ne__(z, long(i)) + def test_mod(self): raises(ZeroDivisionError, (1+1j).__mod__, 0+0j) From noreply at buildbot.pypy.org Mon Dec 30 22:39:13 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 30 Dec 2013 22:39:13 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: add rsides Message-ID: <20131230213913.397711C0ECA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68570:d96812b03457 Date: 2013-12-30 13:37 -0800 http://bitbucket.org/pypy/pypy/changeset/d96812b03457/ Log: add rsides diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -1,5 +1,8 @@ +import operator + from rpython.rlib.rarithmetic import r_uint from rpython.rlib.rbigint import rbigint +from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter.gateway import 
WrappedDefault, interp2app, unwrap_spec from pypy.objspace.std.intobject import W_AbstractIntObject @@ -17,7 +20,7 @@ raise Exception("you cannot do that, you must use space.is_true()") def __repr__(self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%s)" % (self.__class__.__name__, self.boolval) def unwrap(self, space): @@ -46,23 +49,24 @@ def descr_nonzero(self, space): return self - def descr_and(self, space, w_other): - if not isinstance(w_other, W_BoolObject): - return W_AbstractIntObject.descr_and(self, space, w_other) - return space.newbool(self.boolval & w_other.boolval) + def make_bitwise_binop(opname): + descr_name = 'descr_' + opname + super_op = getattr(W_AbstractIntObject, descr_name) + op = getattr(operator, + opname + '_' if opname in ('and', 'or') else opname) + @func_renamer(descr_name) + def descr_binop(self, space, w_other): + if not isinstance(w_other, W_BoolObject): + return super_op(self, space, w_other) + return space.newbool(op(self.boolval, w_other.boolval)) + return descr_binop, func_with_new_name(descr_binop, 'descr_r' + opname) - def descr_or(self, space, w_other): - if not isinstance(w_other, W_BoolObject): - return W_AbstractIntObject.descr_or(self, space, w_other) - return space.newbool(self.boolval | w_other.boolval) - - def descr_xor(self, space, w_other): - if not isinstance(w_other, W_BoolObject): - return W_AbstractIntObject.descr_xor(self, space, w_other) - return space.newbool(self.boolval ^ w_other.boolval) + descr_and, descr_rand = make_bitwise_binop('and') + descr_or, descr_ror = make_bitwise_binop('or') + descr_xor, descr_rxor = make_bitwise_binop('xor') W_BoolObject.w_False = W_BoolObject(False) -W_BoolObject.w_True = W_BoolObject(True) +W_BoolObject.w_True = W_BoolObject(True) @unwrap_spec(w_obj=WrappedDefault(False)) def descr__new__(space, w_booltype, w_obj): @@ -81,12 +85,12 @@ __repr__ = interp2app(W_BoolObject.descr_repr), __str__ = interp2app(W_BoolObject.descr_str), __nonzero__ = interp2app(W_BoolObject.descr_nonzero), - # XXX: rsides + __and__ = interp2app(W_BoolObject.descr_and), - #__rand__ = interp2app(W_BoolObject.descr_rand), + __rand__ = interp2app(W_BoolObject.descr_rand), __or__ = interp2app(W_BoolObject.descr_or), - #__ror__ = interp2app(W_BoolObject.descr_ror), + __ror__ = interp2app(W_BoolObject.descr_ror), __xor__ = interp2app(W_BoolObject.descr_xor), - #__rxor__ = interp2app(W_BoolObject.descr_rxor), + __rxor__ = interp2app(W_BoolObject.descr_rxor), ) W_BoolObject.typedef.acceptable_as_base_class = False From noreply at buildbot.pypy.org Tue Dec 31 01:49:41 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 31 Dec 2013 01:49:41 +0100 (CET) Subject: [pypy-commit] pypy default: prefer the name 'self' for the sake of introspection Message-ID: <20131231004941.7BCAA1C021C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68571:fb1a1b3b4c80 Date: 2013-12-30 16:14 -0800 http://bitbucket.org/pypy/pypy/changeset/fb1a1b3b4c80/ Log: prefer the name 'self' for the sake of introspection diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -805,8 +805,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 
'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -821,7 +821,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): From noreply at buildbot.pypy.org Tue Dec 31 01:49:46 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 31 Dec 2013 01:49:46 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3 and kill the old range list tests again Message-ID: <20131231004946.765A71C0F86@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68574:e34541f45c3d Date: 2013-12-30 16:48 -0800 http://bitbucket.org/pypy/pypy/changeset/e34541f45c3d/ Log: adapt to py3 and kill the old range list tests again diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1290,15 +1290,15 @@ assert list(L([5, 6])) == ["ok"] assert list(L([5.2, 6.3])) == ["ok"] # - class S(str): + class S(bytes): def __iter__(self): yield "ok" - assert list(S("don't see me")) == ["ok"] + assert list(S(b"don't see me")) == ["ok"] # - class U(unicode): + class U(str): def __iter__(self): yield "ok" - assert list(U(u"don't see me")) == ["ok"] + assert list(U("don't see me")) == ["ok"] def test_extend_from_nonempty_list_with_subclasses(self): l = ["hi!"] @@ -1313,15 +1313,15 @@ l.extend(L([5, 6])) l.extend(L([5.2, 6.3])) # - class S(str): + class S(bytes): def __iter__(self): yield "okS" - l.extend(S("don't see me")) + l.extend(S(b"don't see me")) # - class U(unicode): + class U(str): def __iter__(self): yield "okU" - l.extend(U(u"don't see me")) + l.extend(U("don't see me")) # assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] @@ -1355,94 +1355,6 @@ assert item11 in l[::11] -class AppTestForRangeLists(AppTestW_ListObject): - spaceconfig = {"objspace.std.withrangelist": True} - - def test_range_simple_backwards(self): - x = range(5,1) - assert x == [] - - def test_range_big_start(self): - x = range(1,10) - x[22:0:-1] == range(1,10) - - def test_range_list_invalid_slice(self): - x = [1,2,3,4] - assert x[10:0] == [] - assert x[10:0:None] == [] - - x = range(1,5) - assert x[10:0] == [] - assert x[10:0:None] == [] - - assert x[0:22] == [1,2,3,4] - assert x[-1:10] == [4] - - assert x[0:22:None] == [1,2,3,4] - assert x[-1:10:None] == [4] - - def test_range_backwards(self): - x = range(1,10) - assert x[22:-10] == [] - assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] - assert x[10:3:-1] == [9,8,7,6,5] - assert x[10:3:-2] == [9,7,5] - assert x[1:5:-1] == [] - - def test_sort_range(self): - l = range(3,10,3) - l.sort() - assert l == [3, 6, 9] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort() - assert l == [3, 6, 9] - - def test_slice(self): - l = [] - l2 = range(3) - l.__setslice__(0,3,l2) - assert l == [0,1,2] - - def test_getitem(self): - l = range(5) - raises(IndexError, "l[-10]") - - def test_append(self): - l = range(5) - l.append(26) - assert l == [0,1,2,3,4,26] - - l = range(5) - l.append("a") - assert l == [0,1,2,3,4,"a"] - - l = range(5) - l.append(5) - assert l == [0,1,2,3,4,5] - - def test_pop(self): - l = range(3) - assert l.pop(0) == 0 - - def test_setitem(self): - l = range(3) - l[0] = 1 - assert l == [1,1,2] - - def test_inset(self): - l = range(3) - l.insert(1,5) - assert l == [0,5,1,2] - - def 
test_reverse(self): - l = range(3) - l.reverse() - assert l == [2,1,0] - - class AppTestWithoutStrategies(object): spaceconfig = {"objspace.std.withliststrategies": False} From noreply at buildbot.pypy.org Tue Dec 31 01:49:42 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 31 Dec 2013 01:49:42 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20131231004942.D46A41C0291@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68572:f7f04904f8ce Date: 2013-12-30 16:14 -0800 http://bitbucket.org/pypy/pypy/changeset/f7f04904f8ce/ Log: merge default diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -805,8 +805,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -821,7 +821,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -39,8 +39,8 @@ else: def rsocket_startup(): pass - - + + def ntohs(x): return rffi.cast(lltype.Signed, _c.ntohs(x)) @@ -500,7 +500,7 @@ self.type = type self.proto = proto self.timeout = defaults.timeout - + def __del__(self): fd = self.fd if fd != _c.INVALID_SOCKET: @@ -575,8 +575,8 @@ if n == 0: return 1 return 0 - - + + def error_handler(self): return last_error() @@ -696,7 +696,7 @@ if res < 0: res = errno return (res, False) - + def connect(self, address): """Connect the socket to a remote address.""" err, timeout = self._connect(address) @@ -704,7 +704,7 @@ raise SocketTimeout if err: raise CSocketError(err) - + def connect_ex(self, address): """This is like connect(address), but returns an error code (the errno value) instead of raising an exception when an error occurs.""" @@ -720,7 +720,7 @@ raise self.error_handler() return make_socket(fd, self.family, self.type, self.proto, SocketClass=SocketClass) - + def getpeername(self): """Return the address of the remote endpoint.""" address, addr_p, addrlen_p = self._addrbuf() @@ -790,7 +790,7 @@ """Return the timeout of the socket. A timeout < 0 means that timeouts are disabled in the socket.""" return self.timeout - + def listen(self, backlog): """Enable a server to accept connections. 
The backlog argument must be at least 1; it specifies the number of unaccepted connections @@ -857,7 +857,7 @@ def recvfrom_into(self, rwbuffer, nbytes, flags=0): buf, addr = self.recvfrom(nbytes, flags) rwbuffer.setslice(0, buf) - return len(buf), addr + return len(buf), addr def send_raw(self, dataptr, length, flags=0): """Send data from a CCHARP buffer.""" @@ -951,7 +951,7 @@ else: self.timeout = timeout self._setblocking(self.timeout < 0.0) - + def shutdown(self, how): """Shut down the reading side of the socket (flag == SHUT_RD), the writing side of the socket (flag == SHUT_WR), or both ends diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -624,8 +624,7 @@ i += 1 return count - @classmethod - def ll_find(cls, s1, s2, start, end): + def ll_find(s1, s2, start, end): if start < 0: start = 0 if end > len(s1.chars): @@ -635,12 +634,11 @@ m = len(s2.chars) if m == 1: - return cls.ll_find_char(s1, s2.chars[0], start, end) + return LLHelpers.ll_find_char(s1, s2.chars[0], start, end) - return cls.ll_search(s1, s2, start, end, FAST_FIND) + return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) - @classmethod - def ll_rfind(cls, s1, s2, start, end): + def ll_rfind(s1, s2, start, end): if start < 0: start = 0 if end > len(s1.chars): @@ -650,9 +648,9 @@ m = len(s2.chars) if m == 1: - return cls.ll_rfind_char(s1, s2.chars[0], start, end) + return LLHelpers.ll_rfind_char(s1, s2.chars[0], start, end) - return cls.ll_search(s1, s2, start, end, FAST_RFIND) + return LLHelpers.ll_search(s1, s2, start, end, FAST_RFIND) @classmethod def ll_count(cls, s1, s2, start, end): @@ -881,6 +879,37 @@ item.copy_contents(s, item, i, 0, j - i) return res + def ll_split(LIST, s, c, max): + count = 1 + if max == -1: + max = len(s.chars) + pos = 0 + last = len(s.chars) + markerlen = len(c.chars) + pos = s.find(c, 0, last) + while pos >= 0 and count <= max: + pos = s.find(c, pos + markerlen, last) + count += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + pos = 0 + count = 0 + pos = s.find(c, 0, last) + prev_pos = 0 + if pos < 0: + items[0] = s + return res + while pos >= 0 and count < max: + item = items[count] = s.malloc(pos - prev_pos) + item.copy_contents(s, item, prev_pos, 0, pos - + prev_pos) + count += 1 + prev_pos = pos + markerlen + pos = s.find(c, pos + markerlen, last) + item = items[count] = s.malloc(last - prev_pos) + item.copy_contents(s, item, prev_pos, 0, last - prev_pos) + return res + def ll_rsplit_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) @@ -916,6 +945,37 @@ item.copy_contents(s, item, j, 0, i - j) return res + def ll_rsplit(LIST, s, c, max): + count = 1 + if max == -1: + max = len(s.chars) + pos = len(s.chars) + markerlen = len(c.chars) + pos = s.rfind(c, 0, pos) + while pos >= 0 and count <= max: + pos = s.rfind(c, 0, pos - markerlen) + count += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + pos = 0 + pos = len(s.chars) + prev_pos = pos + pos = s.rfind(c, 0, pos) + if pos < 0: + items[0] = s + return res + count -= 1 + while pos >= 0 and count > 0: + item = items[count] = s.malloc(prev_pos - pos - markerlen) + item.copy_contents(s, item, pos + markerlen, 0, + prev_pos - pos - markerlen) + count -= 1 + prev_pos = pos + pos = s.rfind(c, 0, pos) + item = items[count] = s.malloc(prev_pos) + item.copy_contents(s, item, 0, 0, prev_pos) + return res + @jit.elidable def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) @@ -1094,7 +1154,9 
@@ 'copy_contents' : staticAdtMethod(copy_string_contents), 'copy_contents_from_str' : staticAdtMethod(copy_string_contents), 'gethash': LLHelpers.ll_strhash, - 'length': LLHelpers.ll_length})) + 'length': LLHelpers.ll_length, + 'find': LLHelpers.ll_find, + 'rfind': LLHelpers.ll_rfind})) UNICODE.become(GcStruct('rpy_unicode', ('hash', Signed), ('chars', Array(UniChar, hints={'immutable': True})), adtmeths={'malloc' : staticAdtMethod(mallocunicode), diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -336,10 +336,16 @@ def rtype_method_split(self, hop): rstr = hop.args_r[0].repr + v_str = hop.inputarg(rstr.repr, 0) + if isinstance(hop.args_s[1], annmodel.SomeString): + v_chr = hop.inputarg(rstr.repr, 1) + fn = self.ll.ll_split + else: + v_chr = hop.inputarg(rstr.char_repr, 1) + fn = self.ll.ll_split_chr if hop.nb_args == 3: - v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + v_max = hop.inputarg(Signed, 2) else: - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO @@ -347,14 +353,20 @@ list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr, v_max) + return hop.gendirectcall(fn, cLIST, v_str, v_chr, v_max) def rtype_method_rsplit(self, hop): rstr = hop.args_r[0].repr + v_str = hop.inputarg(rstr.repr, 0) + if isinstance(hop.args_s[1], annmodel.SomeString): + v_chr = hop.inputarg(rstr.repr, 1) + fn = self.ll.ll_rsplit + else: + v_chr = hop.inputarg(rstr.char_repr, 1) + fn = self.ll.ll_rsplit_chr if hop.nb_args == 3: - v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + v_max = hop.inputarg(Signed, 2) else: - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO @@ -362,7 +374,7 @@ list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_rsplit_chr, cLIST, v_str, v_chr, v_max) + return hop.gendirectcall(fn, cLIST, v_str, v_chr, v_max) def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -854,10 +854,10 @@ callee_graph = graph, position_tag = object()) - def convertvar(self, v, r_from, r_to): - assert isinstance(v, (Variable, Constant)) + def convertvar(self, orig_v, r_from, r_to): + assert isinstance(orig_v, (Variable, Constant)) if r_from != r_to: - v = pair(r_from, r_to).convert_from_to(v, self) + v = pair(r_from, r_to).convert_from_to(orig_v, self) if v is NotImplemented: raise TyperError("don't know how to convert from %r to %r" % (r_from, r_to)) @@ -865,6 +865,8 @@ raise TyperError("bug in conversion from %r to %r: " "returned a %r" % (r_from, r_to, v.concretetype)) + else: + v = orig_v return v def genop(self, opname, args_v, resulttype=None): diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -731,6 +731,32 @@ res = self.interpret(fn, [i]) assert res == fn(i) + def test_split_multichar(self): + l = ["abc::z", "abc", "abc::def:::x"] + exp = [["abc", "z"], ["abc"], ["abc", "def", ":x"]] + exp2 = [["abc", "z"], ["abc"], ["abc", "def:::x"]] + + def f(i): + s = 
l[i] + return s.split("::") == exp[i] and s.split("::", 1) == exp2[i] + + for i in range(3): + res = self.interpret(f, [i]) + assert res == True + + def test_rsplit_multichar(self): + l = ["abc::z", "abc", "abc::def:::x"] + exp = [["abc", "z"], ["abc"], ["abc", "def:", "x"]] + exp2 = [["abc", "z"], ["abc"], ["abc::def:", "x"]] + + def f(i): + s = l[i] + return s.rsplit("::") == exp[i] and s.rsplit("::", 1) == exp2[i] + + for i in range(3): + res = self.interpret(f, [i]) + assert res == True + def test_rsplit(self): fn = self._make_split_test('rsplit') for i in range(5): From noreply at buildbot.pypy.org Tue Dec 31 01:49:45 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 31 Dec 2013 01:49:45 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131231004945.4A0A91C067F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68573:91714ff3b0b3 Date: 2013-12-30 16:30 -0800 http://bitbucket.org/pypy/pypy/changeset/91714ff3b0b3/ Log: merge default diff too long, truncating to 2000 out of 2663 lines diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -823,8 +823,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -839,7 +839,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! 
extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -426,7 +426,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -178,6 +178,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -131,7 +131,11 @@ if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() - if space.is_none(w_idx): + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + w_val = self.value.descr_getitem(space, w_idx) + return convert_to_array(space, w_val) + elif space.is_none(w_idx): new_shape = [1] arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) @@ -145,6 +149,12 @@ space.wrap("0-d arrays can't be indexed")) def descr_setitem(self, space, _, w_idx, w_val): + if space.isinstance_w(w_idx, space.w_tuple): + if space.len_w(w_idx) == 0: + return self.set_scalar_value(self.dtype.coerce(space, w_val)) + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) @@ -176,7 +186,7 @@ s = self.dtype.itemtype.bool(self.value) w_res = W_NDimArray.from_shape(space, [s], index_type) if s == 1: - w_res.implementation.setitem(0, index_type.itemtype.box(0)) + w_res.implementation.setitem(0, index_type.itemtype.box(0)) return space.newtuple([w_res]) def fill(self, space, w_value): diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -123,7 +123,8 @@ if w_axis is space.w_None: # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) - arr = arr.reshape(space, None, [arr.get_size()]) + if arr.get_size() > 0: + arr = arr.reshape(space, None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -38,7 +38,7 @@ from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: - w_val = dtype.base.coerce(space, space.wrap(0)) + w_val = 
dtype.base.coerce(space, None) impl = scalar.Scalar(dtype.base, w_val) else: strides, backstrides = calc_strides(shape, dtype.base, order) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -11,6 +11,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype @@ -270,14 +271,25 @@ def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype - dtype = space.interp_w(W_Dtype, - space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: - raise OperationError(space.w_TypeError, space.wrap( - "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): - raise OperationError(space.w_ValueError, space.wrap( - "new type not compatible with array.")) + try: + subclass = space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))) + except OperationError, e: + if e.match(space, space.w_TypeError): + subclass = False + else: + raise + if subclass: + dtype = self.get_dtype(space) + else: + dtype = space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) + if dtype.get_size() != self.get_dtype(space).get_size(): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) if dtype.is_str_or_unicode(): return dtype.coerce(space, space.wrap(self.raw_str())) elif dtype.is_record_type(): @@ -345,28 +357,22 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("long") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("ulong") - -class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int64") - -class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('longlong') - -class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint64") - -class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('ulonglong') + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") class 
W_InexactBox(W_NumberBox): pass @@ -422,7 +428,7 @@ self.dtype = dtype def get_dtype(self, space): - return self.arr.dtype + return self.dtype def raw_str(self): return self.arr.dtype.itemtype.to_str(self) @@ -460,13 +466,17 @@ return space.wrap(dtype.itemtype.to_str(read_val)) return read_val - @unwrap_spec(item=str) - def descr_setitem(self, space, item, w_value): + def descr_setitem(self, space, w_item, w_value): + if space.isinstance_w(w_item, space.w_basestring): + item = space.str_w(w_item) + else: + raise OperationError(space.w_IndexError, space.wrap( + "invalid index")) try: ofs, dtype = self.dtype.fields[item] except KeyError: - raise OperationError(space.w_IndexError, - space.wrap("Field %s does not exist" % item)) + raise OperationError(space.w_ValueError, + space.wrap("field named %s not found" % item)) dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) @@ -658,13 +668,6 @@ __reduce__ = interp2app(W_Int64Box.descr_reduce), ) -if LONG_BIT == 32: - W_LongBox = W_Int32Box - W_ULongBox = W_UInt32Box -elif LONG_BIT == 64: - W_LongBox = W_Int64Box - W_ULongBox = W_UInt64Box - W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), @@ -672,6 +675,21 @@ __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) +W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, + (W_SignedIntegerBox.typedef, int_typedef), + __module__ = "numpy", + __new__ = interp2app(W_LongBox.descr__new__.im_func), + __index__ = interp2app(W_LongBox.descr_index), + __reduce__ = interp2app(W_LongBox.descr_reduce), +) + +W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, + __module__ = "numpy", + __new__ = interp2app(W_ULongBox.descr__new__.im_func), + __index__ = interp2app(W_ULongBox.descr_index), + __reduce__ = interp2app(W_ULongBox.descr_reduce), +) + W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, __module__ = "numpy", ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -136,6 +136,8 @@ return space.wrap(self.itemtype.alignment) def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) def descr_get_str(self, space): @@ -157,8 +159,20 @@ return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: - raise OperationError(space.w_NotImplementedError, space.wrap( - "descr not implemented for record types")) + descr = [] + for name in self.fieldnames: + subdtype = self.fields[name][1] + subdescr = [space.wrap(name)] + if subdtype.is_record_type(): + subdescr.append(subdtype.descr_get_descr(space)) + elif subdtype.subdtype is not None: + subdescr.append(subdtype.subdtype.descr_get_str(space)) + else: + subdescr.append(subdtype.descr_get_str(space)) + if subdtype.shape != []: + subdescr.append(subdtype.descr_get_shape(space)) + descr.append(space.newtuple(subdescr[:])) + return space.newlist(descr) def descr_get_base(self, space): return space.wrap(self.base) @@ -651,6 +665,7 @@ w_box_type = space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox), ], aliases=["float", "double"], ) @@ -680,7 +695,8 @@ name="complex128", char=NPY_CDOUBLELTR, w_box_type = 
space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex], + alternate_constructors=[space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], float_type = self.w_float64dtype, ) @@ -702,7 +718,8 @@ name='string', char=NPY_STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], + alternate_constructors=[space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( @@ -736,38 +753,21 @@ char=NPY_HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) - ptr_size = rffi.sizeof(rffi.CCHARP) - if ptr_size == 4: - intp_box = interp_boxes.W_Int32Box - intp_type = types.Int32() - intp_num = NPY_INT - uintp_box = interp_boxes.W_UInt32Box - uintp_type = types.UInt32() - uintp_num = NPY_UINT - elif ptr_size == 8: - intp_box = interp_boxes.W_Int64Box - intp_type = types.Int64() - intp_num = NPY_LONG - uintp_box = interp_boxes.W_UInt64Box - uintp_type = types.UInt64() - uintp_num = NPY_ULONG - else: - raise ValueError('unknown point size %d' % ptr_size) self.w_intpdtype = W_Dtype( - intp_type, - num=intp_num, - kind=NPY_INTPLTR, + types.Long(), + num=NPY_LONG, + kind=NPY_SIGNEDLTR, name='intp', char=NPY_INTPLTR, - w_box_type = space.gettypefor(intp_box), + w_box_type = space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( - uintp_type, - num=uintp_num, - kind=NPY_UINTPLTR, + types.ULong(), + num=NPY_ULONG, + kind=NPY_UNSIGNEDLTR, name='uintp', char=NPY_UINTPLTR, - w_box_type = space.gettypefor(uintp_box), + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), ) float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -731,11 +731,15 @@ def descr_view(self, space, w_dtype=None, w_type=None): if not w_type and w_dtype: try: - if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + if space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))): w_type = w_dtype w_dtype = None - except (OperationError, TypeError): - pass + except OperationError, e: + if e.match(space, space.w_TypeError): + pass + else: + raise if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -1170,12 +1174,15 @@ def take(a, indices, axis, out, mode): assert mode == 'raise' if axis is None: - res = a.ravel()[indices] + from numpy import array + indices = array(indices) + res = a.ravel()[indices.ravel()].reshape(indices.shape) else: + from operator import mul if axis < 0: axis += len(a.shape) s0, s1 = a.shape[:axis], a.shape[axis+1:] - l0 = prod(s0) if s0 else 1 - l1 = prod(s1) if s1 else 1 + l0 = reduce(mul, s0) if s0 else 1 + l1 = reduce(mul, s1) if s1 else 1 res = a.reshape((l0, -1, l1))[:,indices,:].reshape(s0 + (-1,) + s1) if out is not None: out[:] = res @@ -1423,12 +1430,11 @@ arr_iter.next() return w_arr - at unwrap_spec(order=str) -def zeros(space, w_shape, w_dtype=None, order='C'): +def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - return 
W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) + return W_NDimArray.from_shape(space, shape, dtype=dtype) @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -33,6 +33,9 @@ self.allow_complex = allow_complex self.complex_to_float = complex_to_float + def descr_get_name(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("" % self.name) @@ -373,14 +376,19 @@ w_rdtype = w_ldtype elif w_lhs.is_scalar() and not w_rhs.is_scalar(): w_ldtype = w_rdtype - if (self.int_only and (not w_ldtype.is_int_type() or not w_rdtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or w_rdtype.is_complex_type())): - raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) calc_dtype = find_binop_result_dtype(space, w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) + if (self.int_only and (not w_ldtype.is_int_type() or + not w_rdtype.is_int_type() or + not calc_dtype.is_int_type()) or + not self.allow_bool and (w_ldtype.is_bool_type() or + w_rdtype.is_bool_type()) or + not self.allow_complex and (w_ldtype.is_complex_type() or + w_rdtype.is_complex_type())): + raise OperationError(space.w_TypeError, space.wrap( + "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -417,6 +425,7 @@ __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), + __name__ = GetSetProperty(W_Ufunc.descr_get_name), identity = GetSetProperty(W_Ufunc.descr_get_identity), accumulate = interp2app(W_Ufunc.descr_accumulate), @@ -428,6 +437,8 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): + if dt2 is None: + return dt1 # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 @@ -523,31 +534,30 @@ bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype - complex_type = interp_dtype.get_dtype_cache(space).w_complex128dtype - float_type = interp_dtype.get_dtype_cache(space).w_float64dtype + uint64_dtype = interp_dtype.get_dtype_cache(space).w_uint64dtype + complex_dtype = interp_dtype.get_dtype_cache(space).w_complex128dtype + float_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype if isinstance(w_obj, interp_boxes.W_GenericBox): dtype = w_obj.get_dtype(space) - if current_guess is None: - return dtype return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): - if current_guess is None or current_guess is bool_dtype: - return bool_dtype - return current_guess + return find_binop_result_dtype(space, bool_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_int): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype): - return int64_dtype - return current_guess + try: + space.int_w(w_obj) + except OperationError, e: + if e.match(space, space.w_OverflowError): + return find_binop_result_dtype(space, uint64_dtype, + current_guess) + raise + return 
find_binop_result_dtype(space, int64_dtype, current_guess) + elif space.isinstance_w(w_obj, space.w_float): + return find_binop_result_dtype(space, float_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_complex): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype or - current_guess is complex_type or current_guess is float_type): - return complex_type - return current_guess + return complex_dtype elif space.isinstance_w(w_obj, space.w_str): - if (current_guess is None): + if current_guess is None: return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY_STRING: @@ -555,12 +565,6 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess - if current_guess is complex_type: - return complex_type - if space.isinstance_w(w_obj, space.w_float): - return float_type - elif space.isinstance_w(w_obj, space.w_slice): - return long_dtype raise operationerrfmt(space.w_NotImplementedError, 'unable to create dtype from objects, ' '"%T" instance not supported', w_obj) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,9 +69,11 @@ return True def find_shape_and_elems(space, w_iterable, dtype): + is_rec_type = dtype is not None and dtype.is_record_type() + if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) - is_rec_type = dtype is not None and dtype.is_record_type() while True: if not batch: return shape[:], [] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -33,6 +33,11 @@ assert typeinfo['CFLOAT'] == ('F', 14, 64, 8, np.complex64) assert typeinfo['CDOUBLE'] == ('D', 15, 128, 16, np.complex128) assert typeinfo['HALF'] == ('e', 23, 16, 2, np.float16) + assert typeinfo['INTP'] == ('p', np.dtype('int').num, + self.ptr_size*8, self.ptr_size, + 2**(self.ptr_size*8 - 1) - 1, + -2**(self.ptr_size*8 - 1), + np.dtype('int').type) def test_dtype_basic(self): from numpypy import dtype @@ -49,6 +54,7 @@ assert dtype(int).fields is None assert dtype(int).names is None assert dtype(int).hasobject is False + assert dtype(int).subdtype is None assert dtype(None) is dtype(float) @@ -109,15 +115,11 @@ assert dtype(bool).num == 0 if self.ptr_size == 4: - assert dtype('intp').num == 5 - assert dtype('uintp').num == 6 assert dtype('int32').num == 7 assert dtype('uint32').num == 8 assert dtype('int64').num == 9 assert dtype('uint64').num == 10 else: - assert dtype('intp').num == 7 - assert dtype('uintp').num == 8 assert dtype('int32').num == 5 assert dtype('uint32').num == 6 assert dtype('int64').num == 7 @@ -125,6 +127,8 @@ assert dtype(int).num == 7 assert dtype('int').num == 7 assert dtype('uint').num == 8 + assert dtype('intp').num == 7 + assert dtype('uintp').num == 8 assert dtype(float).num == 12 assert dtype('float').num == 12 assert dtype('complex').num == 15 @@ -365,16 +369,22 @@ # numpy allows abstract types in array creation a_n = numpy.array([4,4], numpy.number) + a_f = numpy.array([4,4], numpy.floating) + a_c = numpy.array([4,4], numpy.complexfloating) a_i = numpy.array([4,4], numpy.integer) a_s = numpy.array([4,4], numpy.signedinteger) a_u = numpy.array([4,4], 
numpy.unsignedinteger) assert a_n.dtype.num == 12 + assert a_f.dtype.num == 12 + assert a_c.dtype.num == 15 assert a_i.dtype.num == 7 assert a_s.dtype.num == 7 assert a_u.dtype.num == 8 assert a_n.dtype is numpy.dtype('float64') + assert a_f.dtype is numpy.dtype('float64') + assert a_c.dtype is numpy.dtype('complex128') if self.ptr_size == 4: assert a_i.dtype is numpy.dtype('int32') assert a_s.dtype is numpy.dtype('int32') @@ -472,8 +482,7 @@ assert numpy.int16('32768') == -32768 def test_uint16(self): - import numpypy as numpy - + import numpy assert numpy.uint16(65535) == 65535 assert numpy.uint16(65536) == 0 assert numpy.uint16('65535') == 65535 @@ -481,8 +490,7 @@ def test_int32(self): import sys - import numpypy as numpy - + import numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 @@ -497,10 +505,8 @@ def test_uint32(self): import sys - import numpypy as numpy - + import numpy assert numpy.uint32(10) == 10 - if sys.maxsize > 2 ** 31 - 1: assert numpy.uint32(4294967295) == 4294967295 assert numpy.uint32(4294967296) == 0 @@ -517,8 +523,7 @@ def test_int64(self): import sys - import numpypy as numpy - + import numpy if sys.maxsize == 2 ** 63 -1: assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, @@ -533,30 +538,30 @@ assert numpy.int64(9223372036854775807) == 9223372036854775807 assert numpy.int64(9223372036854775807) == 9223372036854775807 - raises(OverflowError, numpy.int64, 9223372036854775808) raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): - import sys - import numpypy as numpy - + import numpy + assert numpy.dtype(numpy.uint64).type is numpy.uint64 assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] - - assert numpy.dtype(numpy.uint64).type is numpy.uint64 - skip("see comment") - # These tests pass "by chance" on numpy, things that are larger than - # platform long (i.e. a python int), don't get put in a normal box, - # instead they become an object array containing a long, we don't have - # yet, so these can't pass. - assert numpy.uint64(9223372036854775808) == 9223372036854775808 - assert numpy.uint64(18446744073709551615) == 18446744073709551615 - raises(OverflowError, numpy.uint64(18446744073709551616)) + import sys + if '__pypy__' not in sys.builtin_module_names: + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. 
+ assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + else: + raises(OverflowError, numpy.int64, 9223372036854775808) + raises(OverflowError, numpy.int64, 18446744073709551615) + raises(OverflowError, numpy.uint64, 18446744073709551616) def test_float16(self): - import numpypy as numpy + import numpy assert numpy.float16.mro() == [numpy.float16, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -567,8 +572,7 @@ def test_float32(self): - import numpypy as numpy - + import numpy assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -578,8 +582,7 @@ raises(ValueError, numpy.float32, '23.2df') def test_float64(self): - import numpypy as numpy - + import numpy assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] @@ -595,14 +598,14 @@ raises(ValueError, numpy.float64, '23.2df') def test_float_None(self): - import numpypy as numpy + import numpy from math import isnan assert isnan(numpy.float32(None)) assert isnan(numpy.float64(None)) assert isnan(numpy.longdouble(None)) def test_longfloat(self): - import numpypy as numpy + import numpy # it can be float96 or float128 if numpy.longfloat != numpy.float64: assert numpy.longfloat.mro()[1:] == [numpy.floating, @@ -615,8 +618,7 @@ raises(ValueError, numpy.longfloat, '23.2df') def test_complex_floating(self): - import numpypy as numpy - + import numpy assert numpy.complexfloating.__mro__ == (numpy.complexfloating, numpy.inexact, numpy.number, numpy.generic, object) @@ -714,10 +716,14 @@ assert numpy.int16 is numpy.short assert numpy.int8 is numpy.byte assert numpy.bool_ is numpy.bool8 + assert numpy.intp().dtype.num == 7 + assert numpy.intp().dtype.char == 'l' if self.ptr_size == 4: + assert numpy.intp().dtype.name == 'int32' assert numpy.intp is numpy.int32 assert numpy.uintp is numpy.uint32 elif self.ptr_size == 8: + assert numpy.intp().dtype.name == 'int64' assert numpy.intp is numpy.int64 assert numpy.uintp is numpy.uint64 @@ -786,8 +792,22 @@ def test_intp(self): from numpypy import dtype - assert dtype('p') == dtype('intp') - assert dtype('P') == dtype('uintp') + assert dtype('p') is dtype('intp') + assert dtype('P') is dtype('uintp') + #assert dtype('p') is dtype('int') + #assert dtype('P') is dtype('uint') + assert dtype('p').num == 7 + assert dtype('P').num == 8 + #assert dtype('p').char == 'l' + #assert dtype('P').char == 'L' + assert dtype('p').kind == 'i' + assert dtype('P').kind == 'u' + #if self.ptr_size == 4: + # assert dtype('p').name == 'int32' + # assert dtype('P').name == 'uint32' + #else: + # assert dtype('p').name == 'int64' + # assert dtype('P').name == 'uint64' def test_alignment(self): from numpypy import dtype @@ -835,12 +855,12 @@ import numpy as np assert np.dtype('> 2 == [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]).all() - a = array([True, False]) + a = np.array([True, False]) assert (a >> 1 == [0, 0]).all() - a = arange(3, dtype=float) + a = np.arange(3, dtype=float) raises(TypeError, lambda: a >> 1) + a = np.array([123], dtype='uint64') + b = a >> 1 + assert b == 61 + assert b.dtype.type is np.uint64 + a = np.array(123, dtype='uint64') + exc = raises(TypeError, "a >> 1") + assert 'not supported for the input types' in exc.value.message def test_rrshift(self): from numpypy import arange @@ -1407,11 +1419,11 @@ for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: a = array([True, False], dtype=dt) assert 
a.prod() == 0 - assert a.prod().dtype == dtype('uint' if dt[0] == 'u' else 'int') + assert a.prod().dtype is dtype('uint' if dt[0] == 'u' else 'int') for dt in ['l', 'L', 'q', 'Q', 'e', 'f', 'd', 'F', 'D']: a = array([True, False], dtype=dt) assert a.prod() == 0 - assert a.prod().dtype == dtype(dt) + assert a.prod().dtype is dtype(dt) def test_max(self): from numpypy import array, zeros @@ -1494,12 +1506,12 @@ def test_dtype_guessing(self): from numpypy import array, dtype - + import sys assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - #assert array([1L, 2, 3]).dtype is dtype(long) + assert array([1L, 2, 3]).dtype is dtype('q') assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1510,6 +1522,12 @@ assert array([int8(3)]).dtype is dtype("int8") assert array([bool_(True)]).dtype is dtype(bool) assert array([bool_(True), 3.0]).dtype is dtype(float) + assert array(sys.maxint + 42).dtype is dtype('Q') + assert array([sys.maxint + 42] * 2).dtype is dtype('Q') + assert array([sys.maxint + 42, 123]).dtype is dtype(float) + assert array([sys.maxint + 42, 123L]).dtype is dtype(float) + assert array([1+2j, 123]).dtype is dtype(complex) + assert array([1+2j, 123L]).dtype is dtype(complex) def test_comparison(self): import operator @@ -2176,12 +2194,6 @@ a[b] = 1. assert (a == [[1., 1., 1.]]).all() - @py.test.mark.xfail - def test_boolean_array(self): - import numpypy as np - a = np.ndarray([1], dtype=bool) - assert a[0] == True - class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) @@ -2244,7 +2256,6 @@ f.close() - class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -2718,7 +2729,12 @@ assert (arange(10).take([1, 2, 1, 1]) == [1, 2, 1, 1]).all() raises(IndexError, "arange(3).take([15])") a = arange(6).reshape(2, 3) + assert a.take(3) == 3 + assert a.take(3).shape == () assert (a.take([1, 0, 3]) == [1, 0, 3]).all() + assert (a.take([[1, 0], [2, 3]]) == [[1, 0], [2, 3]]).all() + assert (a.take([1], axis=0) == [[3, 4, 5]]).all() + assert (a.take([1], axis=1) == [[1], [4]]).all() assert ((a + a).take([3]) == [6]).all() a = arange(12).reshape(2, 6) assert (a[:,::2].take([3, 2, 1]) == [6, 4, 2]).all() @@ -2815,7 +2831,11 @@ assert b[35] == 200 b[[slice(25, 30)]] = range(5) assert all(a[:5] == range(5)) - raises(TypeError, 'b[[[slice(25, 125)]]]') + import sys + if '__pypy__' not in sys.builtin_module_names: + raises(TypeError, 'b[[[slice(25, 125)]]]') + else: + raises(NotImplementedError, 'b[[[slice(25, 125)]]]') def test_cumsum(self): from numpypy import arange @@ -2969,17 +2989,18 @@ assert j[0] == 12 k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) 
- dt = array([5],dtype='longfloat').dtype - if dt.itemsize == 12: + dt = array([5], dtype='longfloat').dtype + if dt.itemsize == 8: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') + elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') elif dt.itemsize == 16: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00' \ '\x00\x00\x00\x00', dtype='float128') - elif dt.itemsize == 8: - skip('longfloat is float64') else: - skip('unknown itemsize for longfloat') + assert False, 'unknown itemsize for longfloat' assert m[0] == dtype('longfloat').type(5.) def test_fromstring_invalid(self): @@ -3040,7 +3061,13 @@ spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def test_zeros(self): - from numpypy import zeros + from numpypy import zeros, void + a = zeros((), dtype=[('x', int), ('y', float)]) + assert type(a[()]) is void + assert type(a.item()) is tuple + assert a[()]['x'] == 0 + assert a[()]['y'] == 0 + assert a.shape == () a = zeros(2, dtype=[('x', int), ('y', float)]) raises(IndexError, 'a[0]["xyz"]') assert a[0]['x'] == 0 @@ -3055,7 +3082,12 @@ assert a[1]['y'] == 2 def test_views(self): - from numpypy import array + from numpypy import array, zeros, ndarray + a = zeros((), dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]') + assert type(a['x']) is ndarray + assert a['x'] == 0 + assert a['y'] == 0 a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) raises((IndexError, ValueError), 'array([1])["x"]') raises((IndexError, ValueError), 'a["z"]') @@ -3076,14 +3108,44 @@ def test_creation_and_repr(self): from numpypy import array + a = array((1, 2), dtype=[('x', int), ('y', float)]) + assert a.shape == () + assert repr(a[()]) == '(1, 2.0)' a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) assert repr(a[0]) == '(1, 2.0)' + def test_void_copyswap(self): + import numpy as np + dt = np.dtype([('one', ' 0 and x['two'] > 2 + else: + assert x['one'] == 1 and x['two'] == 2 + def test_nested_dtype(self): - from numpypy import zeros + import numpy as np a = [('x', int), ('y', float)] b = [('x', int), ('y', a)] - arr = zeros(3, dtype=b) + arr = np.zeros((), dtype=b) + assert arr['x'] == 0 + arr['x'] = 2 + assert arr['x'] == 2 + exc = raises(IndexError, "arr[3L]") + assert exc.value.message == "0-d arrays can't be indexed" + exc = raises(ValueError, "arr['xx'] = 2") + assert exc.value.message == "field named xx not found" + assert arr['y'].dtype == a + assert arr['y'].shape == () + assert arr['y'][()]['x'] == 0 + assert arr['y'][()]['y'] == 0 + arr['y'][()]['x'] = 2 + arr['y'][()]['y'] = 3 + assert arr['y'][()]['x'] == 2 + assert arr['y'][()]['y'] == 3 + arr = np.zeros(3, dtype=b) arr[1]['x'] = 15 assert arr[1]['x'] == 15 arr[1]['y']['y'] = 3.5 @@ -3208,11 +3270,15 @@ def test_subarrays(self): from numpypy import dtype, array, zeros - d = dtype([("x", "int", 3), ("y", "float", 5)]) + + a = zeros((), dtype=d) + #assert a['x'].dtype == int + #assert a['x'].shape == (3,) + #assert (a['x'] == [0, 0, 0]).all() + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) - for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() assert (a[1][v] == [4, 5, 6]).all() @@ -3230,6 +3296,13 @@ a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 + a[1]["x"][2] = 123 + assert (a[1]["x"] == [4, 5, 123]).all() + a[1]['y'][3] = 4 + assert a[1]['y'][3] == 4 + assert a['y'][1][3] == 4 + a['y'][1][4] = 5 + assert a[1]['y'][4] == 5 d = dtype([("x", "int64", (2, 
3))]) a = array([([[1, 2, 3], [4, 5, 6]],)], dtype=d) @@ -3303,14 +3376,16 @@ a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], dtype=dt) - s = str(a) i = a.item() assert isinstance(i, tuple) assert len(i) == 4 - skip('incorrect formatting via dump_data') - assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " - "[[7, 8, 9], [10, 11, 12]]])]") - + import sys + if '__pypy__' not in sys.builtin_module_names: + assert str(a) == "[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " \ + "[[7, 8, 9], [10, 11, 12]]])]" + else: + assert str(a) == "array([('aaaa', 1.0, 8.0, [1, 2, 3, 4, 5, 6, " \ + "7, 8, 9, 10, 11, 12])])" def test_issue_1589(self): import numpypy as numpy @@ -3323,6 +3398,7 @@ a = np.array([1,2,3], dtype='int16') assert (a * 2).dtype == np.dtype('int16') + class AppTestPyPy(BaseNumpyAppTest): def setup_class(cls): if option.runappdirect and '__pypy__' not in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -4,8 +4,9 @@ spaceconfig = dict(usemodules=["micronumpy", "binascii", "struct"]) def test_init(self): - import numpypy as np + import numpy as np import math + import sys assert np.intp() == np.intp(0) assert np.intp('123') == np.intp(123) raises(TypeError, np.intp, None) @@ -17,6 +18,9 @@ assert np.complex_() == np.complex_(0) #raises(TypeError, np.complex_, '1+2j') assert math.isnan(np.complex_(None)) + for c in ['i', 'I', 'l', 'L', 'q', 'Q']: + assert np.dtype(c).type().dtype.char == c + assert np.dtype('L').type(sys.maxint + 42) == sys.maxint + 42 def test_builtin(self): import numpy as np @@ -37,7 +41,7 @@ assert len(np.string_('123')) == 3 def test_pickle(self): - from numpypy import dtype, zeros + from numpy import dtype, zeros try: from numpy.core.multiarray import scalar except ImportError: @@ -111,8 +115,17 @@ assert a.squeeze() is a raises(TypeError, a.squeeze, 2) + def test_bitshift(self): + import numpy as np + assert np.int32(123) >> 1 == 61 + assert type(np.int32(123) >> 1) is np.int_ + assert np.int64(123) << 1 == 246 + assert type(np.int64(123) << 1) is np.int64 + exc = raises(TypeError, "np.uint64(123) >> 1") + assert 'not supported for the input types' in exc.value.message + def test_attributes(self): - import numpypy as np + import numpy as np value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') assert value.size == 1 diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -45,6 +45,9 @@ def test_argsort_axis(self): from numpypy import array + a = array([]) + for axis in [None, -1, 0]: + assert a.argsort(axis=axis).shape == (0,) a = array([[4, 2], [1, 3]]) assert (a.argsort(axis=None) == [2, 1, 3, 0]).all() assert (a.argsort(axis=-1) == [[1, 0], [0, 1]]).all() @@ -306,9 +309,8 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): - skip('not implemented yet') - from numpypy import array, dtype - a = array(range(11),dtype='float64') + from numpy import array, dtype + a = array(range(11), dtype='float64') c = a.astype(dtype('" assert repr(ufunc) == "" + assert add.__name__ == 'add' def test_ufunc_attrs(self): from numpypy import add, multiply, sin @@ -390,23 +391,17 @@ assert (a == ref).all() def test_signbit(self): - from numpypy import 
signbit, add - + from numpy import signbit, add, copysign, nan + assert signbit(add.identity) == False assert (signbit([0, 0.0, 1, 1.0, float('inf')]) == - [False, False, False, False, False]).all() + [False, False, False, False, False]).all() assert (signbit([-0, -0.0, -1, -1.0, float('-inf')]) == - [False, True, True, True, True]).all() - - a = add.identity - assert signbit(a) == False - - skip('sign of nan is non-determinant') - assert (signbit([float('nan'), float('-nan'), -float('nan')]) == - [False, True, True]).all() + [False, True, True, True, True]).all() + assert (signbit([copysign(nan, 1), copysign(nan, -1)]) == + [False, True]).all() def test_reciprocal(self): - from numpypy import array, reciprocal - + from numpy import array, reciprocal inf = float('inf') nan = float('nan') reference = [-0.2, inf, -inf, 2.0, nan] diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -12,7 +12,7 @@ from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, raw_storage_getitem) from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of +from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of, LONG_BIT from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.rstruct.nativefmttable import native_is_bigendian @@ -568,16 +568,6 @@ BoxType = interp_boxes.W_UInt32Box format_code = "I" -class Long(BaseType, Integer): - T = rffi.LONG - BoxType = interp_boxes.W_LongBox - format_code = "l" - -class ULong(BaseType, Integer): - T = rffi.ULONG - BoxType = interp_boxes.W_ULongBox - format_code = "L" - def _int64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) @@ -618,6 +608,22 @@ _coerce = func_with_new_name(_uint64_coerce, '_coerce') +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + format_code = "l" + + if LONG_BIT == 64: + _coerce = func_with_new_name(_int64_coerce, '_coerce') + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + format_code = "L" + + if LONG_BIT == 64: + _coerce = func_with_new_name(_uint64_coerce, '_coerce') + class Float(Primitive): _mixin_ = True @@ -1620,6 +1626,8 @@ from pypy.module.micronumpy.interp_dtype import new_string_dtype if isinstance(w_item, interp_boxes.W_StringBox): return w_item + if w_item is None: + w_item = space.wrap('') arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1733,13 +1741,16 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match from interp_dtype import W_Dtype - items_w = space.fixedview(w_items) + if w_items is not None: + items_w = space.fixedview(w_items) + else: + items_w = [None] * shape[0] subdtype = dtype.subdtype assert isinstance(subdtype, W_Dtype) itemtype = subdtype.itemtype if len(shape) <= 1: for i in range(len(items_w)): - w_box = itemtype.coerce(space, dtype.subdtype, items_w[i]) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) ofs += itemtype.get_element_size() else: @@ -1758,7 +1769,9 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): + assert i == 0 assert isinstance(box, interp_boxes.W_VoidBox) + assert box.dtype is box.arr.dtype for k in range(box.arr.dtype.get_size()): 
arr.storage[k + ofs] = box.arr.storage[k + box.ofs] @@ -1819,29 +1832,35 @@ def coerce(self, space, dtype, w_item): if isinstance(w_item, interp_boxes.W_VoidBox): return w_item - # we treat every sequence as sequence, no special support - # for arrays - if not space.issequence_w(w_item): - raise OperationError(space.w_TypeError, space.wrap( - "expected sequence")) - if len(dtype.fields) != space.len_w(w_item): - raise OperationError(space.w_ValueError, space.wrap( - "wrong length")) - items_w = space.fixedview(w_item) + if w_item is not None: + # we treat every sequence as sequence, no special support + # for arrays + if not space.issequence_w(w_item): + raise OperationError(space.w_TypeError, space.wrap( + "expected sequence")) + if len(dtype.fields) != space.len_w(w_item): + raise OperationError(space.w_ValueError, space.wrap( + "wrong length")) + items_w = space.fixedview(w_item) + else: + items_w = [None] * len(dtype.fields) arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): ofs, subdtype = dtype.fields[dtype.fieldnames[i]] itemtype = subdtype.itemtype - w_item = items_w[i] - w_box = itemtype.coerce(space, subdtype, w_item) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) return interp_boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(box.arr.dtype.get_size()): - arr.storage[k + i] = box.arr.storage[k + box.ofs] + for k in range(box.dtype.get_size()): + arr.storage[k + i + ofs] = box.arr.storage[k + box.ofs] + + def byteswap(self, w_v): + # XXX implement + return w_v def to_builtin_type(self, space, box): assert isinstance(box, interp_boxes.W_VoidBox) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -50,6 +50,13 @@ return w_iter list_iter._annspecialcase_ = 'specialize:memo' +def tuple_iter(space): + "Utility that returns the app-level descriptor tuple.__iter__." + w_src, w_iter = space.lookup_in_type_where(space.w_tuple, + '__iter__') + return w_iter +tuple_iter._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, w_name, w_descr=None): # space.repr always returns an encodable string. 
if w_descr is None: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -921,7 +921,8 @@ def _extend_from_iterable(self, w_list, w_iterable): space = self.space - if isinstance(w_iterable, W_AbstractTupleObject): + if (isinstance(w_iterable, W_AbstractTupleObject) + and space._uses_tuple_iter(w_iterable)): w_list.__init__(space, w_iterable.getitems_copy()) return diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -413,7 +413,7 @@ got, got != 1 and "s" or "") def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() @@ -427,7 +427,7 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.tolist() elif type(w_obj) is W_ListObject: if unroll: @@ -452,7 +452,7 @@ def listview(self, w_obj, expected_length=-1): if type(w_obj) is W_ListObject: t = w_obj.getitems() - elif isinstance(w_obj, W_AbstractTupleObject): + elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): t = w_obj.getitems() @@ -487,7 +487,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject): + if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -522,6 +522,13 @@ from pypy.objspace.descroperation import list_iter return self.lookup(w_obj, '__iter__') is list_iter(self) + def _uses_tuple_iter(self, w_obj): + from pypy.objspace.descroperation import tuple_iter + return self.lookup(w_obj, '__iter__') is tuple_iter(self) + + def _uses_no_iter(self, w_obj): + return self.lookup(w_obj, '__iter__') is None + def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): a, b, c = w_slice.indices3(self, self.int_w(w_length)) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1274,6 +1274,57 @@ non_list = NonList() assert [] != non_list + def test_extend_from_empty_list_with_subclasses(self): + # some of these tests used to fail by ignoring the + # custom __iter__() --- but only if the list has so + # far the empty strategy, as opposed to .extend()ing + # a non-empty list. 
+ class T(tuple): + def __iter__(self): + yield "ok" + assert list(T([5, 6])) == ["ok"] + # + class L(list): + def __iter__(self): + yield "ok" + assert list(L([5, 6])) == ["ok"] + assert list(L([5.2, 6.3])) == ["ok"] + # + class S(str): + def __iter__(self): + yield "ok" + assert list(S("don't see me")) == ["ok"] + # + class U(unicode): + def __iter__(self): + yield "ok" + assert list(U(u"don't see me")) == ["ok"] + + def test_extend_from_nonempty_list_with_subclasses(self): + l = ["hi!"] + class T(tuple): + def __iter__(self): + yield "okT" + l.extend(T([5, 6])) + # + class L(list): + def __iter__(self): + yield "okL" + l.extend(L([5, 6])) + l.extend(L([5.2, 6.3])) + # + class S(str): + def __iter__(self): + yield "okS" + l.extend(S("don't see me")) + # + class U(unicode): + def __iter__(self): + yield "okU" + l.extend(U(u"don't see me")) + # + assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + def test_issue1266(self): l = list(range(1)) l.pop() @@ -1304,6 +1355,94 @@ assert item11 in l[::11] +class AppTestForRangeLists(AppTestW_ListObject): + spaceconfig = {"objspace.std.withrangelist": True} + + def test_range_simple_backwards(self): + x = range(5,1) + assert x == [] + + def test_range_big_start(self): + x = range(1,10) + x[22:0:-1] == range(1,10) + + def test_range_list_invalid_slice(self): + x = [1,2,3,4] + assert x[10:0] == [] + assert x[10:0:None] == [] + + x = range(1,5) + assert x[10:0] == [] + assert x[10:0:None] == [] + + assert x[0:22] == [1,2,3,4] + assert x[-1:10] == [4] + + assert x[0:22:None] == [1,2,3,4] + assert x[-1:10:None] == [4] + + def test_range_backwards(self): + x = range(1,10) + assert x[22:-10] == [] + assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] + assert x[10:3:-1] == [9,8,7,6,5] + assert x[10:3:-2] == [9,7,5] + assert x[1:5:-1] == [] + + def test_sort_range(self): + l = range(3,10,3) + l.sort() + assert l == [3, 6, 9] + l.sort(reverse = True) + assert l == [9, 6, 3] + l.sort(reverse = True) + assert l == [9, 6, 3] + l.sort() + assert l == [3, 6, 9] + + def test_slice(self): + l = [] + l2 = range(3) + l.__setslice__(0,3,l2) + assert l == [0,1,2] + + def test_getitem(self): + l = range(5) + raises(IndexError, "l[-10]") + + def test_append(self): + l = range(5) + l.append(26) + assert l == [0,1,2,3,4,26] + + l = range(5) + l.append("a") + assert l == [0,1,2,3,4,"a"] + + l = range(5) + l.append(5) + assert l == [0,1,2,3,4,5] + + def test_pop(self): + l = range(3) + assert l.pop(0) == 0 + + def test_setitem(self): + l = range(3) + l[0] = 1 + assert l == [1,1,2] + + def test_inset(self): + l = range(3) + l.insert(1,5) + assert l == [0,5,1,2] + + def test_reverse(self): + l = range(3) + l.reverse() + assert l == [2,1,0] + + class AppTestWithoutStrategies(object): spaceconfig = {"objspace.std.withliststrategies": False} diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -412,10 +412,7 @@ return SomeByteArray(can_be_None=can_be_None) def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeByteArray, SomeInteger)): def getitem((s_b, s_i)): @@ -429,10 +426,7 @@ pairtype(SomeChar, SomeByteArray), pairtype(SomeByteArray, SomeChar)): def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return 
SomeByteArray() class __extend__(pairtype(SomeChar, SomeChar)): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -122,7 +122,7 @@ return constpropagate(unicode, [s_unicode], SomeUnicodeString()) def builtin_bytearray(s_str): - return constpropagate(bytearray, [s_str], SomeByteArray()) + return SomeByteArray() def our_issubclass(cls1, cls2): """ we're going to try to be less silly in the face of old-style classes""" diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -215,7 +215,8 @@ class SomeStringOrUnicode(SomeObject): - """Base class for shared implementation of SomeString and SomeUnicodeString. + """Base class for shared implementation of SomeString, + SomeUnicodeString and SomeByteArray. Cannot be an annotation.""" @@ -228,6 +229,7 @@ if can_be_None: self.can_be_None = True if no_nul: + assert self.immutable #'no_nul' cannot be used with SomeByteArray self.no_nul = True def can_be_none(self): @@ -263,6 +265,7 @@ class SomeByteArray(SomeStringOrUnicode): + immutable = False knowntype = bytearray diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3987,7 +3987,9 @@ return bytearray("xyz") a = self.RPythonAnnotator() - assert isinstance(a.build_types(f, []), annmodel.SomeByteArray) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeByteArray) + assert not s.is_constant() # never a constant! def test_bytearray_add(self): def f(a): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -125,10 +125,12 @@ class ArrayDescr(AbstractDescr): def __init__(self, A): - self.A = A + self.A = self.OUTERA = A + if isinstance(A, lltype.Struct): + self.A = A._flds[A._arrayfld] def __repr__(self): - return 'ArrayDescr(%r)' % (self.A,) + return 'ArrayDescr(%r)' % (self.OUTERA,) def is_array_of_pointers(self): return getkind(self.A.OF) == 'ref' @@ -424,6 +426,8 @@ def bh_arraylen_gc(self, a, descr): array = a._obj.container + if descr.A is not descr.OUTERA: + array = getattr(array, descr.OUTERA._arrayfld) return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -424,3 +424,11 @@ " > >") # caching: assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) + +def test_bytearray_descr(): + c0 = GcCache(False) + descr = get_array_descr(c0, rstr.STR) # for bytearray + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == struct.calcsize("PP") # hash, length + assert descr.lendescr.offset == struct.calcsize("P") # hash + assert not descr.is_array_of_pointers() diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -13,6 +13,7 @@ from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rclass, rffi +from rpython.rtyper.lltypesystem import rbytearray from rpython.rtyper.rclass import 
IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY from rpython.translator.unsimplify import varoftype @@ -643,6 +644,12 @@ return SpaceOperation('arraylen_gc', [op.args[0], arraydescr], op.result) + def rewrite_op_getarraysubstruct(self, op): + ARRAY = op.args[0].concretetype.TO + assert ARRAY._gckind == 'raw' + assert ARRAY._hints.get('nolength') is True + return self.rewrite_op_direct_ptradd(op) + def _array_of_voids(self, ARRAY): return ARRAY.OF == lltype.Void @@ -836,9 +843,14 @@ optype = op.args[0].concretetype if optype == lltype.Ptr(rstr.STR): opname = "strlen" + elif optype == lltype.Ptr(rstr.UNICODE): + opname = "unicodelen" + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + return SpaceOperation('arraylen_gc', [op.args[0], bytearraydescr], + op.result) else: - assert optype == lltype.Ptr(rstr.UNICODE) - opname = "unicodelen" + assert 0, "supported type %r" % (optype,) return SpaceOperation(opname, [op.args[0]], op.result) def rewrite_op_getinteriorfield(self, op): @@ -850,6 +862,12 @@ elif optype == lltype.Ptr(rstr.UNICODE): opname = "unicodegetitem" return SpaceOperation(opname, [op.args[0], op.args[2]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + v_index = op.args[2] + return SpaceOperation('getarrayitem_gc_i', + [op.args[0], v_index, bytearraydescr], + op.result) else: v_inst, v_index, c_field = op.args if op.result.concretetype is lltype.Void: @@ -876,6 +894,11 @@ opname = "unicodesetitem" return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + opname = "setarrayitem_gc_i" + return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3], + bytearraydescr], op.result) else: v_inst, v_index, c_field, v_value = op.args if v_value.concretetype is lltype.Void: @@ -1709,6 +1732,8 @@ "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar + elif SoU.TO == rbytearray.BYTEARRAY: + raise NotSupported("bytearray operation") else: assert 0, "args[0].concretetype must be STR or UNICODE" # diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -14,6 +14,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory, rstr as ll_rstr, rdict as ll_rdict +from rpython.rtyper.lltypesystem import rordereddict from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.module import ll_math from rpython.translator.translator import TranslationContext @@ -492,11 +493,6 @@ # ---------- dict ---------- - def _ll_0_newdict(DICT): - return ll_rdict.ll_newdict(DICT) - _ll_0_newdict.need_result_type = True - - _ll_2_dict_delitem = ll_rdict.ll_dict_delitem _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update @@ -524,6 +520,33 @@ _ll_1_dict_resize = ll_rdict.ll_dict_resize + # ---------- ordered dict ---------- + + _ll_1_odict_copy = rordereddict.ll_dict_copy + _ll_1_odict_clear = rordereddict.ll_dict_clear + _ll_2_odict_update = rordereddict.ll_dict_update + + _ll_1_odict_keys = rordereddict.ll_dict_keys + _ll_1_odict_values = rordereddict.ll_dict_values + _ll_1_odict_items = 
rordereddict.ll_dict_items + _ll_1_odict_keys .need_result_type = True + _ll_1_odict_values.need_result_type = True + _ll_1_odict_items .need_result_type = True + + _odictnext_keys = staticmethod(rordereddict.ll_dictnext_group['keys']) + _odictnext_values = staticmethod(rordereddict.ll_dictnext_group['values']) + _odictnext_items = staticmethod(rordereddict.ll_dictnext_group['items']) + + def _ll_1_odictiter_nextkeys(iter): + return LLtypeHelpers._odictnext_keys(None, iter) + def _ll_1_odictiter_nextvalues(iter): + return LLtypeHelpers._odictnext_values(None, iter) + def _ll_1_odictiter_nextitems(RES, iter): + return LLtypeHelpers._odictnext_items(lltype.Ptr(RES), iter) + _ll_1_odictiter_nextitems.need_result_type = True + + _ll_1_odict_resize = rordereddict.ll_dict_resize + # ---------- strings and unicode ---------- _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode diff --git a/rpython/jit/metainterp/test/test_bytearray.py b/rpython/jit/metainterp/test/test_bytearray.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_bytearray.py @@ -0,0 +1,82 @@ +import py +from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.rlib.jit import JitDriver, dont_look_inside + +class TestByteArray(LLJitMixin): + + def test_getitem(self): + x = bytearray("foobar") + def fn(n): + assert n >= 0 + return x[n] + res = self.interp_operations(fn, [3]) + assert res == ord('b') + + def test_getitem_negative(self): + x = bytearray("foobar") + def fn(n): + return x[n] + res = self.interp_operations(fn, [-2]) + assert res == ord('a') + + def test_len(self): + x = bytearray("foobar") + def fn(n): + return len(x) + res = self.interp_operations(fn, [3]) + assert res == 6 + + def test_setitem(self): + @dont_look_inside + def make_me(): + return bytearray("foobar") + def fn(n): + assert n >= 0 + x = make_me() + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [3]) + assert res == 3 + 1000 * ord('a') + + def test_setitem_negative(self): + @dont_look_inside + def make_me(): + return bytearray("foobar") + def fn(n): + x = make_me() + x[n] = 3 + return x[3] + 1000 * x[4] + + res = self.interp_operations(fn, [-2]) + assert res == ord('b') + 1000 * 3 + + def test_new_bytearray(self): + def fn(n, m): + x = bytearray(str(n)) + x[m] = 0x34 + return int(str(x)) +