[pypy-commit] pypy buffer-interface2: merge default into branch
mattip
pypy.commits at gmail.com
Sat Sep 17 17:05:16 EDT 2016
Author: Matti Picus <matti.picus at gmail.com>
Branch: buffer-interface2
Changeset: r87198:a3e17bbc7b47
Date: 2016-09-17 23:24 +0300
http://bitbucket.org/pypy/pypy/changeset/a3e17bbc7b47/
Log: merge default into branch
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.8.2
+Version: 1.8.3
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.8.2"
-__version_info__ = (1, 8, 2)
+__version__ = "1.8.3"
+__version_info__ = (1, 8, 3)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.8.2"
+ "\ncompiled with cffi version: 1.8.3"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/pypy/doc/config/translation.profopt.txt b/pypy/doc/config/translation.profopt.txt
--- a/pypy/doc/config/translation.profopt.txt
+++ b/pypy/doc/config/translation.profopt.txt
@@ -3,3 +3,14 @@
RPython program) to gather profile data. Example for pypy-c: "-c 'from
richards import main;main(); from test import pystone;
pystone.main()'"
+
+NOTE: be aware of what this does in JIT-enabled executables. What it
+does is instrument and later optimize the C code that happens to run in
+the example you specify, ignoring any execution of the JIT-generated
+assembler. That means that you have to choose the example wisely. If
+it is something that will just generate assembler and stay there, there
+is little value. If it is something that exercises heavily library
+routines that are anyway written in C, then it will optimize that. Most
+interesting would be something that causes a lot of JIT-compilation,
+like running a medium-sized test suite several times in a row, in order
+to optimize the warm-up in general.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -449,6 +449,27 @@
support (see ``multiline_input()``). On the other hand,
``parse_and_bind()`` calls are ignored (issue `#2072`_).
+* ``sys.getsizeof()`` always raises ``TypeError``. This is because a
+ memory profiler using this function is most likely to give results
+ inconsistent with reality on PyPy. It would be possible to have
+ ``sys.getsizeof()`` return a number (with enough work), but that may
+ or may not represent how much memory the object uses. It doesn't even
+ really make sense to ask how much *one* object uses, in isolation with
+ the rest of the system. For example, instances have maps, which are
+ often shared across many instances; in this case the maps would
+ probably be ignored by an implementation of ``sys.getsizeof()``, but
+ their overhead is important in some cases if there are many instances
+ with unique maps. Conversely, equal strings may share their internal
+ string data even if they are different objects---or empty containers
+ may share parts of their internals as long as they are empty. Even
+ stranger, some lists create objects as you read them; if you try to
+ estimate the size in memory of ``range(10**6)`` as the sum of all
+ items' size, that operation will by itself create one million integer
+ objects that never existed in the first place. Note that some of
+ these concerns also exist on CPython, just less so. For this reason
+ we explicitly don't implement ``sys.getsizeof()``.
+
+
.. _`is ignored in PyPy`: http://bugs.python.org/issue14621
.. _`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html
.. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -16,3 +16,8 @@
Improve merging of virtual states in the JIT in order to avoid jumping to the
preamble. Accomplished by allocating virtual objects where non-virtuals are
expected.
+
+.. branch: conditional_call_value_3
+JIT residual calls: if the called function starts with a fast-path
+like "if x.foo != 0: return x.foo", then inline the check before
+doing the CALL. For now, string hashing is about the only case.
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -239,6 +239,10 @@
raise Exception("Cannot use the --output option with PyPy "
"when --shared is on (it is by default). "
"See issue #1971.")
+ if config.translation.profopt is not None:
+ raise Exception("Cannot use the --profopt option "
+ "when --shared is on (it is by default). "
+ "See issue #2398.")
if sys.platform == 'win32':
libdir = thisdir.join('..', '..', 'libs')
libdir.ensure(dir=1)
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1986,6 +1986,7 @@
'ZeroDivisionError',
'RuntimeWarning',
'PendingDeprecationWarning',
+ 'UserWarning',
]
if sys.platform.startswith("win"):
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -63,7 +63,7 @@
"""x.__iter__() <==> iter(x)"""
return self.space.wrap(self)
- def descr_send(self, w_arg=None):
+ def descr_send(self, w_arg):
"""send(arg) -> send 'arg' into generator,
return next yielded value or raise StopIteration."""
return self.send_ex(w_arg)
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -264,25 +264,22 @@
try:
executioncontext.call_trace(self)
#
- if operr is not None:
- ec = self.space.getexecutioncontext()
- next_instr = self.handle_operation_error(ec, operr)
- self.last_instr = intmask(next_instr - 1)
- else:
- # Execution starts just after the last_instr. Initially,
- # last_instr is -1. After a generator suspends it points to
- # the YIELD_VALUE instruction.
- next_instr = r_uint(self.last_instr + 1)
- if next_instr != 0:
- self.pushvalue(w_inputvalue)
- #
try:
+ if operr is not None:
+ ec = self.space.getexecutioncontext()
+ next_instr = self.handle_operation_error(ec, operr)
+ self.last_instr = intmask(next_instr - 1)
+ else:
+ # Execution starts just after the last_instr. Initially,
+ # last_instr is -1. After a generator suspends it points to
+ # the YIELD_VALUE instruction.
+ next_instr = r_uint(self.last_instr + 1)
+ if next_instr != 0:
+ self.pushvalue(w_inputvalue)
w_exitvalue = self.dispatch(self.pycode, next_instr,
executioncontext)
- except Exception:
- executioncontext.return_trace(self, self.space.w_None)
- raise
- executioncontext.return_trace(self, w_exitvalue)
+ finally:
+ executioncontext.return_trace(self, w_exitvalue)
# it used to say self.last_exception = None
# this is now done by the code in pypyjit module
# since we don't want to invalidate the virtualizable
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py
--- a/pypy/interpreter/test/test_generator.py
+++ b/pypy/interpreter/test/test_generator.py
@@ -57,12 +57,14 @@
def f():
yield 2
g = f()
+ # two arguments version
raises(NameError, g.throw, NameError, "Error")
def test_throw2(self):
def f():
yield 2
g = f()
+ # single argument version
raises(NameError, g.throw, NameError("Error"))
def test_throw3(self):
@@ -221,7 +223,8 @@
def f():
yield 1
g = f()
- raises(TypeError, g.send, 1)
+ raises(TypeError, g.send) # one argument required
+ raises(TypeError, g.send, 1) # not started, must send None
def test_generator_explicit_stopiteration(self):
def f():
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -562,3 +562,21 @@
res = f(10).g()
sys.settrace(None)
assert res == 10
+
+ def test_throw_trace_bug(self):
+ import sys
+ def f():
+ yield 5
+ gen = f()
+ assert next(gen) == 5
+ seen = []
+ def trace_func(frame, event, *args):
+ seen.append(event)
+ return trace_func
+ sys.settrace(trace_func)
+ try:
+ gen.throw(ValueError)
+ except ValueError:
+ pass
+ sys.settrace(None)
+ assert seen == ['call', 'exception', 'return']
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi, entrypoint
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.8.2"
+VERSION = "1.8.3"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py
--- a/pypy/module/_cffi_backend/ctypearray.py
+++ b/pypy/module/_cffi_backend/ctypearray.py
@@ -11,7 +11,7 @@
from rpython.rlib.rarithmetic import ovfcheck
from pypy.module._cffi_backend import cdataobj
-from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
+from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray, W_CTypePointer
from pypy.module._cffi_backend import ctypeprim
@@ -22,6 +22,7 @@
is_nonfunc_pointer_or_array = True
def __init__(self, space, ctptr, length, arraysize, extra):
+ assert isinstance(ctptr, W_CTypePointer)
W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0,
ctptr.ctitem)
self.length = length
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -35,8 +35,7 @@
assert isinstance(ellipsis, bool)
extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi)
size = rffi.sizeof(rffi.VOIDP)
- W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult,
- could_cast_anything=False)
+ W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult)
self.fargs = fargs
self.ellipsis = ellipsis
self.abi = abi
@@ -59,6 +58,16 @@
lltype.free(self.cif_descr, flavor='raw')
self.cif_descr = lltype.nullptr(CIF_DESCRIPTION)
+ def is_unichar_ptr_or_array(self):
+ return False
+
+ def is_char_or_unichar_ptr_or_array(self):
+ return False
+
+ def string(self, cdataobj, maxlen):
+ # Can't use ffi.string() on a function pointer
+ return W_CType.string(self, cdataobj, maxlen)
+
def new_ctypefunc_completing_argtypes(self, args_w):
space = self.space
nargs_declared = len(self.fargs)
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -19,7 +19,6 @@
# XXX this could be improved with an elidable method get_size()
# that raises in case it's still -1...
- cast_anything = False
is_primitive_integer = False
is_nonfunc_pointer_or_array = False
is_indirect_arg_for_call_python = False
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -120,7 +120,6 @@
class W_CTypePrimitiveChar(W_CTypePrimitiveCharOrUniChar):
_attrs_ = []
- cast_anything = True
def cast_to_int(self, cdata):
return self.space.wrap(ord(cdata[0]))
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -14,12 +14,11 @@
class W_CTypePtrOrArray(W_CType):
- _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
- _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
+ _attrs_ = ['ctitem', 'accept_str', 'length']
+ _immutable_fields_ = ['ctitem', 'accept_str', 'length']
length = -1
- def __init__(self, space, size, extra, extra_position, ctitem,
- could_cast_anything=True):
+ def __init__(self, space, size, extra, extra_position, ctitem):
name, name_position = ctitem.insert_name(extra, extra_position)
W_CType.__init__(self, space, size, name, name_position)
# this is the "underlying type":
@@ -27,10 +26,11 @@
# - for arrays, it is the array item type
# - for functions, it is the return type
self.ctitem = ctitem
- self.can_cast_anything = could_cast_anything and ctitem.cast_anything
- self.accept_str = (self.can_cast_anything or
- (ctitem.is_primitive_integer and
- ctitem.size == rffi.sizeof(lltype.Char)))
+ self.accept_str = (self.is_nonfunc_pointer_or_array and
+ (isinstance(ctitem, ctypevoid.W_CTypeVoid) or
+ isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar) or
+ (ctitem.is_primitive_integer and
+ ctitem.size == rffi.sizeof(lltype.Char))))
def is_unichar_ptr_or_array(self):
return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar)
@@ -137,7 +137,10 @@
class W_CTypePtrBase(W_CTypePtrOrArray):
# base class for both pointers and pointers-to-functions
- _attrs_ = []
+ _attrs_ = ['is_void_ptr', 'is_voidchar_ptr']
+ _immutable_fields_ = ['is_void_ptr', 'is_voidchar_ptr']
+ is_void_ptr = False
+ is_voidchar_ptr = False
def convert_to_object(self, cdata):
ptrdata = rffi.cast(rffi.CCHARPP, cdata)[0]
@@ -154,7 +157,16 @@
else:
raise self._convert_error("compatible pointer", w_ob)
if self is not other:
- if not (self.can_cast_anything or other.can_cast_anything):
+ if self.is_void_ptr or other.is_void_ptr:
+ pass # cast from or to 'void *'
+ elif self.is_voidchar_ptr or other.is_voidchar_ptr:
+ space = self.space
+ msg = ("implicit cast from '%s' to '%s' "
+ "will be forbidden in the future (check that the types "
+ "are as you expect; use an explicit ffi.cast() if they "
+ "are correct)" % (other.name, self.name))
+ space.warn(space.wrap(msg), space.w_UserWarning, stacklevel=1)
+ else:
raise self._convert_error("compatible pointer", w_ob)
rffi.cast(rffi.CCHARPP, cdata)[0] = w_ob.unsafe_escaping_ptr()
@@ -165,8 +177,8 @@
class W_CTypePointer(W_CTypePtrBase):
- _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types']
- _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr']
+ _attrs_ = ['is_file', 'cache_array_type', '_array_types']
+ _immutable_fields_ = ['is_file', 'cache_array_type?']
kind = "pointer"
cache_array_type = None
is_nonfunc_pointer_or_array = True
@@ -181,6 +193,8 @@
self.is_file = (ctitem.name == "struct _IO_FILE" or
ctitem.name == "FILE")
self.is_void_ptr = isinstance(ctitem, ctypevoid.W_CTypeVoid)
+ self.is_voidchar_ptr = (self.is_void_ptr or
+ isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar))
W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem)
def newp(self, w_init, allocator):
diff --git a/pypy/module/_cffi_backend/ctypevoid.py b/pypy/module/_cffi_backend/ctypevoid.py
--- a/pypy/module/_cffi_backend/ctypevoid.py
+++ b/pypy/module/_cffi_backend/ctypevoid.py
@@ -7,7 +7,6 @@
class W_CTypeVoid(W_CType):
_attrs_ = []
- cast_anything = True
kind = "void"
def __init__(self, space):
diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py
--- a/pypy/module/_cffi_backend/handle.py
+++ b/pypy/module/_cffi_backend/handle.py
@@ -32,8 +32,8 @@
@unwrap_spec(w_cdata=cdataobj.W_CData)
def from_handle(space, w_cdata):
ctype = w_cdata.ctype
- if (not isinstance(ctype, ctypeptr.W_CTypePtrOrArray) or
- not ctype.can_cast_anything):
+ if (not isinstance(ctype, ctypeptr.W_CTypePointer) or
+ not ctype.is_voidchar_ptr):
raise oefmt(space.w_TypeError,
"expected a 'cdata' object with a 'void *' out of "
"new_handle(), got '%s'", ctype.name)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.8.2", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.3", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
@@ -3665,3 +3665,27 @@
check_dir(pp, [])
check_dir(pp[0], ['a1', 'a2'])
check_dir(pp[0][0], ['a1', 'a2'])
+
+def test_char_pointer_conversion():
+ import warnings
+ assert __version__.startswith(("1.8", "1.9")), (
+ "consider turning the warning into an error")
+ BCharP = new_pointer_type(new_primitive_type("char"))
+ BIntP = new_pointer_type(new_primitive_type("int"))
+ BVoidP = new_pointer_type(new_void_type())
+ z1 = cast(BCharP, 0)
+ z2 = cast(BIntP, 0)
+ z3 = cast(BVoidP, 0)
+ with warnings.catch_warnings(record=True) as w:
+ newp(new_pointer_type(BIntP), z1) # warn
+ assert len(w) == 1
+ newp(new_pointer_type(BVoidP), z1) # fine
+ assert len(w) == 1
+ newp(new_pointer_type(BCharP), z2) # warn
+ assert len(w) == 2
+ newp(new_pointer_type(BVoidP), z2) # fine
+ assert len(w) == 2
+ newp(new_pointer_type(BCharP), z3) # fine
+ assert len(w) == 2
+ newp(new_pointer_type(BIntP), z3) # fine
+ assert len(w) == 2
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -503,3 +503,10 @@
assert ffi.unpack(p+1, 7) == b"bc\x00def\x00"
p = ffi.new("int[]", [-123456789])
assert ffi.unpack(p, 1) == [-123456789]
+
+ def test_bug_1(self):
+ import _cffi_backend as _cffi1_backend
+ ffi = _cffi1_backend.FFI()
+ q = ffi.new("char[]", "abcd")
+ p = ffi.cast("char(*)(void)", q)
+ raises(TypeError, ffi.string, p)
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -30,16 +30,19 @@
raise oefmt(space.w_TypeError,
"array.array() does not take keyword arguments")
+ w_initializer_type = None
+ w_initializer = None
+ if len(__args__.arguments_w) > 0:
+ w_initializer = __args__.arguments_w[0]
+ w_initializer_type = space.type(w_initializer)
for tc in unroll_typecodes:
if typecode == tc:
a = space.allocate_instance(types[tc].w_class, w_cls)
a.__init__(space)
-
- if len(__args__.arguments_w) > 0:
- w_initializer = __args__.arguments_w[0]
- if space.type(w_initializer) is space.w_str:
+ if w_initializer is not None:
+ if w_initializer_type is space.w_str:
a.descr_fromstring(space, w_initializer)
- elif space.type(w_initializer) is space.w_list:
+ elif w_initializer_type is space.w_list:
a.descr_fromlist(space, w_initializer)
else:
a.extend(w_initializer, True)
diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py
--- a/pypy/module/cpyext/longobject.py
+++ b/pypy/module/cpyext/longobject.py
@@ -6,7 +6,7 @@
from pypy.interpreter.error import OperationError
from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask
from rpython.rlib.rbigint import rbigint
-from rpython.rlib.rarithmetic import intmask
+from rpython.rlib.rarithmetic import widen
PyLong_Check, PyLong_CheckExact = build_type_checkers("Long")
@@ -34,7 +34,7 @@
def PyLong_FromLongLong(space, val):
"""Return a new PyLongObject object from a C long long, or NULL
on failure."""
- return space.wrap(val)
+ return space.newlong(val)
@cpython_api([rffi.ULONG], PyObject)
def PyLong_FromUnsignedLong(space, val):
@@ -203,7 +203,7 @@
can be retrieved from the resulting value using PyLong_AsVoidPtr().
If the integer is larger than LONG_MAX, a positive long integer is returned."""
- return space.wrap(rffi.cast(ADDR, p))
+ return space.newlong(rffi.cast(ADDR, p))
@cpython_api([PyObject], rffi.VOIDP, error=lltype.nullptr(rffi.VOIDP.TO))
def PyLong_AsVoidPtr(space, w_long):
diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py
--- a/pypy/module/cpyext/pytraceback.py
+++ b/pypy/module/cpyext/pytraceback.py
@@ -5,7 +5,6 @@
from pypy.module.cpyext.pyobject import (
PyObject, make_ref, from_ref, Py_DecRef, make_typedescr)
from pypy.module.cpyext.frameobject import PyFrameObject
-from rpython.rlib.unroll import unrolling_iterable
from pypy.interpreter.error import OperationError
from pypy.interpreter.pytraceback import PyTraceback
from pypy.interpreter import pycode
diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -8,18 +8,20 @@
class TestLongObject(BaseApiTest):
def test_FromLong(self, space, api):
- value = api.PyLong_FromLong(3)
- assert isinstance(value, W_LongObject)
- assert space.unwrap(value) == 3
+ w_value = api.PyLong_FromLong(3)
+ assert isinstance(w_value, W_LongObject)
+ assert space.unwrap(w_value) == 3
- value = api.PyLong_FromLong(sys.maxint)
- assert isinstance(value, W_LongObject)
- assert space.unwrap(value) == sys.maxint
+ w_value = api.PyLong_FromLong(sys.maxint)
+ assert isinstance(w_value, W_LongObject)
+ assert space.unwrap(w_value) == sys.maxint
def test_aslong(self, space, api):
w_value = api.PyLong_FromLong((sys.maxint - 1) / 2)
+ assert isinstance(w_value, W_LongObject)
w_value = space.mul(w_value, space.wrap(2))
+ assert isinstance(w_value, W_LongObject)
value = api.PyLong_AsLong(w_value)
assert value == (sys.maxint - 1)
@@ -35,12 +37,16 @@
def test_as_ssize_t(self, space, api):
w_value = space.newlong(2)
+ assert isinstance(w_value, W_LongObject)
value = api.PyLong_AsSsize_t(w_value)
assert value == 2
- assert space.eq_w(w_value, api.PyLong_FromSsize_t(2))
+ w_val2 = api.PyLong_FromSsize_t(2)
+ assert isinstance(w_val2, W_LongObject)
+ assert space.eq_w(w_value, w_val2)
def test_fromdouble(self, space, api):
w_value = api.PyLong_FromDouble(-12.74)
+ assert isinstance(w_value, W_LongObject)
assert space.unwrap(w_value) == -12
assert api.PyLong_AsDouble(w_value) == -12
@@ -103,6 +109,7 @@
def test_as_voidptr(self, space, api):
w_l = api.PyLong_FromVoidPtr(lltype.nullptr(rffi.VOIDP.TO))
+ assert isinstance(w_l, W_LongObject)
assert space.unwrap(w_l) == 0L
assert api.PyLong_AsVoidPtr(w_l) == lltype.nullptr(rffi.VOIDP.TO)
@@ -128,23 +135,58 @@
module = self.import_extension('foo', [
("from_unsignedlong", "METH_NOARGS",
"""
- return PyLong_FromUnsignedLong((unsigned long)-1);
+ PyObject * obj;
+ obj = PyLong_FromUnsignedLong((unsigned long)-1);
+ if (obj->ob_type != &PyLong_Type)
+ {
+ Py_DECREF(obj);
+ PyErr_SetString(PyExc_ValueError,
+ "PyLong_FromLongLong did not return PyLongObject");
+ return NULL;
+ }
+ return obj;
""")])
import sys
assert module.from_unsignedlong() == 2 * sys.maxint + 1
def test_fromlonglong(self):
module = self.import_extension('foo', [
- ("from_longlong", "METH_NOARGS",
+ ("from_longlong", "METH_VARARGS",
"""
- return PyLong_FromLongLong((long long)-1);
+ int val;
+ PyObject * obj;
+ if (!PyArg_ParseTuple(args, "i", &val))
+ return NULL;
+ obj = PyLong_FromLongLong((long long)val);
+ if (obj->ob_type != &PyLong_Type)
+ {
+ Py_DECREF(obj);
+ PyErr_SetString(PyExc_ValueError,
+ "PyLong_FromLongLong did not return PyLongObject");
+ return NULL;
+ }
+ return obj;
"""),
- ("from_unsignedlonglong", "METH_NOARGS",
+ ("from_unsignedlonglong", "METH_VARARGS",
"""
- return PyLong_FromUnsignedLongLong((unsigned long long)-1);
+ int val;
+ PyObject * obj;
+ if (!PyArg_ParseTuple(args, "i", &val))
+ return NULL;
+ obj = PyLong_FromUnsignedLongLong((long long)val);
+ if (obj->ob_type != &PyLong_Type)
+ {
+ Py_DECREF(obj);
+ PyErr_SetString(PyExc_ValueError,
+ "PyLong_FromLongLong did not return PyLongObject");
+ return NULL;
+ }
+ return obj;
""")])
- assert module.from_longlong() == -1
- assert module.from_unsignedlonglong() == (1<<64) - 1
+ assert module.from_longlong(-1) == -1
+ assert module.from_longlong(0) == 0
+ assert module.from_unsignedlonglong(0) == 0
+ assert module.from_unsignedlonglong(-1) == (1<<64) - 1
def test_from_size_t(self):
module = self.import_extension('foo', [
@@ -232,10 +274,15 @@
("has_sub", "METH_NOARGS",
"""
PyObject *ret, *obj = PyLong_FromLong(42);
- if (obj->ob_type->tp_as_number->nb_subtract)
- ret = obj->ob_type->tp_as_number->nb_subtract(obj, obj);
+ if (obj->ob_type != &PyLong_Type)
+ ret = PyLong_FromLong(-2);
else
- ret = PyLong_FromLong(-1);
+ {
+ if (obj->ob_type->tp_as_number->nb_subtract)
+ ret = obj->ob_type->tp_as_number->nb_subtract(obj, obj);
+ else
+ ret = PyLong_FromLong(-1);
+ }
Py_DECREF(obj);
return ret;
"""),
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -253,9 +253,28 @@
from rpython.rtyper.lltypesystem import lltype, rffi
return space.wrap(rffi.cast(lltype.Signed, handle))
+getsizeof_missing = """sys.getsizeof() is not implemented on PyPy.
+
+A memory profiler using this function is most likely to give results
+inconsistent with reality on PyPy. It would be possible to have
+sys.getsizeof() return a number (with enough work), but that may or
+may not represent how much memory the object uses. It doesn't even
+make really sense to ask how much *one* object uses, in isolation
+with the rest of the system. For example, instances have maps,
+which are often shared across many instances; in this case the maps
+would probably be ignored by an implementation of sys.getsizeof(),
+but their overhead is important in some cases if there are many
+instances with unique maps. Conversely, equal strings may share
+their internal string data even if they are different objects---or
+empty containers may share parts of their internals as long as they
+are empty. Even stranger, some lists create objects as you read
+them; if you try to estimate the size in memory of range(10**6) as
+the sum of all items' size, that operation will by itself create one
+million integer objects that never existed in the first place.
+"""
+
def getsizeof(space, w_object, w_default=None):
- """Not implemented on PyPy."""
if w_default is None:
- raise oefmt(space.w_TypeError,
- "sys.getsizeof() not implemented on PyPy")
+ raise oefmt(space.w_TypeError, getsizeof_missing)
return w_default
+getsizeof.__doc__ = getsizeof_missing
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -200,8 +200,12 @@
or v.concretetype != lltype.Bool):
return False
for op in block.operations[::-1]:
- if v in op.args:
- return False # variable is also used in cur block
+ # check if variable is used in block
+ for arg in op.args:
+ if arg == v:
+ return False
+ if isinstance(arg, ListOfKind) and v in arg.content:
+ return False
if v is op.result:
if op.opname not in ('int_lt', 'int_le', 'int_eq', 'int_ne',
'int_gt', 'int_ge',
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -243,6 +243,20 @@
assert block.exitswitch == (opname, v1, '-live-before')
assert block.exits == exits
+def test_optimize_goto_if_not__argument_to_call():
+ for opname in ['ptr_iszero', 'ptr_nonzero']:
+ v1 = Variable()
+ v3 = Variable(); v3.concretetype = lltype.Bool
+ v4 = Variable()
+ block = Block([v1])
+ callop = SpaceOperation('residual_call_r_i',
+ ["fake", ListOfKind('int', [v3])], v4)
+ block.operations = [SpaceOperation(opname, [v1], v3), callop]
+ block.exitswitch = v3
+ block.exits = exits = [FakeLink(False), FakeLink(True)]
+ res = Transformer().optimize_goto_if_not(block)
+ assert not res
+
def test_symmetric():
ops = {'int_add': 'int_add',
'int_or': 'int_or',
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
@@ -90,18 +90,23 @@
assert vs.make_inputargs(args, optimizer) == []
def test_make_inputargs_2(self):
- # Ensure that make_inputargs properly errors with VirtualStatesCantMatch
- # when the type information for a virtual field conflicts. In practice the
- # expected and given field always share a common subclass.
- # This check is needed as not all paths to make_inputargs in unroll.py
- # are guarded by a call to generate_guards.
+ # Ensure that make_inputargs does not error when the lengths of the fields
+ # for the runtime box does not match what the virtual state expected.
+ # This can occur in unroll.py, as not all paths to make_inputargs are
+ guarded with a generalization_of check. The property is validated
+ # subsequently in all cases, so we just need to ensure that this case does
+ # not cause segfaults.
optimizer = FakeOptimizer(self.cpu)
classbox1 = self.cpu.ts.cls_of_box(InputArgRef(self.nodeaddr))
- innervalue1 = info.InstancePtrInfo(known_class=classbox1, is_virtual=True, descr=self.valuedescr.get_parent_descr())
+ innervalue1 = info.InstancePtrInfo(
+ known_class=classbox1, is_virtual=True,
+ descr=self.valuedescr.get_parent_descr())
for field in self.valuedescr.get_parent_descr().get_all_fielddescrs():
innervalue1.setfield(field, None, ConstInt(42))
classbox2 = self.cpu.ts.cls_of_box(InputArgRef(self.myptr3))
- innervalue2 = info.InstancePtrInfo(known_class=classbox2, is_virtual=True, descr=self.valuedescr3.get_parent_descr())
+ innervalue2 = info.InstancePtrInfo(
+ known_class=classbox2, is_virtual=True,
+ descr=self.valuedescr3.get_parent_descr())
for field in self.valuedescr3.get_parent_descr().get_all_fielddescrs():
innervalue2.setfield(field, None, ConstInt(42))
@@ -111,10 +116,14 @@
nodebox2.set_forwarded(innervalue2)
constr = VirtualStateConstructor(optimizer)
- vs = constr.get_virtual_state([nodebox1])
+ vs1 = constr.get_virtual_state([nodebox1])
+ constr = VirtualStateConstructor(optimizer)
+ vs2 = constr.get_virtual_state([nodebox2])
- with py.test.raises(VirtualStatesCantMatch):
- args = vs.make_inputargs([nodebox2], optimizer, force_boxes=True)
+ # This should succeed with no exceptions
+ vs1.make_inputargs([nodebox2], optimizer, force_boxes=False)
+ assert not vs1.generalization_of(vs2, optimizer)
+ assert not vs2.generalization_of(vs1, optimizer)
def test_position_generalization(self):
def postest(info1, info2):
diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py
--- a/rpython/jit/metainterp/optimizeopt/unroll.py
+++ b/rpython/jit/metainterp/optimizeopt/unroll.py
@@ -167,7 +167,8 @@
[self.get_box_replacement(x) for x in end_jump.getarglist()],
self.optimizer, force_boxes=True)
for arg in args:
- self.optimizer.force_box(arg)
+ if arg is not None:
+ self.optimizer.force_box(arg)
except VirtualStatesCantMatch:
raise InvalidLoop("Virtual states did not match "
"after picking the virtual state, when forcing"
diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py
--- a/rpython/jit/metainterp/optimizeopt/virtualstate.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py
@@ -177,14 +177,6 @@
def _generalization_of_structpart(self, other):
raise NotImplementedError
- @staticmethod
- def descr_issubclass(descr1, descr2, optimizer):
- if not descr1.is_object() or not descr2.is_object():
- return True
- vtable1 = descr1.get_vtable()
- vtable2 = descr2.get_vtable()
- return optimizer._check_subclass(vtable1, vtable2)
-
def enum_forced_boxes(self, boxes, box, optimizer, force_boxes=False):
box = optimizer.get_box_replacement(box)
info = optimizer.getptrinfo(box)
@@ -193,13 +185,12 @@
else:
assert isinstance(info, AbstractStructPtrInfo)
- for i in range(len(self.fielddescrs)):
+ # The min operation ensures we don't wander off either array, as not all
+ # callers to make_inputargs have validated their inputs with generate_guards.
+ for i in range(min(len(self.fielddescrs), len(info._fields))):
state = self.fieldstate[i]
- descr = self.fielddescrs[i].get_parent_descr()
if not state:
continue
- if not self.descr_issubclass(info.descr, descr, optimizer.optimizer):
- raise VirtualStatesCantMatch()
if state.position > self.position:
fieldbox = info._fields[i]
state.enum_forced_boxes(boxes, fieldbox, optimizer, force_boxes)
diff --git a/rpython/translator/backendopt/merge_if_blocks.py b/rpython/translator/backendopt/merge_if_blocks.py
--- a/rpython/translator/backendopt/merge_if_blocks.py
+++ b/rpython/translator/backendopt/merge_if_blocks.py
@@ -20,6 +20,14 @@
return False
if isinstance(op.args[0], Constant) and isinstance(op.args[1], Constant):
return False
+ # check that the constant is hashable (ie not a symbolic)
+ try:
+ if isinstance(op.args[0], Constant):
+ hash(op.args[0].value)
+ else:
+ hash(op.args[1].value)
+ except TypeError:
+ return False
return True
def merge_chain(chain, checkvar, varmap, graph):
diff --git a/rpython/translator/backendopt/test/test_merge_if_blocks.py b/rpython/translator/backendopt/test/test_merge_if_blocks.py
--- a/rpython/translator/backendopt/test/test_merge_if_blocks.py
+++ b/rpython/translator/backendopt/test/test_merge_if_blocks.py
@@ -2,11 +2,12 @@
from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks
from rpython.translator.backendopt.all import backend_optimizations
from rpython.translator.translator import TranslationContext, graphof as tgraphof
-from rpython.flowspace.model import Block
+from rpython.flowspace.model import Block, checkgraph
from rpython.translator.backendopt.removenoops import remove_same_as
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int
from rpython.annotator.model import SomeChar, SomeUnicodeCodePoint
+from rpython.rlib.objectmodel import CDefinedIntSymbolic
def do_test_merge(fn, testvalues):
t = TranslationContext()
@@ -225,3 +226,29 @@
malloc.remove_mallocs(t, t.graphs)
from rpython.translator import simplify
simplify.join_blocks(graph)
+
+def test_switch_on_symbolic():
+ symb1 = CDefinedIntSymbolic("1", 1)
+ symb2 = CDefinedIntSymbolic("2", 2)
+ symb3 = CDefinedIntSymbolic("3", 3)
+ def fn(x):
+ res = 0
+ if x == symb1:
+ res += x + 1
+ elif x == symb2:
+ res += x + 2
+ elif x == symb3:
+ res += x + 3
+ res += 1
+ return res
+ t = TranslationContext()
+ a = t.buildannotator()
+ a.build_types(fn, [int])
+ rtyper = t.buildrtyper()
+ rtyper.specialize()
+ graph = t.graphs[0]
+ remove_same_as(graph)
+ res = merge_if_blocks_once(graph)
+ assert not res
+ checkgraph(graph)
+
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -275,6 +275,7 @@
if retval and self.translator.platform.name == 'msvc':
raise ValueError('Cannot do profile based optimization on MSVC,'
'it is not supported in free compiler version')
+ return retval
def getentrypointptr(self):
# XXX check that the entrypoint has the correct
More information about the pypy-commit
mailing list