[pypy-commit] pypy py3.3: hg merge py3k
mjacob
noreply at buildbot.pypy.org
Sat Jul 4 16:05:55 CEST 2015
Author: Manuel Jacob <me at manueljacob.de>
Branch: py3.3
Changeset: r78426:d9dca9ea01f3
Date: 2015-07-04 16:05 +0200
http://bitbucket.org/pypy/pypy/changeset/d9dca9ea01f3/
Log: hg merge py3k
diff too long, truncating to 2000 out of 2155 lines
diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py
--- a/lib-python/2.7/test/test_urllib2.py
+++ b/lib-python/2.7/test/test_urllib2.py
@@ -291,6 +291,7 @@
self.req_headers = []
self.data = None
self.raise_on_endheaders = False
+ self.sock = None
self._tunnel_headers = {}
def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py
--- a/lib-python/2.7/urllib2.py
+++ b/lib-python/2.7/urllib2.py
@@ -1200,6 +1200,12 @@
r = h.getresponse(buffering=True)
except TypeError: # buffering kw not supported
r = h.getresponse()
+ # If the server does not send us a 'Connection: close' header,
+ # HTTPConnection assumes the socket should be left open. Manually
+ # mark the socket to be closed when this response object goes away.
+ if h.sock:
+ h.sock.close()
+ h.sock = None
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py
--- a/lib_pypy/_tkinter/tclobj.py
+++ b/lib_pypy/_tkinter/tclobj.py
@@ -25,7 +25,7 @@
result = app.call('expr', '2**63')
typePtr = AsObj(result).typePtr
- if tkffi.string(typePtr.name) == v"bignum":
+ if tkffi.string(typePtr.name) == b"bignum":
self.BigNumType = typePtr
@@ -103,6 +103,8 @@
return value.internalRep.doubleValue
if value.typePtr == typeCache.IntType:
return value.internalRep.longValue
+ if value.typePtr == typeCache.WideIntType:
+ return FromWideIntObj(app, value)
if value.typePtr == typeCache.BigNumType and tklib.HAVE_LIBTOMMATH:
return FromBignumObj(app, value)
if value.typePtr == typeCache.ListType:
diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py
--- a/lib_pypy/_tkinter/tklib_build.py
+++ b/lib_pypy/_tkinter/tklib_build.py
@@ -180,6 +180,7 @@
typedef int... Tcl_WideInt;
int Tcl_GetWideIntFromObj(Tcl_Interp *interp, Tcl_Obj *obj, Tcl_WideInt *value);
+Tcl_Obj *Tcl_NewWideIntObj(Tcl_WideInt value);
""")
if HAVE_LIBTOMMATH:
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -135,7 +135,7 @@
Here are some more technical details. This issue affects the precise
time at which ``__del__`` methods are called, which
is not reliable in PyPy (nor Jython nor IronPython). It also means that
-weak references may stay alive for a bit longer than expected. This
+**weak references** may stay alive for a bit longer than expected. This
makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
useful: they will appear to stay alive for a bit longer in PyPy, and
suddenly they will really be dead, raising a ``ReferenceError`` on the
@@ -143,6 +143,24 @@
``ReferenceError`` at any place that uses them. (Or, better yet, don't use
``weakref.proxy()`` at all; use ``weakref.ref()``.)
+Note a detail in the `documentation for weakref callbacks`__:
+
+ If callback is provided and not None, *and the returned weakref
+ object is still alive,* the callback will be called when the object
+ is about to be finalized.
+
+There are cases where, due to CPython's refcount semantics, a weakref
+dies immediately before or after the objects it points to (typically
+with some circular reference). If it happens to die just after, then
+the callback will be invoked. In a similar case in PyPy, both the
+object and the weakref will be considered as dead at the same time,
+and the callback will not be invoked. (Issue `#2030`__)
+
+.. __: https://docs.python.org/2/library/weakref.html
+.. __: https://bitbucket.org/pypy/pypy/issue/2030/
+
+---------------------------------
+
There are a few extra implications from the difference in the GC. Most
notably, if an object has a ``__del__``, the ``__del__`` is never called more
than once in PyPy; but CPython will call the same ``__del__`` several times
@@ -321,9 +339,8 @@
Miscellaneous
-------------
-* Hash randomization (``-R``) is ignored in PyPy. As documented in
- http://bugs.python.org/issue14621, some of us believe it has no
- purpose in CPython either.
+* Hash randomization (``-R``) `is ignored in PyPy`_. In CPython
+ before 3.4 it has `little point`_.
* You can't store non-string keys in type objects. For example::
@@ -338,7 +355,8 @@
for about 1400 calls.
* since the implementation of dictionary is different, the exact number
- which ``__hash__`` and ``__eq__`` are called is different. Since CPython
+ of times that ``__hash__`` and ``__eq__`` are called is different.
+ Since CPython
does not give any specific guarantees either, don't rely on it.
* assignment to ``__class__`` is limited to the cases where it
@@ -395,3 +413,12 @@
interactive mode. In a released version, this behaviour is suppressed, but
setting the environment variable PYPY_IRC_TOPIC will bring it back. Note that
downstream package providers have been known to totally disable this feature.
+
+* PyPy's readline module was rewritten from scratch: it is not GNU's
+ readline. It should be mostly compatible, and it adds multiline
+ support (see ``multiline_input()``). On the other hand,
+ ``parse_and_bind()`` calls are ignored (issue `#2072`_).
+
+.. _`is ignored in PyPy`: http://bugs.python.org/issue14621
+.. _`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html
+.. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -6,15 +6,9 @@
C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_
project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API.
-**NOTE**: As of 1st of December, PyPy comes with ``--shared`` by default
-on linux, linux64 and windows. We will make it the default on all platforms
-by the time of the next release.
-
-The first thing that you need is to compile PyPy yourself with the option
-``--shared``. We plan to make ``--shared`` the default in the future. Consult
-the `how to compile PyPy`_ doc for details. This will result in ``libpypy.so``
-or ``pypy.dll`` file or something similar, depending on your platform. Consult
-your platform specification for details.
+**NOTE**: You need a PyPy compiled with the option ``--shared``, i.e.
+with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in
+recent versions of PyPy.
The resulting shared library exports very few functions, however they are
enough to accomplish everything you need, provided you follow a few principles.
@@ -75,10 +69,12 @@
Note that this API is a lot more minimal than say CPython C API, so at first
it's obvious to think that you can't do much. However, the trick is to do
all the logic in Python and expose it via `cffi`_ callbacks. Let's assume
-we're on linux and pypy is installed in ``/opt/pypy`` with the
+we're on linux and pypy is installed in ``/opt/pypy`` (with
+subdirectories like ``lib-python`` and ``lib_pypy``), and with the
library in ``/opt/pypy/bin/libpypy-c.so``. (It doesn't need to be
-installed; you can also replace this path with your local checkout.)
-We write a little C program:
+installed; you can also replace these paths with a local extract of the
+installation tarballs, or with your local checkout of pypy.) We write a
+little C program:
.. code-block:: c
@@ -92,7 +88,9 @@
int res;
rpython_startup_code();
- res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1);
+ /* note: in the path /opt/pypy/x, the final x is ignored and
+ replaced with lib-python and lib_pypy. */
+ res = pypy_setup_home("/opt/pypy/x", 1);
if (res) {
printf("Error setting pypy home!\n");
return 1;
@@ -179,7 +177,7 @@
int res;
rpython_startup_code();
- res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1);
+ res = pypy_setup_home("/opt/pypy/x", 1);
if (res) {
fprintf(stderr, "Error setting pypy home!\n");
return -1;
@@ -220,9 +218,15 @@
Finding pypy_home
-----------------
-Function pypy_setup_home takes one parameter - the path to libpypy. There's
-currently no "clean" way (pkg-config comes to mind) how to find this path. You
-can try the following (GNU-specific) hack (don't forget to link against *dl*):
+The function pypy_setup_home() takes as first parameter the path to a
+file from which it can deduce the location of the standard library.
+More precisely, it tries to remove final components until it finds
+``lib-python`` and ``lib_pypy``. There is currently no "clean" way
+(pkg-config comes to mind) to find this path. You can try the following
+(GNU-specific) hack (don't forget to link against *dl*), which assumes
+that the ``libpypy-c.so`` is inside the standard library directory.
+(This must more-or-less be the case anyway, otherwise the ``pypy``
+program itself would not run.)
.. code-block:: c
@@ -236,7 +240,7 @@
// caller should free returned pointer to avoid memleaks
// returns NULL on error
- char* guess_pypyhome() {
+ char* guess_pypyhome(void) {
// glibc-only (dladdr is why we #define _GNU_SOURCE)
Dl_info info;
void *_rpython_startup_code = dlsym(0,"rpython_startup_code");
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -11,3 +11,14 @@
.. branch: stdlib-2.7.10
Update stdlib to version 2.7.10
+
+.. branch: issue2062
+
+.. branch: disable-unroll-for-short-loops
+The JIT no longer performs loop unrolling if the loop compiles to too much code.
+
+.. branch: run-create_cffi_imports
+
+Build cffi import libraries as part of translation by monkey-patching an
+additional task into translation
+
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -1,6 +1,6 @@
import py
-import os, sys
+import os, sys, subprocess
import pypy
from pypy.interpreter import gateway
@@ -104,13 +104,16 @@
from pypy.module.sys.initpath import pypy_find_stdlib
verbose = rffi.cast(lltype.Signed, verbose)
if ll_home:
- home = rffi.charp2str(ll_home)
+ home1 = rffi.charp2str(ll_home)
+ home = os.path.join(home1, 'x') # <- so that 'll_home' can be
+ # directly the root directory
else:
- home = pypydir
+ home = home1 = pypydir
w_path = pypy_find_stdlib(space, home)
if space.is_none(w_path):
if verbose:
- debug("Failed to find library based on pypy_find_stdlib")
+ debug("pypy_setup_home: directories 'lib-python' and 'lib_pypy'"
+ " not found in '%s' or in any parent directory" % home1)
return rffi.cast(rffi.INT, 1)
space.startup()
space.call_function(w_pathsetter, w_path)
@@ -301,6 +304,44 @@
wrapstr = 'space.wrap(%r)' % (options)
pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr
+ # HACKHACKHACK
+ # ugly hack to modify target goal from compile_c to build_cffi_imports
+ # this should probably get cleaned up and merged with driver.create_exe
+ from rpython.translator.driver import taskdef
+ import types
+
+ class Options(object):
+ pass
+
+
+ def mkexename(name):
+ if sys.platform == 'win32':
+ name = name.new(ext='exe')
+ return name
+
+ @taskdef(['compile_c'], "Create cffi bindings for modules")
+ def task_build_cffi_imports(self):
+ from pypy.tool.build_cffi_imports import create_cffi_import_libraries
+ ''' Use cffi to compile cffi interfaces to modules'''
+ exename = mkexename(driver.compute_exe_name())
+ basedir = exename
+ while not basedir.join('include').exists():
+ _basedir = basedir.dirpath()
+ if _basedir == basedir:
+ raise ValueError('interpreter %s not inside pypy repo',
+ str(exename))
+ basedir = _basedir
+ modules = self.config.objspace.usemodules.getpaths()
+ options = Options()
+ # XXX possibly adapt options using modules
+ failures = create_cffi_import_libraries(exename, options, basedir)
+ # if failures, they were already printed
+ print >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored'
+ driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver)
+ driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, ['compile_c']
+ driver.default_goal = 'build_cffi_imports'
+ # HACKHACKHACK end
+
return self.get_entry_point(config)
def jitpolicy(self, driver):
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -767,6 +767,7 @@
# This is important for py3k
sys.executable = executable
+@hidden_applevel
def entry_point(executable, argv):
# note that before calling setup_bootstrap_path, we are limited because we
# cannot import stdlib modules. In particular, we cannot use unicode
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -1,6 +1,7 @@
import sys
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
+from rpython.rlib.objectmodel import specialize
from rpython.rlib import jit
TICK_COUNTER_STEP = 100
diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py
--- a/pypy/interpreter/pytraceback.py
+++ b/pypy/interpreter/pytraceback.py
@@ -65,7 +65,6 @@
def check_traceback(space, w_tb, msg):
- from pypy.interpreter.typedef import PyTraceback
if w_tb is None or not space.isinstance_w(w_tb, space.gettypeobject(PyTraceback.typedef)):
raise OperationError(space.w_TypeError, space.wrap(msg))
return w_tb
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -143,7 +143,7 @@
@jit.unroll_safe
def _call(self, funcaddr, args_w):
space = self.space
- cif_descr = self.cif_descr
+ cif_descr = self.cif_descr # 'self' should have been promoted here
size = cif_descr.exchange_size
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -134,8 +134,7 @@
def convert_to_object(self, cdata):
unichardata = rffi.cast(rffi.CWCHARP, cdata)
- s = rffi.wcharpsize2unicode(unichardata, 1)
- return self.space.wrap(s)
+ return self.space.wrap(unichardata[0])
def string(self, cdataobj, maxlen):
with cdataobj as ptr:
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -225,9 +225,13 @@
if (isinstance(w_cdata, cdataobj.W_CDataNewOwning) or
isinstance(w_cdata, cdataobj.W_CDataPtrToStructOrUnion)):
if i != 0:
- space = self.space
- raise oefmt(space.w_IndexError,
+ raise oefmt(self.space.w_IndexError,
"cdata '%s' can only be indexed by 0", self.name)
+ else:
+ if not w_cdata.unsafe_escaping_ptr():
+ raise oefmt(self.space.w_RuntimeError,
+ "cannot dereference null pointer from cdata '%s'",
+ self.name)
return self
def _check_slice_index(self, w_cdata, start, stop):
diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -60,12 +60,12 @@
self.ffi, self.ctx.c_types, getarg(g.c_type_op))
assert isinstance(rawfunctype, realize_c_type.W_RawFuncType)
#
- w_ct, locs = rawfunctype.unwrap_as_nostruct_fnptr(self.ffi)
+ rawfunctype.prepare_nostruct_fnptr(self.ffi)
#
ptr = rffi.cast(rffi.CCHARP, g.c_address)
assert ptr
- return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn, w_ct,
- locs, rawfunctype, fnname, self.libname)
+ return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn,
+ rawfunctype, fnname, self.libname)
@jit.elidable_promote()
def _get_attr_elidable(self, attr):
@@ -173,6 +173,10 @@
if w_value is None:
if is_getattr and attr == '__all__':
return self.dir1(ignore_type=cffi_opcode.OP_GLOBAL_VAR)
+ if is_getattr and attr == '__dict__':
+ return self.full_dict_copy()
+ if is_getattr and attr == '__name__':
+ return self.descr_repr()
raise oefmt(self.space.w_AttributeError,
"cffi library '%s' has no function, constant "
"or global variable named '%s'",
@@ -212,6 +216,17 @@
names_w.append(space.wrap(rffi.charp2str(g[i].c_name)))
return space.newlist(names_w)
+ def full_dict_copy(self):
+ space = self.space
+ total = rffi.getintfield(self.ctx, 'c_num_globals')
+ g = self.ctx.c_globals
+ w_result = space.newdict()
+ for i in range(total):
+ w_attr = space.wrap(rffi.charp2str(g[i].c_name))
+ w_value = self._get_attr(w_attr)
+ space.setitem(w_result, w_attr, w_value)
+ return w_result
+
def address_of_func_or_global_var(self, varname):
# rebuild a string object from 'varname', to do typechecks and
# to force a unicode back to a plain string
@@ -224,7 +239,8 @@
if isinstance(w_value, W_FunctionWrapper):
# '&func' returns a regular cdata pointer-to-function
if w_value.directfnptr:
- return W_CData(space, w_value.directfnptr, w_value.ctype)
+ ctype = w_value.typeof(self.ffi)
+ return W_CData(space, w_value.directfnptr, ctype)
else:
return w_value # backward compatibility
#
diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py
--- a/pypy/module/_cffi_backend/realize_c_type.py
+++ b/pypy/module/_cffi_backend/realize_c_type.py
@@ -1,4 +1,5 @@
import sys
+from rpython.rlib import jit
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.objectmodel import specialize
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -135,8 +136,12 @@
class W_RawFuncType(W_Root):
"""Temporary: represents a C function type (not a function pointer)"""
+
+ _immutable_fields_ = ['nostruct_ctype', 'nostruct_locs', 'nostruct_nargs']
_ctfuncptr = None
- _nostruct_ctfuncptr = (None, None)
+ nostruct_ctype = None
+ nostruct_locs = None
+ nostruct_nargs = 0
def __init__(self, opcodes, base_index):
self.opcodes = opcodes
@@ -168,14 +173,16 @@
assert self._ctfuncptr is not None
return self._ctfuncptr
- def unwrap_as_nostruct_fnptr(self, ffi):
- # tweaked version: instead of returning the ctfuncptr corresponding
- # exactly to the OP_FUNCTION ... OP_FUNCTION_END opcodes, return
- # another one in which the struct args are replaced with ptr-to-
- # struct, and a struct return value is replaced with a hidden first
- # arg of type ptr-to-struct. This is how recompiler.py produces
+ @jit.dont_look_inside
+ def prepare_nostruct_fnptr(self, ffi):
+ # tweaked version: instead of returning the ctfuncptr
+ # corresponding exactly to the OP_FUNCTION ... OP_FUNCTION_END
+ # opcodes, this builds in self.nostruct_ctype another one in
+ # which the struct args are replaced with ptr-to- struct, and
+ # a struct return value is replaced with a hidden first arg of
+ # type ptr-to-struct. This is how recompiler.py produces
# trampoline functions for PyPy.
- if self._nostruct_ctfuncptr[0] is None:
+ if self.nostruct_ctype is None:
fargs, fret, ellipsis = self._unpack(ffi)
# 'locs' will be a string of the same length as the final fargs,
# containing 'A' where a struct argument was detected, and 'R'
@@ -198,8 +205,10 @@
locs = None
else:
locs = ''.join(locs)
- self._nostruct_ctfuncptr = (ctfuncptr, locs)
- return self._nostruct_ctfuncptr
+ self.nostruct_ctype = ctfuncptr
+ self.nostruct_locs = locs
+ self.nostruct_nargs = len(ctfuncptr.fargs) - (locs is not None and
+ locs[0] == 'R')
def unexpected_fn_type(self, ffi):
fargs, fret, ellipsis = self._unpack(ffi)
diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c
--- a/pypy/module/_cffi_backend/src/parse_c_type.c
+++ b/pypy/module/_cffi_backend/src/parse_c_type.c
@@ -362,7 +362,7 @@
case TOK_INTEGER:
errno = 0;
-#ifndef MS_WIN32
+#ifndef _MSC_VER
if (sizeof(length) > sizeof(unsigned long))
length = strtoull(tok->p, &endptr, 0);
else
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -2099,8 +2099,7 @@
p = cast(BVoidP, 123456)
py.test.raises(TypeError, "p[0]")
p = cast(BVoidP, 0)
- if 'PY_DOT_PY' in globals(): py.test.skip("NULL crashes early on py.py")
- py.test.raises(TypeError, "p[0]")
+ py.test.raises((TypeError, RuntimeError), "p[0]")
def test_iter():
BInt = new_primitive_type("int")
@@ -3333,6 +3332,15 @@
check(4 | 8, "CHB", "GTB")
check(4 | 16, "CHB", "ROB")
+def test_dereference_null_ptr():
+ BInt = new_primitive_type("int")
+ BIntPtr = new_pointer_type(BInt)
+ p = cast(BIntPtr, 0)
+ py.test.raises(RuntimeError, "p[0]")
+ py.test.raises(RuntimeError, "p[0] = 42")
+ py.test.raises(RuntimeError, "p[42]")
+ py.test.raises(RuntimeError, "p[42] = -1")
+
def test_version():
# this test is here mostly for PyPy
assert __version__ == "1.1.2"
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -276,6 +276,15 @@
""")
lib.aa = 5
assert dir(lib) == ['aa', 'ff', 'my_constant']
+ #
+ aaobj = lib.__dict__['aa']
+ assert not isinstance(aaobj, int) # some internal object instead
+ assert lib.__dict__ == {
+ 'ff': lib.ff,
+ 'aa': aaobj,
+ 'my_constant': -45}
+ lib.__dict__['ff'] = "??"
+ assert lib.ff(10) == 15
def test_verify_opaque_struct(self):
ffi, lib = self.prepare(
@@ -819,6 +828,22 @@
assert isinstance(addr, ffi.CData)
assert ffi.typeof(addr) == ffi.typeof("long(*)(long)")
+ def test_address_of_function_with_struct(self):
+ ffi, lib = self.prepare(
+ "struct foo_s { int x; }; long myfunc(struct foo_s);",
+ "test_addressof_function_with_struct", """
+ struct foo_s { int x; };
+ char myfunc(struct foo_s input) { return (char)(input.x + 42); }
+ """)
+ s = ffi.new("struct foo_s *", [5])[0]
+ assert lib.myfunc(s) == 47
+ assert not isinstance(lib.myfunc, ffi.CData)
+ assert ffi.typeof(lib.myfunc) == ffi.typeof("long(*)(struct foo_s)")
+ addr = ffi.addressof(lib, 'myfunc')
+ assert addr(s) == 47
+ assert isinstance(addr, ffi.CData)
+ assert ffi.typeof(addr) == ffi.typeof("long(*)(struct foo_s)")
+
def test_issue198(self):
ffi, lib = self.prepare("""
typedef struct{...;} opaque_t;
@@ -984,5 +1009,6 @@
assert sys.modules['_CFFI_test_import_from_lib.lib'] is lib
from _CFFI_test_import_from_lib.lib import MYFOO
assert MYFOO == 42
- assert not hasattr(lib, '__dict__')
+ assert hasattr(lib, '__dict__')
assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar'
+ assert lib.__name__ == repr(lib)
diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py
--- a/pypy/module/_cffi_backend/wrapper.py
+++ b/pypy/module/_cffi_backend/wrapper.py
@@ -19,12 +19,20 @@
wrapper is callable, and the arguments it expects and returns
are directly the struct/union. Calling ffi.typeof(wrapper)
also returns the original struct/union signature.
+
+ This class cannot be used for variadic functions.
"""
_immutable_ = True
common_doc_str = 'direct call to the C function of the same name'
- def __init__(self, space, fnptr, directfnptr, ctype,
- locs, rawfunctype, fnname, modulename):
+ def __init__(self, space, fnptr, directfnptr,
+ rawfunctype, fnname, modulename):
+ # everything related to the type of the function is accessed
+ # as immutable attributes of the 'rawfunctype' object, which
+ # is a W_RawFuncType. This gives us an obvious thing to
+ # promote in order to do the call.
+ ctype = rawfunctype.nostruct_ctype
+ locs = rawfunctype.nostruct_locs
assert isinstance(ctype, W_CTypeFunc)
assert ctype.cif_descr is not None # not for '...' functions
assert locs is None or len(ctype.fargs) == len(locs)
@@ -32,83 +40,86 @@
self.space = space
self.fnptr = fnptr
self.directfnptr = directfnptr
- self.ctype = ctype
- self.locs = locs
self.rawfunctype = rawfunctype
self.fnname = fnname
self.modulename = modulename
- self.nargs_expected = len(ctype.fargs) - (locs is not None and
- locs[0] == 'R')
def typeof(self, ffi):
return self.rawfunctype.unwrap_as_fnptr(ffi)
- @jit.unroll_safe
- def _prepare(self, args_w, start_index):
- # replaces struct/union arguments with ptr-to-struct/union arguments
+ def descr_call(self, args_w):
space = self.space
- locs = self.locs
- fargs = self.ctype.fargs
- for i in range(start_index, len(locs)):
- if locs[i] != 'A':
- continue
- w_arg = args_w[i]
- farg = fargs[i] # <ptr to struct/union>
- assert isinstance(farg, W_CTypePtrOrArray)
- if isinstance(w_arg, W_CData) and w_arg.ctype is farg.ctitem:
- # fast way: we are given a W_CData "struct", so just make
- # a new W_CData "ptr-to-struct" which points to the same
- # raw memory. We use unsafe_escaping_ptr(), so we have to
- # make sure the original 'w_arg' stays alive; the easiest
- # is to build an instance of W_CDataPtrToStructOrUnion.
- w_arg = W_CDataPtrToStructOrUnion(
- space, w_arg.unsafe_escaping_ptr(), farg, w_arg)
- else:
- # slow way: build a new "ptr to struct" W_CData by calling
- # the equivalent of ffi.new()
- if space.is_w(w_arg, space.w_None):
- continue
- w_arg = farg.newp(w_arg)
- args_w[i] = w_arg
-
- def descr_call(self, args_w):
- if len(args_w) != self.nargs_expected:
- space = self.space
- if self.nargs_expected == 0:
+ rawfunctype = jit.promote(self.rawfunctype)
+ ctype = rawfunctype.nostruct_ctype
+ locs = rawfunctype.nostruct_locs
+ nargs_expected = rawfunctype.nostruct_nargs
+ #
+ if len(args_w) != nargs_expected:
+ if nargs_expected == 0:
raise oefmt(space.w_TypeError,
"%s() takes no arguments (%d given)",
self.fnname, len(args_w))
- elif self.nargs_expected == 1:
+ elif nargs_expected == 1:
raise oefmt(space.w_TypeError,
"%s() takes exactly one argument (%d given)",
self.fnname, len(args_w))
else:
raise oefmt(space.w_TypeError,
"%s() takes exactly %d arguments (%d given)",
- self.fnname, self.nargs_expected, len(args_w))
+ self.fnname, nargs_expected, len(args_w))
#
- if self.locs is not None:
+ if locs is not None:
# This case is if there are structs as arguments or return values.
# If the result we want to present to the user is "returns struct",
# then internally allocate the struct and pass a pointer to it as
# a first argument.
- if self.locs[0] == 'R':
- w_result_cdata = self.ctype.fargs[0].newp(self.space.w_None)
+ if locs[0] == 'R':
+ w_result_cdata = ctype.fargs[0].newp(space.w_None)
args_w = [w_result_cdata] + args_w
- self._prepare(args_w, 1)
- self.ctype._call(self.fnptr, args_w) # returns w_None
+ prepare_args(space, rawfunctype, args_w, 1)
+ #
+ ctype._call(self.fnptr, args_w) # returns w_None
+ #
assert isinstance(w_result_cdata, W_CDataPtrToStructOrUnion)
return w_result_cdata.structobj
else:
args_w = args_w[:]
- self._prepare(args_w, 0)
+ prepare_args(space, rawfunctype, args_w, 0)
#
- return self.ctype._call(self.fnptr, args_w)
+ return ctype._call(self.fnptr, args_w)
def descr_repr(self, space):
return space.wrap("<FFIFunctionWrapper for %s()>" % (self.fnname,))
+@jit.unroll_safe
+def prepare_args(space, rawfunctype, args_w, start_index):
+ # replaces struct/union arguments with ptr-to-struct/union arguments
+ locs = rawfunctype.nostruct_locs
+ fargs = rawfunctype.nostruct_ctype.fargs
+ for i in range(start_index, len(locs)):
+ if locs[i] != 'A':
+ continue
+ w_arg = args_w[i]
+ farg = fargs[i] # <ptr to struct/union>
+ assert isinstance(farg, W_CTypePtrOrArray)
+ if isinstance(w_arg, W_CData) and w_arg.ctype is farg.ctitem:
+ # fast way: we are given a W_CData "struct", so just make
+ # a new W_CData "ptr-to-struct" which points to the same
+ # raw memory. We use unsafe_escaping_ptr(), so we have to
+ # make sure the original 'w_arg' stays alive; the easiest
+ # is to build an instance of W_CDataPtrToStructOrUnion.
+ w_arg = W_CDataPtrToStructOrUnion(
+ space, w_arg.unsafe_escaping_ptr(), farg, w_arg)
+ else:
+ # slow way: build a new "ptr to struct" W_CData by calling
+ # the equivalent of ffi.new()
+ if space.is_w(w_arg, space.w_None):
+ continue
+ w_arg = farg.newp(w_arg)
+ args_w[i] = w_arg
+
+
W_FunctionWrapper.typedef = TypeDef(
'FFIFunctionWrapper',
__repr__ = interp2app(W_FunctionWrapper.descr_repr),
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -626,6 +626,7 @@
def read_w(self, space, w_size=None):
self._check_attached(space)
+ self._check_closed(space)
if not self.w_decoder:
self._unsupportedoperation(space, "not readable")
@@ -667,6 +668,7 @@
def readline_w(self, space, w_limit=None):
self._check_attached(space)
+ self._check_closed(space)
self._writeflush(space)
limit = convert_size(space, w_limit)
@@ -762,7 +764,7 @@
def write_w(self, space, w_text):
self._check_attached(space)
- # self._check_closed(space)
+ self._check_closed(space)
if not self.w_encoder:
self._unsupportedoperation(space, "not writable")
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -450,3 +450,56 @@
with _io.open(filename, 'x') as f:
assert f.mode == 'x'
raises(FileExistsError, _io.open, filename, 'x')
+
+
+class AppTestIoAferClose:
+ spaceconfig = dict(usemodules=['_io'])
+
+ def setup_class(cls):
+ tmpfile = udir.join('tmpfile').ensure()
+ cls.w_tmpfile = cls.space.wrap(str(tmpfile))
+
+ def test_io_after_close(self):
+ import _io
+ for kwargs in [
+ {"mode": "w"},
+ {"mode": "wb"},
+ {"mode": "w", "buffering": 1},
+ {"mode": "w", "buffering": 2},
+ {"mode": "wb", "buffering": 0},
+ {"mode": "r"},
+ {"mode": "rb"},
+ {"mode": "r", "buffering": 1},
+ {"mode": "r", "buffering": 2},
+ {"mode": "rb", "buffering": 0},
+ {"mode": "w+"},
+ {"mode": "w+b"},
+ {"mode": "w+", "buffering": 1},
+ {"mode": "w+", "buffering": 2},
+ {"mode": "w+b", "buffering": 0},
+ ]:
+ if "b" not in kwargs["mode"]:
+ kwargs["encoding"] = "ascii"
+ f = _io.open(self.tmpfile, **kwargs)
+ f.close()
+ raises(ValueError, f.flush)
+ raises(ValueError, f.fileno)
+ raises(ValueError, f.isatty)
+ raises(ValueError, f.__iter__)
+ if hasattr(f, "peek"):
+ raises(ValueError, f.peek, 1)
+ raises(ValueError, f.read)
+ if hasattr(f, "read1"):
+ raises(ValueError, f.read1, 1024)
+ if hasattr(f, "readall"):
+ raises(ValueError, f.readall)
+ if hasattr(f, "readinto"):
+ raises(ValueError, f.readinto, bytearray(1024))
+ raises(ValueError, f.readline)
+ raises(ValueError, f.readlines)
+ raises(ValueError, f.seek, 0)
+ raises(ValueError, f.tell)
+ raises(ValueError, f.truncate)
+ raises(ValueError, f.write, b"" if "b" in kwargs['mode'] else u"")
+ raises(ValueError, f.writelines, [])
+ raises(ValueError, next, f)
diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py
--- a/pypy/module/_rawffi/callback.py
+++ b/pypy/module/_rawffi/callback.py
@@ -27,8 +27,10 @@
callback_ptr = global_counter.get(userdata.addarg)
w_callable = callback_ptr.w_callable
argtypes = callback_ptr.argtypes
+ must_leave = False
space = callback_ptr.space
try:
+ must_leave = space.threadlocals.try_enter_thread(space)
args_w = [None] * len(argtypes)
for i in range(len(argtypes)):
argtype = argtypes[i]
@@ -50,6 +52,8 @@
resshape = letter2tp(space, callback_ptr.result)
for i in range(resshape.size):
ll_res[i] = '\x00'
+ if must_leave:
+ space.threadlocals.leave_thread(space)
class W_CallbackPtr(W_DataInstance):
@@ -75,6 +79,14 @@
if tracker.DO_TRACING:
addr = rffi.cast(lltype.Signed, self.ll_callback.ll_closure)
tracker.trace_allocation(addr, self)
+ #
+ # We must setup the GIL here, in case the callback is invoked in
+ # some other non-Pythonic thread. This is the same as ctypes on
+ # CPython (but only when creating a callback; on CPython it occurs
+ # as soon as we import _ctypes)
+ if space.config.translation.thread:
+ from pypy.module.thread.os_thread import setup_threads
+ setup_threads(space)
def free(self):
if tracker.DO_TRACING:
diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py
--- a/pypy/module/_socket/__init__.py
+++ b/pypy/module/_socket/__init__.py
@@ -18,6 +18,10 @@
from rpython.rlib.rsocket import rsocket_startup
rsocket_startup()
+ def shutdown(self, space):
+ from pypy.module._socket.interp_socket import close_all_sockets
+ close_all_sockets(space)
+
def buildloaders(cls):
from rpython.rlib import rsocket
for name in """
diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py
--- a/pypy/module/_socket/interp_func.py
+++ b/pypy/module/_socket/interp_func.py
@@ -2,7 +2,7 @@
from rpython.rlib.rsocket import SocketError, INVALID_SOCKET
from rpython.rlib.rarithmetic import intmask
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from pypy.module._socket.interp_socket import (
converted_error, W_Socket, addr_as_object, fill_from_object, get_error,
@@ -147,6 +147,19 @@
newfd = rsocket.dup(fd)
return space.wrap(newfd)
+@unwrap_spec(fd=int, family=int, type=int, proto=int)
+def fromfd(space, fd, family, type, proto=0):
+ """fromfd(fd, family, type[, proto]) -> socket object
+
+ Create a socket object from the given file descriptor.
+ The remaining arguments are the same as for socket().
+ """
+ try:
+ sock = rsocket.fromfd(fd, family, type, proto)
+ except SocketError, e:
+ raise converted_error(space, e)
+ return space.wrap(W_Socket(space, sock))
+
@unwrap_spec(family=int, type=int, proto=int)
def socketpair(space, family=rsocket.socketpair_default_family,
type =rsocket.SOCK_STREAM,
@@ -163,8 +176,8 @@
except SocketError, e:
raise converted_error(space, e)
return space.newtuple([
- space.wrap(W_Socket(sock1)),
- space.wrap(W_Socket(sock2))
+ space.wrap(W_Socket(space, sock1)),
+ space.wrap(W_Socket(space, sock2))
])
# The following 4 functions refuse all negative numbers, like CPython 2.6.
@@ -250,9 +263,9 @@
ip = rsocket.inet_ntop(family, packed)
except SocketError, e:
raise converted_error(space, e)
- except ValueError, e: # XXX the message is lost in RPython
- raise OperationError(space.w_ValueError,
- space.wrap(str(e)))
+ except ValueError:
+ raise oefmt(space.w_ValueError,
+ "invalid length of packed IP address string")
return space.wrap(ip)
@unwrap_spec(family=int, type=int, proto=int, flags=int)
diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py
--- a/pypy/module/_socket/interp_socket.py
+++ b/pypy/module/_socket/interp_socket.py
@@ -1,4 +1,5 @@
-from rpython.rlib import rsocket
+import sys
+from rpython.rlib import rsocket, rweaklist
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.rsocket import (
RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno,
@@ -161,16 +162,14 @@
class W_Socket(W_Root):
-
- # for _dealloc_warn
- space = None
-
- def __init__(self, sock):
+ def __init__(self, space, sock):
+ self.space = space
self.sock = sock
+ register_socket(space, sock)
def descr_new(space, w_subtype, __args__):
sock = space.allocate_instance(W_Socket, w_subtype)
- W_Socket.__init__(sock, RSocket.empty_rsocket())
+ W_Socket.__init__(sock, space, RSocket.empty_rsocket())
return space.wrap(sock)
@unwrap_spec(family=int, type=int, proto=int,
@@ -183,8 +182,7 @@
fd=space.c_filedescriptor_w(w_fileno))
else:
sock = RSocket(family, type, proto)
- W_Socket.__init__(self, sock)
- self.space = space
+ W_Socket.__init__(self, space, sock)
except SocketError, e:
raise converted_error(space, e)
@@ -621,6 +619,45 @@
# ____________________________________________________________
+# Automatic shutdown()/close()
+
+# On some systems, the C library does not guarantee that when the program
+# finishes, all data sent so far is really sent even if the socket is not
+# explicitly closed. This behavior has been observed on Windows but not
+# on Linux, so far.
+NEED_EXPLICIT_CLOSE = (sys.platform == 'win32')
+
+class OpenRSockets(rweaklist.RWeakListMixin):
+ pass
+class OpenRSocketsState:
+ def __init__(self, space):
+ self.openrsockets = OpenRSockets()
+ self.openrsockets.initialize()
+
+def getopenrsockets(space):
+ if NEED_EXPLICIT_CLOSE and space.config.translation.rweakref:
+ return space.fromcache(OpenRSocketsState).openrsockets
+ else:
+ return None
+
+def register_socket(space, socket):
+ openrsockets = getopenrsockets(space)
+ if openrsockets is not None:
+ openrsockets.add_handle(socket)
+
+def close_all_sockets(space):
+ openrsockets = getopenrsockets(space)
+ if openrsockets is not None:
+ for sock_wref in openrsockets.get_all_handles():
+ sock = sock_wref()
+ if sock is not None:
+ try:
+ sock.close()
+ except SocketError:
+ pass
+
+
+# ____________________________________________________________
# Error handling
class SocketAPI:
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -301,10 +301,16 @@
class AppTestSocket:
+ spaceconfig = dict(usemodules=['_socket', '_weakref', 'struct'])
+
def setup_class(cls):
cls.space = space
cls.w_udir = space.wrap(str(udir))
+ def teardown_class(cls):
+ if not cls.runappdirect:
+ cls.space.sys.getmodule('_socket').shutdown(cls.space)
+
def test_module(self):
import _socket
assert _socket.socket.__name__ == 'socket'
@@ -602,6 +608,12 @@
finally:
os.chdir(oldcwd)
+ def test_automatic_shutdown(self):
+ # doesn't really test anything, but at least should not explode
+ # in close_all_sockets()
+ import _socket
+ self.foo = _socket.socket()
+
def test_subclass_init(self):
# Socket is not created in __new__, but in __init__.
import socket
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -143,7 +143,7 @@
def __init__(self, ctx, protos):
self.protos = protos
self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
- NPN_STORAGE.set(r_uint(rffi.cast(rffi.UINT, self.buf)), self)
+ NPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
# set both server and client callbacks, because the context
# can be used to create both types of sockets
@@ -158,7 +158,7 @@
@staticmethod
def advertiseNPN_cb(s, data_ptr, len_ptr, args):
- npn = NPN_STORAGE.get(r_uint(rffi.cast(rffi.UINT, args)))
+ npn = NPN_STORAGE.get(rffi.cast(lltype.Unsigned, args))
if npn and npn.protos:
data_ptr[0] = npn.buf
len_ptr[0] = rffi.cast(rffi.UINT, len(npn.protos))
@@ -170,7 +170,7 @@
@staticmethod
def selectNPN_cb(s, out_ptr, outlen_ptr, server, server_len, args):
- npn = NPN_STORAGE.get(r_uint(rffi.cast(rffi.UINT, args)))
+ npn = NPN_STORAGE.get(rffi.cast(lltype.Unsigned, args))
if npn and npn.protos:
client = npn.buf
client_len = len(npn.protos)
@@ -189,7 +189,7 @@
def __init__(self, ctx, protos):
self.protos = protos
self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
- ALPN_STORAGE.set(r_uint(rffi.cast(rffi.UINT, self.buf)), self)
+ ALPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
with rffi.scoped_str2charp(protos) as protos_buf:
if libssl_SSL_CTX_set_alpn_protos(
@@ -204,7 +204,7 @@
@staticmethod
def selectALPN_cb(s, out_ptr, outlen_ptr, client, client_len, args):
- alpn = ALPN_STORAGE.get(r_uint(rffi.cast(rffi.UINT, args)))
+ alpn = ALPN_STORAGE.get(rffi.cast(lltype.Unsigned, args))
if alpn and alpn.protos:
server = alpn.buf
server_len = len(alpn.protos)
diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py
--- a/pypy/module/_vmprof/interp_vmprof.py
+++ b/pypy/module/_vmprof/interp_vmprof.py
@@ -26,7 +26,7 @@
eci_kwds = dict(
include_dirs = [SRC],
includes = ['vmprof.h', 'trampoline.h'],
- separate_module_files = [SRC.join('trampoline.asmgcc.s')],
+ separate_module_files = [SRC.join('trampoline.vmprof.s')],
libraries = ['dl'],
post_include_bits=["""
diff --git a/pypy/module/_vmprof/src/trampoline.asmgcc.s b/pypy/module/_vmprof/src/trampoline.vmprof.s
rename from pypy/module/_vmprof/src/trampoline.asmgcc.s
rename to pypy/module/_vmprof/src/trampoline.vmprof.s
--- a/pypy/module/_vmprof/src/trampoline.asmgcc.s
+++ b/pypy/module/_vmprof/src/trampoline.vmprof.s
@@ -1,7 +1,6 @@
// NOTE: you need to use TABs, not spaces!
.text
- .p2align 4,,-1
.globl pypy_execute_frame_trampoline
.type pypy_execute_frame_trampoline, @function
pypy_execute_frame_trampoline:
diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c
--- a/pypy/module/_vmprof/src/vmprof.c
+++ b/pypy/module/_vmprof/src/vmprof.c
@@ -305,7 +305,6 @@
static int remove_sigprof_timer(void) {
static struct itimerval timer;
- last_period_usec = 0;
timer.it_interval.tv_sec = 0;
timer.it_interval.tv_usec = 0;
timer.it_value.tv_sec = 0;
@@ -317,11 +316,15 @@
}
static void atfork_disable_timer(void) {
- remove_sigprof_timer();
+ if (last_period_usec) {
+ remove_sigprof_timer();
+ }
}
static void atfork_enable_timer(void) {
- install_sigprof_timer(last_period_usec);
+ if (last_period_usec) {
+ install_sigprof_timer(last_period_usec);
+ }
}
static int install_pthread_atfork_hooks(void) {
@@ -412,6 +415,7 @@
if (remove_sigprof_timer() == -1) {
return -1;
}
+ last_period_usec = 0;
if (remove_sigprof_handler() == -1) {
return -1;
}
diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
--- a/pypy/module/cpyext/test/test_version.py
+++ b/pypy/module/cpyext/test/test_version.py
@@ -24,7 +24,7 @@
}
"""
module = self.import_module(name='foo', init=init)
- assert module.py_version == sys.version[:5]
+ assert module.py_version == '%d.%d.%d' % sys.version_info[:3]
assert module.py_major_version == sys.version_info.major
assert module.py_minor_version == sys.version_info.minor
assert module.py_micro_version == sys.version_info.micro
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -91,7 +91,6 @@
return w_mod
-
class _WIN32Path(object):
def __init__(self, path):
self.path = path
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -1223,6 +1223,53 @@
finally:
sys.path_hooks.pop()
+ def test_meta_path_import_error_1(self):
+ # as far as I can tell, the problem is that in CPython, if you
+ # use an import hook that doesn't update sys.modules, then the
+ # import succeeds; but at the same time, you can have the same
+ # result without an import hook (see test_del_from_sys_modules)
+ # and then the import fails. This looks like even more mess
+ # to replicate, so we ignore it until someone really hits this
+ # case...
+ skip("looks like an inconsistency in CPython")
+
+ class ImportHook(object):
+ def find_module(self, fullname, path=None):
+ assert not fullname.endswith('*')
+ if fullname == 'meta_path_pseudo_module':
+ return self
+ def load_module(self, fullname):
+ assert fullname == 'meta_path_pseudo_module'
+ # we "forget" to update sys.modules
+ return new.module('meta_path_pseudo_module')
+
+ import sys, new
+ sys.meta_path.append(ImportHook())
+ try:
+ import meta_path_pseudo_module
+ finally:
+ sys.meta_path.pop()
+
+ def test_meta_path_import_star_2(self):
+ class ImportHook(object):
+ def find_module(self, fullname, path=None):
+ if fullname.startswith('meta_path_2_pseudo_module'):
+ return self
+ def load_module(self, fullname):
+ assert fullname == 'meta_path_2_pseudo_module'
+ m = types.ModuleType('meta_path_2_pseudo_module')
+ m.__path__ = ['/some/random/dir']
+ sys.modules['meta_path_2_pseudo_module'] = m
+ return m
+
+ import sys, types
+ sys.meta_path.append(ImportHook())
+ try:
+ exec("from meta_path_2_pseudo_module import *", {})
+ finally:
+ sys.meta_path.pop()
+
+
class AppTestPyPyExtension(object):
spaceconfig = dict(usemodules=['imp', 'zipimport', '__pypy__'])
diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py
--- a/pypy/module/math/interp_math.py
+++ b/pypy/module/math/interp_math.py
@@ -368,7 +368,7 @@
else:
partials.append(v)
if special_sum != 0.0:
- if rfloat.isnan(special_sum):
+ if rfloat.isnan(inf_sum):
raise OperationError(space.w_ValueError, space.wrap("-inf + inf"))
return space.wrap(special_sum)
hi = 0.0
diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py
--- a/pypy/module/math/test/test_math.py
+++ b/pypy/module/math/test/test_math.py
@@ -1,5 +1,6 @@
from __future__ import with_statement
+import py
from pypy.interpreter.function import Function
from pypy.interpreter.gateway import BuiltinCode
from pypy.module.math.test import test_direct
@@ -113,6 +114,10 @@
([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
[-2.**1022],
float.fromhex('0x1.5555555555555p+970')),
+ # infinity and nans
+ ([float("inf")], float("inf")),
+ ([float("-inf")], float("-inf")),
+ ([float("nan")], float("nan")),
]
for i, (vals, expected) in enumerate(test_values):
@@ -124,7 +129,8 @@
except ValueError:
py.test.fail("test %d failed: got ValueError, expected %r "
"for math.fsum(%.100r)" % (i, expected, vals))
- assert actual == expected
+ assert actual == expected or (
+ math.isnan(actual) and math.isnan(expected))
def test_factorial(self):
import math, sys
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -266,6 +266,15 @@
""")
lib.aa = 5
assert dir(lib) == ['aa', 'ff', 'my_constant']
+ #
+ aaobj = lib.__dict__['aa']
+ assert not isinstance(aaobj, int) # some internal object instead
+ assert lib.__dict__ == {
+ 'ff': lib.ff,
+ 'aa': aaobj,
+ 'my_constant': -45}
+ lib.__dict__['ff'] = "??"
+ assert lib.ff(10) == 15
def test_verify_opaque_struct():
ffi = FFI()
@@ -1053,5 +1062,5 @@
assert sys.modules['_CFFI_test_import_from_lib.lib'] is lib
from _CFFI_test_import_from_lib.lib import MYFOO
assert MYFOO == 42
- assert not hasattr(lib, '__dict__')
+ assert hasattr(lib, '__dict__')
assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar'
diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py
--- a/pypy/objspace/std/kwargsdict.py
+++ b/pypy/objspace/std/kwargsdict.py
@@ -167,19 +167,26 @@
return iter(self.unerase(w_dict.dstorage)[1])
def getiteritems(self, w_dict):
- keys = self.unerase(w_dict.dstorage)[0]
- return iter(range(len(keys)))
+ return Zip(*self.unerase(w_dict.dstorage))
wrapkey = _wrapkey
-def next_item(self):
- strategy = self.strategy
- assert isinstance(strategy, KwargsDictStrategy)
- for i in self.iterator:
- keys, values_w = strategy.unerase(self.w_dict.dstorage)
- return _wrapkey(self.space, keys[i]), values_w[i]
- else:
- return None, None
+class Zip(object):
+ def __init__(self, list1, list2):
+ assert len(list1) == len(list2)
+ self.list1 = list1
+ self.list2 = list2
+ self.i = 0
-create_iterator_classes(KwargsDictStrategy, override_next_item=next_item)
+ def __iter__(self):
+ return self
+
+ def next(self):
+ i = self.i
+ if i >= len(self.list1):
+ raise StopIteration
+ self.i = i + 1
+ return (self.list1[i], self.list2[i])
+
+create_iterator_classes(KwargsDictStrategy)
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -193,9 +193,9 @@
def switch_to_object_strategy(self):
list_w = self.getitems()
- self.strategy = self.space.fromcache(ObjectListStrategy)
- # XXX this is quite indirect
- self.init_from_list_w(list_w)
+ object_strategy = self.space.fromcache(ObjectListStrategy)
+ self.strategy = object_strategy
+ object_strategy.init_from_list_w(self, list_w)
def _temporarily_as_objects(self):
if self.strategy is self.space.fromcache(ObjectListStrategy):
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -97,6 +97,8 @@
#
raises(TypeError, dictproxy, 3)
raises(TypeError, dictproxy, [3])
+ #
+ {}.update(proxy)
class AppTestUserObjectMethodCache(AppTestUserObject):
spaceconfig = {"objspace.std.withmethodcachecounter": True}
diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py
--- a/pypy/objspace/std/test/test_kwargsdict.py
+++ b/pypy/objspace/std/test/test_kwargsdict.py
@@ -160,6 +160,14 @@
assert a == 3
assert "KwargsDictStrategy" in self.get_strategy(d)
+ def test_iteritems_bug(self):
+ def f(**args):
+ return args
+
+ d = f(a=2, b=3, c=4)
+ for key, value in d.items():
+ None in d
+
def test_unicode(self):
"""
def f(**kwargs):
diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/build_cffi_imports.py
@@ -0,0 +1,77 @@
+import sys, shutil
+from rpython.tool.runsubprocess import run_subprocess
+
+class MissingDependenciesError(Exception):
+ pass
+
+
+cffi_build_scripts = {
+ "sqlite3": "_sqlite3_build.py",
+ "audioop": "_audioop_build.py",
+ "tk": "_tkinter/tklib_build.py",
+ "curses": "_curses_build.py" if sys.platform != "win32" else None,
+ "syslog": "_syslog_build.py" if sys.platform != "win32" else None,
+ "_gdbm": "_gdbm_build.py" if sys.platform != "win32" else None,
+ "pwdgrp": "_pwdgrp_build.py" if sys.platform != "win32" else None,
+ "lzma": "_lzma_build.py",
+ "_decimal": "_decimal_build.py",
+ "xx": None, # for testing: 'None' should be completely ignored
+ }
+
+def create_cffi_import_libraries(pypy_c, options, basedir):
+ shutil.rmtree(str(basedir.join('lib_pypy', '__pycache__')),
+ ignore_errors=True)
+ failures = []
+ for key, module in sorted(cffi_build_scripts.items()):
+ if module is None or getattr(options, 'no_' + key, False):
+ continue
+ if module.endswith('.py'):
+ args = [module]
+ cwd = str(basedir.join('lib_pypy'))
+ else:
+ args = ['-c', 'import ' + module]
+ cwd = None
+ print >> sys.stderr, '*', ' '.join(args)
+ try:
+ status, stdout, stderr = run_subprocess(str(pypy_c), args, cwd=cwd)
+ if status != 0:
+ print >> sys.stderr, stdout, stderr
+ failures.append((key, module))
+ except:
+ import traceback;traceback.print_exc()
+ failures.append((key, module))
+ return failures
+
+if __name__ == '__main__':
+ import py, os
+ if '__pypy__' not in sys.builtin_module_names:
+ print 'Call with a pypy interpreter'
+ sys.exit(-1)
+
+ class Options(object):
+ pass
+
+ exename = py.path.local(sys.executable)
+ basedir = exename
+ while not basedir.join('include').exists():
+ _basedir = basedir.dirpath()
+ if _basedir == basedir:
+ raise ValueError('interpreter %s not inside pypy repo',
+ str(exename))
+ basedir = _basedir
+ options = Options()
+ print >> sys.stderr, "There should be no failures here"
+ failures = create_cffi_import_libraries(exename, options, basedir)
+ if len(failures) > 0:
+ print 'failed to build', [f[1] for f in failures]
+ assert False
+
+ # monkey patch a failure, just to test
+ print >> sys.stderr, 'This line should be followed by a traceback'
+ for k in cffi_build_scripts:
+ setattr(options, 'no_' + k, True)
+ must_fail = '_missing_build_script.py'
+ assert not os.path.exists(str(basedir.join('lib_pypy').join(must_fail)))
+ cffi_build_scripts['should_fail'] = must_fail
+ failures = create_cffi_import_libraries(exename, options, basedir)
+ assert len(failures) == 1
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
--- a/pypy/tool/release/package.py
+++ b/pypy/tool/release/package.py
@@ -29,6 +29,9 @@
# XXX: don't hardcode the version
POSIX_EXE = 'pypy3.3'
+from pypy.tool.build_cffi_imports import (create_cffi_import_libraries,
+ MissingDependenciesError, cffi_build_scripts)
+
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
@@ -44,50 +47,12 @@
class PyPyCNotFound(Exception):
pass
-class MissingDependenciesError(Exception):
- pass
-
def fix_permissions(dirname):
if sys.platform != 'win32':
os.system("chmod -R a+rX %s" % dirname)
os.system("chmod -R g-w %s" % dirname)
-cffi_build_scripts = {
- "sqlite3": "_sqlite3_build.py",
- "audioop": "_audioop_build.py",
- "tk": "_tkinter/tklib_build.py",
- "curses": "_curses_build.py" if sys.platform != "win32" else None,
- "syslog": "_syslog_build.py" if sys.platform != "win32" else None,
- "_gdbm": "_gdbm_build.py" if sys.platform != "win32" else None,
- "pwdgrp": "_pwdgrp_build.py" if sys.platform != "win32" else None,
- "lzma": "_lzma_build.py",
- "_decimal": "_decimal_build.py",
- "xx": None, # for testing: 'None' should be completely ignored
- }
-
-def create_cffi_import_libraries(pypy_c, options, basedir):
- shutil.rmtree(str(basedir.join('lib_pypy', '__pycache__')),
- ignore_errors=True)
- for key, module in sorted(cffi_build_scripts.items()):
- if module is None or getattr(options, 'no_' + key):
- continue
- if module.endswith('.py'):
- args = [str(pypy_c), module]
- cwd = str(basedir.join('lib_pypy'))
- else:
- args = [str(pypy_c), '-c', 'import ' + module]
- cwd = None
- print >> sys.stderr, '*', ' '.join(args)
- try:
- subprocess.check_call(args, cwd=cwd)
- except subprocess.CalledProcessError:
- print >>sys.stderr, """!!!!!!!!!!\nBuilding {0} bindings failed.
-You can either install development headers package,
-add the --without-{0} option to skip packaging this
-binary CFFI extension, or say --without-cffi.""".format(key)
- raise MissingDependenciesError(module)
-
def pypy_runs(pypy_c, quiet=False):
kwds = {}
if quiet:
@@ -119,9 +84,13 @@
if not _fake and not pypy_runs(pypy_c):
raise OSError("Running %r failed!" % (str(pypy_c),))
if not options.no_cffi:
- try:
- create_cffi_import_libraries(pypy_c, options, basedir)
- except MissingDependenciesError:
+ failures = create_cffi_import_libraries(pypy_c, options, basedir)
+ for key, module in failures:
+ print >>sys.stderr, """!!!!!!!!!!\nBuilding {0} bindings failed.
+ You can either install development headers package,
+ add the --without-{0} option to skip packaging this
+ binary CFFI extension, or say --without-cffi.""".format(key)
+ if len(failures) > 0:
return 1, None
if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'):
diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -1207,7 +1207,8 @@
def nomoreblocks(self, ctx):
w_exc = self.w_exc
if w_exc.w_type == const(ImportError):
- msg = 'import statement always raises %s' % self
+ msg = 'ImportError is raised in RPython: %s' % (
+ getattr(w_exc.w_value, 'value', '<not a constant message>'),)
raise ImportError(msg)
link = Link([w_exc.w_type, w_exc.w_value], ctx.graph.exceptblock)
ctx.recorder.crnt_block.closeblock(link)
diff --git a/rpython/flowspace/test/cant_import.py b/rpython/flowspace/test/cant_import.py
new file mode 100644
--- /dev/null
+++ b/rpython/flowspace/test/cant_import.py
@@ -0,0 +1,1 @@
+raise ImportError("some explanation here")
diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py
--- a/rpython/flowspace/test/test_objspace.py
+++ b/rpython/flowspace/test/test_objspace.py
@@ -816,6 +816,12 @@
from rpython import this_does_not_exist
py.test.raises(ImportError, 'self.codetest(f)')
+ def test_importerror_3(self):
+ def f():
+ import rpython.flowspace.test.cant_import
+ e = py.test.raises(ImportError, 'self.codetest(f)')
+ assert "some explanation here" in str(e.value)
+
def test_relative_import(self):
def f():
from ..objspace import build_flow
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -73,8 +73,6 @@
self.emit_pending_zeros()
elif op.can_malloc():
self.emitting_an_operation_that_can_collect()
- elif op.getopnum() == rop.DEBUG_MERGE_POINT:
- continue # ignore debug_merge_points
elif op.getopnum() == rop.LABEL:
self.emitting_an_operation_that_can_collect()
self.known_lengths.clear()
diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py
--- a/rpython/jit/metainterp/heapcache.py
+++ b/rpython/jit/metainterp/heapcache.py
@@ -60,6 +60,26 @@
if not value.is_unescaped:
del d[value]
+
+class FieldUpdater(object):
+ def __init__(self, heapcache, value, cache, fieldvalue):
+ self.heapcache = heapcache
+ self.value = value
+ self.cache = cache
+ if fieldvalue is not None:
+ self.currfieldbox = fieldvalue.box
+ else:
+ self.currfieldbox = None
+
+ def getfield_now_known(self, fieldbox):
+ fieldvalue = self.heapcache.getvalue(fieldbox)
+ self.cache.read_now_known(self.value, fieldvalue)
+
+ def setfield(self, fieldbox):
+ fieldvalue = self.heapcache.getvalue(fieldbox)
+ self.cache.do_write_with_aliasing(self.value, fieldvalue)
+
+
class HeapCache(object):
def __init__(self):
self.reset()
@@ -98,9 +118,9 @@
self.heap_cache = {}
self.heap_array_cache = {}
- def getvalue(self, box):
+ def getvalue(self, box, create=True):
value = self.values.get(box, None)
- if not value:
+ if not value and create:
value = self.values[box] = HeapCacheValue(box)
return value
@@ -111,25 +131,26 @@
self.mark_escaped(opnum, descr, argboxes)
self.clear_caches(opnum, descr, argboxes)
+ def _escape_from_write(self, box, fieldbox):
+ value = self.getvalue(box, create=False)
+ fieldvalue = self.getvalue(fieldbox, create=False)
+ if (value is not None and value.is_unescaped and
+ fieldvalue is not None and fieldvalue.is_unescaped):
+ if value.dependencies is None:
+ value.dependencies = []
+ value.dependencies.append(fieldvalue)
+ elif fieldvalue is not None:
+ self._escape(fieldvalue)
+
def mark_escaped(self, opnum, descr, argboxes):
if opnum == rop.SETFIELD_GC:
assert len(argboxes) == 2
- value, fieldvalue = self.getvalues(argboxes)
- if value.is_unescaped and fieldvalue.is_unescaped:
- if value.dependencies is None:
- value.dependencies = []
- value.dependencies.append(fieldvalue)
- else:
- self._escape(fieldvalue)
+ box, fieldbox = argboxes
+ self._escape_from_write(box, fieldbox)
elif opnum == rop.SETARRAYITEM_GC:
assert len(argboxes) == 3
- value, indexvalue, fieldvalue = self.getvalues(argboxes)
- if value.is_unescaped and fieldvalue.is_unescaped:
- if value.dependencies is None:
- value.dependencies = []
- value.dependencies.append(fieldvalue)
- else:
- self._escape(fieldvalue)
+ box, indexbox, fieldbox = argboxes
+ self._escape_from_write(box, fieldbox)
elif (opnum == rop.CALL and
descr.get_extra_info().oopspecindex == descr.get_extra_info().OS_ARRAYCOPY and
isinstance(argboxes[3], ConstInt) and
@@ -153,7 +174,7 @@
self._escape_box(box)
def _escape_box(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if not value:
return
self._escape(value)
@@ -261,7 +282,7 @@
self.reset_keep_likely_virtuals()
def is_class_known(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.known_class
return False
@@ -270,7 +291,7 @@
self.getvalue(box).known_class = True
def is_nonstandard_virtualizable(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.nonstandard_virtualizable
return False
@@ -279,13 +300,13 @@
self.getvalue(box).nonstandard_virtualizable = True
def is_unescaped(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.is_unescaped
return False
def is_likely_virtual(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.likely_virtual
return False
@@ -301,7 +322,7 @@
self.arraylen_now_known(box, lengthbox)
def getfield(self, box, descr):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
cache = self.heap_cache.get(descr, None)
if cache:
@@ -310,26 +331,28 @@
return tovalue.box
return None
- def getfield_now_known(self, box, descr, fieldbox):
+ def get_field_updater(self, box, descr):
value = self.getvalue(box)
- fieldvalue = self.getvalue(fieldbox)
cache = self.heap_cache.get(descr, None)
if cache is None:
cache = self.heap_cache[descr] = CacheEntry()
- cache.read_now_known(value, fieldvalue)
+ fieldvalue = None
+ else:
+ fieldvalue = cache.read(value)
+ return FieldUpdater(self, value, cache, fieldvalue)
+
+ def getfield_now_known(self, box, descr, fieldbox):
+ upd = self.get_field_updater(box, descr)
+ upd.getfield_now_known(fieldbox)
def setfield(self, box, fieldbox, descr):
- cache = self.heap_cache.get(descr, None)
- if cache is None:
- cache = self.heap_cache[descr] = CacheEntry()
- value = self.getvalue(box)
- fieldvalue = self.getvalue(fieldbox)
- cache.do_write_with_aliasing(value, fieldvalue)
+ upd = self.get_field_updater(box, descr)
+ upd.setfield(fieldbox)
def getarrayitem(self, box, indexbox, descr):
if not isinstance(indexbox, ConstInt):
return None
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value is None:
return None
index = indexbox.getint()
@@ -373,7 +396,7 @@
indexcache.do_write_with_aliasing(value, fieldvalue)
def arraylen(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value and value.length:
return value.length.box
return None
@@ -383,7 +406,7 @@
value.length = self.getvalue(lengthbox)
def replace_box(self, oldbox, newbox):
- value = self.values.get(oldbox, None)
+ value = self.getvalue(oldbox, create=False)
if value is None:
return
value.box = newbox
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -187,7 +187,12 @@
[i0]
jump(i0)
"""
- self.optimize_loop(ops, expected)
+ short = """
+ [i2]
+ p3 = cast_int_to_ptr(i2)
+ jump(i2)
+ """
+ self.optimize_loop(ops, expected, expected_short=short)
def test_reverse_of_cast_2(self):
ops = """
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -649,16 +649,16 @@
@specialize.arg(1)
def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr):
- tobox = self.metainterp.heapcache.getfield(box, fielddescr)
- if tobox is not None:
+ upd = self.metainterp.heapcache.get_field_updater(box, fielddescr)
+ if upd.currfieldbox is not None:
# sanity check: see whether the current struct value
# corresponds to what the cache thinks the value is
resbox = executor.execute(self.metainterp.cpu, self.metainterp,
rop.GETFIELD_GC, fielddescr, box)
- assert resbox.constbox().same_constant(tobox.constbox())
- return tobox
+ assert resbox.constbox().same_constant(upd.currfieldbox.constbox())
+ return upd.currfieldbox
resbox = self.execute_with_descr(opnum, fielddescr, box)
- self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox)
+ upd.getfield_now_known(resbox)
return resbox
@arguments("box", "descr", "orgpc")
@@ -679,10 +679,11 @@
@arguments("box", "box", "descr")
def _opimpl_setfield_gc_any(self, box, valuebox, fielddescr):
- tobox = self.metainterp.heapcache.getfield(box, fielddescr)
- if tobox is valuebox:
+ upd = self.metainterp.heapcache.get_field_updater(box, fielddescr)
+ if upd.currfieldbox is valuebox:
return
- self.metainterp.execute_setfield_gc(fielddescr, box, valuebox)
+ self.metainterp.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox)
+ upd.setfield(valuebox)
# The following logic is disabled because buggy. It is supposed
# to be: not(we're writing null into a freshly allocated object)
# but the bug is that is_unescaped() can be True even after the
@@ -1922,9 +1923,10 @@
resbox = executor.execute(self.cpu, self, opnum, descr, *argboxes)
if rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST:
return self._record_helper_pure(opnum, resbox, descr, *argboxes)
- else:
- return self._record_helper_nonpure_varargs(opnum, resbox, descr,
- list(argboxes))
+ if rop._OVF_FIRST <= opnum <= rop._OVF_LAST:
+ return self._record_helper_ovf(opnum, resbox, descr, *argboxes)
+ return self._record_helper_nonpure_varargs(opnum, resbox, descr,
+ list(argboxes))
@specialize.arg(1)
def execute_and_record_varargs(self, opnum, argboxes, descr=None):
@@ -1951,6 +1953,12 @@
resbox = resbox.nonconstbox() # ensure it is a Box
return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
+ def _record_helper_ovf(self, opnum, resbox, descr, *argboxes):
+ if (self.last_exc_value_box is None and
+ self._all_constants(*argboxes)):
+ return resbox.constbox()
+ return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
+
def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
canfold = self._all_constants_varargs(argboxes)
if canfold:
@@ -1962,10 +1970,6 @@
def _record_helper_nonpure_varargs(self, opnum, resbox, descr, argboxes):
assert resbox is None or isinstance(resbox, Box)
- if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST and
- self.last_exc_value_box is None and
- self._all_constants_varargs(argboxes)):
- return resbox.constbox()
# record the operation
profiler = self.staticdata.profiler
profiler.count_ops(opnum, Counters.RECORDED_OPS)
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -52,21 +52,22 @@
return (op.opname in LL_OPERATIONS and
LL_OPERATIONS[op.opname].canmallocgc)
-def find_initializing_stores(collect_analyzer, graph):
- from rpython.flowspace.model import mkentrymap
- entrymap = mkentrymap(graph)
- # a bit of a hackish analysis: if a block contains a malloc and check that
- # the result is not zero, then the block following the True link will
- # usually initialize the newly allocated object
- result = set()
- def find_in_block(block, mallocvars):
+def propagate_no_write_barrier_needed(result, block, mallocvars,
+ collect_analyzer, entrymap,
+ startindex=0):
+ # We definitely know that no write barrier is needed in the 'block'
+ # for any of the variables in 'mallocvars'. Propagate this information
+ # forward. Note that "definitely know" implies that we just did either
+ # a fixed-size malloc (variable-size might require card marking), or
+ # that we just did a full write barrier (not just for card marking).
+ if 1: # keep indentation
for i, op in enumerate(block.operations):
+ if i < startindex:
+ continue
if op.opname in ("cast_pointer", "same_as"):
if op.args[0] in mallocvars:
mallocvars[op.result] = True
elif op.opname in ("setfield", "setarrayitem", "setinteriorfield"):
- # note that 'mallocvars' only tracks fixed-size mallocs,
- # so no risk that they use card marking
TYPE = op.args[-1].concretetype
if (op.args[0] in mallocvars and
isinstance(TYPE, lltype.Ptr) and
@@ -83,7 +84,15 @@
if var in mallocvars:
newmallocvars[exit.target.inputargs[i]] = True
if newmallocvars:
- find_in_block(exit.target, newmallocvars)
+ propagate_no_write_barrier_needed(result, exit.target,
+ newmallocvars,
+ collect_analyzer, entrymap)
+
+def find_initializing_stores(collect_analyzer, graph, entrymap):
+ # a bit of a hackish analysis: if a block contains a malloc and a check that
+ # the result is not zero, then the block following the True link will
+ # usually initialize the newly allocated object
+ result = set()
mallocnum = 0
blockset = set(graph.iterblocks())
while blockset:
@@ -113,7 +122,8 @@
target = exit.target
mallocvars = {target.inputargs[index]: True}
mallocnum += 1
- find_in_block(target, mallocvars)
+ propagate_no_write_barrier_needed(result, target, mallocvars,
+ collect_analyzer, entrymap)
#if result:
# print "found %s initializing stores in %s" % (len(result), graph.name)
return result
@@ -698,8 +708,11 @@
" %s" % func)
if self.write_barrier_ptr:
+ from rpython.flowspace.model import mkentrymap
+ self._entrymap = mkentrymap(graph)
self.clean_sets = (
- find_initializing_stores(self.collect_analyzer, graph))
+ find_initializing_stores(self.collect_analyzer, graph,
+ self._entrymap))
if self.gcdata.gc.can_optimize_clean_setarrayitems():
self.clean_sets = self.clean_sets.union(
find_clean_setarrayitems(self.collect_analyzer, graph))
@@ -1269,6 +1282,17 @@
hop.genop("direct_call", [self.write_barrier_ptr,
self.c_const_gc,
v_structaddr])
+ # we just did a full write barrier here, so we can use
+ # this helper to propagate this knowledge forward and
+ # avoid repeating the write barrier.
+ if self.curr_block is not None: # for tests
+ assert self.curr_block.operations[hop.index] is hop.spaceop
+ propagate_no_write_barrier_needed(self.clean_sets,
+ self.curr_block,
More information about the pypy-commit
mailing list