[pypy-commit] pypy py3k: hg merge default
mjacob
noreply at buildbot.pypy.org
Fri Jun 26 16:26:10 CEST 2015
Author: Manuel Jacob <me at manueljacob.de>
Branch: py3k
Changeset: r78323:85bc12fb4725
Date: 2015-06-26 16:26 +0200
http://bitbucket.org/pypy/pypy/changeset/85bc12fb4725/
Log: hg merge default
diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py
--- a/lib_pypy/_tkinter/tclobj.py
+++ b/lib_pypy/_tkinter/tclobj.py
@@ -103,6 +103,8 @@
return value.internalRep.doubleValue
if value.typePtr == typeCache.IntType:
return value.internalRep.longValue
+ if value.typePtr == typeCache.WideIntType:
+ return FromWideIntObj(app, value)
if value.typePtr == typeCache.BigNumType and tklib.HAVE_LIBTOMMATH:
return FromBignumObj(app, value)
if value.typePtr == typeCache.ListType:
diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py
--- a/lib_pypy/_tkinter/tklib_build.py
+++ b/lib_pypy/_tkinter/tklib_build.py
@@ -179,6 +179,7 @@
typedef int... Tcl_WideInt;
int Tcl_GetWideIntFromObj(Tcl_Interp *interp, Tcl_Obj *obj, Tcl_WideInt *value);
+Tcl_Obj *Tcl_NewWideIntObj(Tcl_WideInt value);
""")
if HAVE_LIBTOMMATH:
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -6,15 +6,9 @@
C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_
project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API.
-**NOTE**: As of 1st of December, PyPy comes with ``--shared`` by default
-on linux, linux64 and windows. We will make it the default on all platforms
-by the time of the next release.
-
-The first thing that you need is to compile PyPy yourself with the option
-``--shared``. We plan to make ``--shared`` the default in the future. Consult
-the `how to compile PyPy`_ doc for details. This will result in ``libpypy.so``
-or ``pypy.dll`` file or something similar, depending on your platform. Consult
-your platform specification for details.
+**NOTE**: You need a PyPy compiled with the option ``--shared``, i.e.
+with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in
+recent versions of PyPy.
The resulting shared library exports very few functions, however they are
enough to accomplish everything you need, provided you follow a few principles.
@@ -75,10 +69,12 @@
Note that this API is a lot more minimal than say CPython C API, so at first
it's obvious to think that you can't do much. However, the trick is to do
all the logic in Python and expose it via `cffi`_ callbacks. Let's assume
-we're on linux and pypy is installed in ``/opt/pypy`` with the
+we're on linux and pypy is installed in ``/opt/pypy`` (with
+subdirectories like ``lib-python`` and ``lib_pypy``), and with the
library in ``/opt/pypy/bin/libpypy-c.so``. (It doesn't need to be
-installed; you can also replace this path with your local checkout.)
-We write a little C program:
+installed; you can also replace these paths with a local extract of the
+installation tarballs, or with your local checkout of pypy.) We write a
+little C program:
.. code-block:: c
@@ -92,7 +88,9 @@
int res;
rpython_startup_code();
- res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1);
+ /* note: in the path /opt/pypy/x, the final x is ignored and
+ replaced with lib-python and lib_pypy. */
+ res = pypy_setup_home("/opt/pypy/x", 1);
if (res) {
printf("Error setting pypy home!\n");
return 1;
@@ -179,7 +177,7 @@
int res;
rpython_startup_code();
- res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1);
+ res = pypy_setup_home("/opt/pypy/x", 1);
if (res) {
fprintf(stderr, "Error setting pypy home!\n");
return -1;
@@ -220,9 +218,15 @@
Finding pypy_home
-----------------
-Function pypy_setup_home takes one parameter - the path to libpypy. There's
-currently no "clean" way (pkg-config comes to mind) how to find this path. You
-can try the following (GNU-specific) hack (don't forget to link against *dl*):
+The function pypy_setup_home() takes as first parameter the path to a
+file from which it can deduce the location of the standard library.
+More precisely, it tries to remove final components until it finds
+``lib-python`` and ``lib_pypy``. There is currently no "clean" way
+(pkg-config comes to mind) to find this path. You can try the following
+(GNU-specific) hack (don't forget to link against *dl*), which assumes
+that the ``libpypy-c.so`` is inside the standard library directory.
+(This must more-or-less be the case anyway, otherwise the ``pypy``
+program itself would not run.)
.. code-block:: c
@@ -236,7 +240,7 @@
// caller should free returned pointer to avoid memleaks
// returns NULL on error
- char* guess_pypyhome() {
+ char* guess_pypyhome(void) {
// glibc-only (dladdr is why we #define _GNU_SOURCE)
Dl_info info;
void *_rpython_startup_code = dlsym(0,"rpython_startup_code");
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -11,3 +11,14 @@
.. branch: stdlib-2.7.10
Update stdlib to version 2.7.10
+
+.. branch: issue2062
+
+.. branch: disable-unroll-for-short-loops
+The JIT no longer performs loop unrolling if the loop compiles to too much code.
+
+.. branch: run-create_cffi_imports
+
+Build cffi import libraries as part of translation by monkey-patching an
+additional task into translation
+
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -1,6 +1,6 @@
import py
-import os, sys
+import os, sys, subprocess
import pypy
from pypy.interpreter import gateway
@@ -104,13 +104,16 @@
from pypy.module.sys.initpath import pypy_find_stdlib
verbose = rffi.cast(lltype.Signed, verbose)
if ll_home:
- home = rffi.charp2str(ll_home)
+ home1 = rffi.charp2str(ll_home)
+ home = os.path.join(home1, 'x') # <- so that 'll_home' can be
+ # directly the root directory
else:
- home = pypydir
+ home = home1 = pypydir
w_path = pypy_find_stdlib(space, home)
if space.is_none(w_path):
if verbose:
- debug("Failed to find library based on pypy_find_stdlib")
+ debug("pypy_setup_home: directories 'lib-python' and 'lib_pypy'"
+ " not found in '%s' or in any parent directory" % home1)
return rffi.cast(rffi.INT, 1)
space.startup()
space.call_function(w_pathsetter, w_path)
@@ -301,6 +304,44 @@
wrapstr = 'space.wrap(%r)' % (options)
pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr
+ # HACKHACKHACK
+ # ugly hack to modify target goal from compile_c to build_cffi_imports
+ # this should probably get cleaned up and merged with driver.create_exe
+ from rpython.translator.driver import taskdef
+ import types
+
+ class Options(object):
+ pass
+
+
+ def mkexename(name):
+ if sys.platform == 'win32':
+ name = name.new(ext='exe')
+ return name
+
+ @taskdef(['compile_c'], "Create cffi bindings for modules")
+ def task_build_cffi_imports(self):
+ from pypy.tool.build_cffi_imports import create_cffi_import_libraries
+ ''' Use cffi to compile cffi interfaces to modules'''
+ exename = mkexename(driver.compute_exe_name())
+ basedir = exename
+ while not basedir.join('include').exists():
+ _basedir = basedir.dirpath()
+ if _basedir == basedir:
+ raise ValueError('interpreter %s not inside pypy repo',
+ str(exename))
+ basedir = _basedir
+ modules = self.config.objspace.usemodules.getpaths()
+ options = Options()
+ # XXX possibly adapt options using modules
+ failures = create_cffi_import_libraries(exename, options, basedir)
+ # if failures, they were already printed
+ print >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored'
+ driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver)
+ driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, ['compile_c']
+ driver.default_goal = 'build_cffi_imports'
+ # HACKHACKHACK end
+
return self.get_entry_point(config)
def jitpolicy(self, driver):
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -766,6 +766,7 @@
# This is important for py3k
sys.executable = executable
+ at hidden_applevel
def entry_point(executable, argv):
# note that before calling setup_bootstrap_path, we are limited because we
# cannot import stdlib modules. In particular, we cannot use unicode
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -1,6 +1,7 @@
import sys
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
+from rpython.rlib.objectmodel import specialize
from rpython.rlib import jit
TICK_COUNTER_STEP = 100
diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py
--- a/pypy/interpreter/pytraceback.py
+++ b/pypy/interpreter/pytraceback.py
@@ -61,7 +61,6 @@
def check_traceback(space, w_tb, msg):
- from pypy.interpreter.typedef import PyTraceback
if w_tb is None or not space.isinstance_w(w_tb, space.gettypeobject(PyTraceback.typedef)):
raise OperationError(space.w_TypeError, space.wrap(msg))
return w_tb
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -143,7 +143,7 @@
@jit.unroll_safe
def _call(self, funcaddr, args_w):
space = self.space
- cif_descr = self.cif_descr
+ cif_descr = self.cif_descr # 'self' should have been promoted here
size = cif_descr.exchange_size
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -134,8 +134,7 @@
def convert_to_object(self, cdata):
unichardata = rffi.cast(rffi.CWCHARP, cdata)
- s = rffi.wcharpsize2unicode(unichardata, 1)
- return self.space.wrap(s)
+ return self.space.wrap(unichardata[0])
def string(self, cdataobj, maxlen):
with cdataobj as ptr:
diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -60,12 +60,12 @@
self.ffi, self.ctx.c_types, getarg(g.c_type_op))
assert isinstance(rawfunctype, realize_c_type.W_RawFuncType)
#
- w_ct, locs = rawfunctype.unwrap_as_nostruct_fnptr(self.ffi)
+ rawfunctype.prepare_nostruct_fnptr(self.ffi)
#
ptr = rffi.cast(rffi.CCHARP, g.c_address)
assert ptr
- return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn, w_ct,
- locs, rawfunctype, fnname, self.libname)
+ return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn,
+ rawfunctype, fnname, self.libname)
@jit.elidable_promote()
def _get_attr_elidable(self, attr):
@@ -173,6 +173,8 @@
if w_value is None:
if is_getattr and attr == '__all__':
return self.dir1(ignore_type=cffi_opcode.OP_GLOBAL_VAR)
+ if is_getattr and attr == '__dict__':
+ return self.full_dict_copy()
raise oefmt(self.space.w_AttributeError,
"cffi library '%s' has no function, constant "
"or global variable named '%s'",
@@ -212,6 +214,17 @@
names_w.append(space.wrap(rffi.charp2str(g[i].c_name)))
return space.newlist(names_w)
+ def full_dict_copy(self):
+ space = self.space
+ total = rffi.getintfield(self.ctx, 'c_num_globals')
+ g = self.ctx.c_globals
+ w_result = space.newdict()
+ for i in range(total):
+ w_attr = space.wrap(rffi.charp2str(g[i].c_name))
+ w_value = self._get_attr(w_attr)
+ space.setitem(w_result, w_attr, w_value)
+ return w_result
+
def address_of_func_or_global_var(self, varname):
# rebuild a string object from 'varname', to do typechecks and
# to force a unicode back to a plain string
@@ -224,7 +237,8 @@
if isinstance(w_value, W_FunctionWrapper):
# '&func' returns a regular cdata pointer-to-function
if w_value.directfnptr:
- return W_CData(space, w_value.directfnptr, w_value.ctype)
+ ctype = w_value.typeof(self.ffi)
+ return W_CData(space, w_value.directfnptr, ctype)
else:
return w_value # backward compatibility
#
diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py
--- a/pypy/module/_cffi_backend/realize_c_type.py
+++ b/pypy/module/_cffi_backend/realize_c_type.py
@@ -1,4 +1,5 @@
import sys
+from rpython.rlib import jit
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.objectmodel import specialize
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -135,8 +136,12 @@
class W_RawFuncType(W_Root):
"""Temporary: represents a C function type (not a function pointer)"""
+
+ _immutable_fields_ = ['nostruct_ctype', 'nostruct_locs', 'nostruct_nargs']
_ctfuncptr = None
- _nostruct_ctfuncptr = (None, None)
+ nostruct_ctype = None
+ nostruct_locs = None
+ nostruct_nargs = 0
def __init__(self, opcodes, base_index):
self.opcodes = opcodes
@@ -168,14 +173,16 @@
assert self._ctfuncptr is not None
return self._ctfuncptr
- def unwrap_as_nostruct_fnptr(self, ffi):
- # tweaked version: instead of returning the ctfuncptr corresponding
- # exactly to the OP_FUNCTION ... OP_FUNCTION_END opcodes, return
- # another one in which the struct args are replaced with ptr-to-
- # struct, and a struct return value is replaced with a hidden first
- # arg of type ptr-to-struct. This is how recompiler.py produces
+ @jit.dont_look_inside
+ def prepare_nostruct_fnptr(self, ffi):
+ # tweaked version: instead of returning the ctfuncptr
+ # corresponding exactly to the OP_FUNCTION ... OP_FUNCTION_END
+ # opcodes, this builds in self.nostruct_ctype another one in
+ # which the struct args are replaced with ptr-to- struct, and
+ # a struct return value is replaced with a hidden first arg of
+ # type ptr-to-struct. This is how recompiler.py produces
# trampoline functions for PyPy.
- if self._nostruct_ctfuncptr[0] is None:
+ if self.nostruct_ctype is None:
fargs, fret, ellipsis = self._unpack(ffi)
# 'locs' will be a string of the same length as the final fargs,
# containing 'A' where a struct argument was detected, and 'R'
@@ -198,8 +205,10 @@
locs = None
else:
locs = ''.join(locs)
- self._nostruct_ctfuncptr = (ctfuncptr, locs)
- return self._nostruct_ctfuncptr
+ self.nostruct_ctype = ctfuncptr
+ self.nostruct_locs = locs
+ self.nostruct_nargs = len(ctfuncptr.fargs) - (locs is not None and
+ locs[0] == 'R')
def unexpected_fn_type(self, ffi):
fargs, fret, ellipsis = self._unpack(ffi)
diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c
--- a/pypy/module/_cffi_backend/src/parse_c_type.c
+++ b/pypy/module/_cffi_backend/src/parse_c_type.c
@@ -362,7 +362,7 @@
case TOK_INTEGER:
errno = 0;
-#ifndef MS_WIN32
+#ifndef _MSC_VER
if (sizeof(length) > sizeof(unsigned long))
length = strtoull(tok->p, &endptr, 0);
else
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -276,6 +276,15 @@
""")
lib.aa = 5
assert dir(lib) == ['aa', 'ff', 'my_constant']
+ #
+ aaobj = lib.__dict__['aa']
+ assert not isinstance(aaobj, int) # some internal object instead
+ assert lib.__dict__ == {
+ 'ff': lib.ff,
+ 'aa': aaobj,
+ 'my_constant': -45}
+ lib.__dict__['ff'] = "??"
+ assert lib.ff(10) == 15
def test_verify_opaque_struct(self):
ffi, lib = self.prepare(
@@ -819,6 +828,22 @@
assert isinstance(addr, ffi.CData)
assert ffi.typeof(addr) == ffi.typeof("long(*)(long)")
+ def test_address_of_function_with_struct(self):
+ ffi, lib = self.prepare(
+ "struct foo_s { int x; }; long myfunc(struct foo_s);",
+ "test_addressof_function_with_struct", """
+ struct foo_s { int x; };
+ char myfunc(struct foo_s input) { return (char)(input.x + 42); }
+ """)
+ s = ffi.new("struct foo_s *", [5])[0]
+ assert lib.myfunc(s) == 47
+ assert not isinstance(lib.myfunc, ffi.CData)
+ assert ffi.typeof(lib.myfunc) == ffi.typeof("long(*)(struct foo_s)")
+ addr = ffi.addressof(lib, 'myfunc')
+ assert addr(s) == 47
+ assert isinstance(addr, ffi.CData)
+ assert ffi.typeof(addr) == ffi.typeof("long(*)(struct foo_s)")
+
def test_issue198(self):
ffi, lib = self.prepare("""
typedef struct{...;} opaque_t;
@@ -984,5 +1009,5 @@
assert sys.modules['_CFFI_test_import_from_lib.lib'] is lib
from _CFFI_test_import_from_lib.lib import MYFOO
assert MYFOO == 42
- assert not hasattr(lib, '__dict__')
+ assert hasattr(lib, '__dict__')
assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar'
diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py
--- a/pypy/module/_cffi_backend/wrapper.py
+++ b/pypy/module/_cffi_backend/wrapper.py
@@ -19,12 +19,20 @@
wrapper is callable, and the arguments it expects and returns
are directly the struct/union. Calling ffi.typeof(wrapper)
also returns the original struct/union signature.
+
+ This class cannot be used for variadic functions.
"""
_immutable_ = True
common_doc_str = 'direct call to the C function of the same name'
- def __init__(self, space, fnptr, directfnptr, ctype,
- locs, rawfunctype, fnname, modulename):
+ def __init__(self, space, fnptr, directfnptr,
+ rawfunctype, fnname, modulename):
+ # everything related to the type of the function is accessed
+ # as immutable attributes of the 'rawfunctype' object, which
+ # is a W_RawFuncType. This gives us an obvious thing to
+ # promote in order to do the call.
+ ctype = rawfunctype.nostruct_ctype
+ locs = rawfunctype.nostruct_locs
assert isinstance(ctype, W_CTypeFunc)
assert ctype.cif_descr is not None # not for '...' functions
assert locs is None or len(ctype.fargs) == len(locs)
@@ -32,83 +40,86 @@
self.space = space
self.fnptr = fnptr
self.directfnptr = directfnptr
- self.ctype = ctype
- self.locs = locs
self.rawfunctype = rawfunctype
self.fnname = fnname
self.modulename = modulename
- self.nargs_expected = len(ctype.fargs) - (locs is not None and
- locs[0] == 'R')
def typeof(self, ffi):
return self.rawfunctype.unwrap_as_fnptr(ffi)
- @jit.unroll_safe
- def _prepare(self, args_w, start_index):
- # replaces struct/union arguments with ptr-to-struct/union arguments
+ def descr_call(self, args_w):
space = self.space
- locs = self.locs
- fargs = self.ctype.fargs
- for i in range(start_index, len(locs)):
- if locs[i] != 'A':
- continue
- w_arg = args_w[i]
- farg = fargs[i] # <ptr to struct/union>
- assert isinstance(farg, W_CTypePtrOrArray)
- if isinstance(w_arg, W_CData) and w_arg.ctype is farg.ctitem:
- # fast way: we are given a W_CData "struct", so just make
- # a new W_CData "ptr-to-struct" which points to the same
- # raw memory. We use unsafe_escaping_ptr(), so we have to
- # make sure the original 'w_arg' stays alive; the easiest
- # is to build an instance of W_CDataPtrToStructOrUnion.
- w_arg = W_CDataPtrToStructOrUnion(
- space, w_arg.unsafe_escaping_ptr(), farg, w_arg)
- else:
- # slow way: build a new "ptr to struct" W_CData by calling
- # the equivalent of ffi.new()
- if space.is_w(w_arg, space.w_None):
- continue
- w_arg = farg.newp(w_arg)
- args_w[i] = w_arg
-
- def descr_call(self, args_w):
- if len(args_w) != self.nargs_expected:
- space = self.space
- if self.nargs_expected == 0:
+ rawfunctype = jit.promote(self.rawfunctype)
+ ctype = rawfunctype.nostruct_ctype
+ locs = rawfunctype.nostruct_locs
+ nargs_expected = rawfunctype.nostruct_nargs
+ #
+ if len(args_w) != nargs_expected:
+ if nargs_expected == 0:
raise oefmt(space.w_TypeError,
"%s() takes no arguments (%d given)",
self.fnname, len(args_w))
- elif self.nargs_expected == 1:
+ elif nargs_expected == 1:
raise oefmt(space.w_TypeError,
"%s() takes exactly one argument (%d given)",
self.fnname, len(args_w))
else:
raise oefmt(space.w_TypeError,
"%s() takes exactly %d arguments (%d given)",
- self.fnname, self.nargs_expected, len(args_w))
+ self.fnname, nargs_expected, len(args_w))
#
- if self.locs is not None:
+ if locs is not None:
# This case is if there are structs as arguments or return values.
# If the result we want to present to the user is "returns struct",
# then internally allocate the struct and pass a pointer to it as
# a first argument.
- if self.locs[0] == 'R':
- w_result_cdata = self.ctype.fargs[0].newp(self.space.w_None)
+ if locs[0] == 'R':
+ w_result_cdata = ctype.fargs[0].newp(space.w_None)
args_w = [w_result_cdata] + args_w
- self._prepare(args_w, 1)
- self.ctype._call(self.fnptr, args_w) # returns w_None
+ prepare_args(space, rawfunctype, args_w, 1)
+ #
+ ctype._call(self.fnptr, args_w) # returns w_None
+ #
assert isinstance(w_result_cdata, W_CDataPtrToStructOrUnion)
return w_result_cdata.structobj
else:
args_w = args_w[:]
- self._prepare(args_w, 0)
+ prepare_args(space, rawfunctype, args_w, 0)
#
- return self.ctype._call(self.fnptr, args_w)
+ return ctype._call(self.fnptr, args_w)
def descr_repr(self, space):
return space.wrap("<FFIFunctionWrapper for %s()>" % (self.fnname,))
+ at jit.unroll_safe
+def prepare_args(space, rawfunctype, args_w, start_index):
+ # replaces struct/union arguments with ptr-to-struct/union arguments
+ locs = rawfunctype.nostruct_locs
+ fargs = rawfunctype.nostruct_ctype.fargs
+ for i in range(start_index, len(locs)):
+ if locs[i] != 'A':
+ continue
+ w_arg = args_w[i]
+ farg = fargs[i] # <ptr to struct/union>
+ assert isinstance(farg, W_CTypePtrOrArray)
+ if isinstance(w_arg, W_CData) and w_arg.ctype is farg.ctitem:
+ # fast way: we are given a W_CData "struct", so just make
+ # a new W_CData "ptr-to-struct" which points to the same
+ # raw memory. We use unsafe_escaping_ptr(), so we have to
+ # make sure the original 'w_arg' stays alive; the easiest
+ # is to build an instance of W_CDataPtrToStructOrUnion.
+ w_arg = W_CDataPtrToStructOrUnion(
+ space, w_arg.unsafe_escaping_ptr(), farg, w_arg)
+ else:
+ # slow way: build a new "ptr to struct" W_CData by calling
+ # the equivalent of ffi.new()
+ if space.is_w(w_arg, space.w_None):
+ continue
+ w_arg = farg.newp(w_arg)
+ args_w[i] = w_arg
+
+
W_FunctionWrapper.typedef = TypeDef(
'FFIFunctionWrapper',
__repr__ = interp2app(W_FunctionWrapper.descr_repr),
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -625,6 +625,7 @@
def read_w(self, space, w_size=None):
self._check_attached(space)
+ self._check_closed(space)
if not self.w_decoder:
self._unsupportedoperation(space, "not readable")
@@ -666,6 +667,7 @@
def readline_w(self, space, w_limit=None):
self._check_attached(space)
+ self._check_closed(space)
self._writeflush(space)
limit = convert_size(space, w_limit)
@@ -761,7 +763,7 @@
def write_w(self, space, w_text):
self._check_attached(space)
- # self._check_closed(space)
+ self._check_closed(space)
if not self.w_encoder:
self._unsupportedoperation(space, "not writable")
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -429,3 +429,55 @@
f.seek(1, 0)
f.read(buffer_size * 2)
assert f.tell() == 1 + buffer_size * 2
+
+
+class AppTestIoAferClose:
+ spaceconfig = dict(usemodules=['_io'])
+
+ def setup_class(cls):
+ tmpfile = udir.join('tmpfile').ensure()
+ cls.w_tmpfile = cls.space.wrap(str(tmpfile))
+
+ def test_io_after_close(self):
+ import _io
+ for kwargs in [
+ {"mode": "w"},
+ {"mode": "wb"},
+ {"mode": "w", "buffering": 1},
+ {"mode": "w", "buffering": 2},
+ {"mode": "wb", "buffering": 0},
+ {"mode": "r"},
+ {"mode": "rb"},
+ {"mode": "r", "buffering": 1},
+ {"mode": "r", "buffering": 2},
+ {"mode": "rb", "buffering": 0},
+ {"mode": "w+"},
+ {"mode": "w+b"},
+ {"mode": "w+", "buffering": 1},
+ {"mode": "w+", "buffering": 2},
+ {"mode": "w+b", "buffering": 0},
+ ]:
+ print kwargs
+ f = _io.open(self.tmpfile, **kwargs)
+ f.close()
+ raises(ValueError, f.flush)
+ raises(ValueError, f.fileno)
+ raises(ValueError, f.isatty)
+ raises(ValueError, f.__iter__)
+ if hasattr(f, "peek"):
+ raises(ValueError, f.peek, 1)
+ raises(ValueError, f.read)
+ if hasattr(f, "read1"):
+ raises(ValueError, f.read1, 1024)
+ if hasattr(f, "readall"):
+ raises(ValueError, f.readall)
+ if hasattr(f, "readinto"):
+ raises(ValueError, f.readinto, bytearray(1024))
+ raises(ValueError, f.readline)
+ raises(ValueError, f.readlines)
+ raises(ValueError, f.seek, 0)
+ raises(ValueError, f.tell)
+ raises(ValueError, f.truncate)
+ raises(ValueError, f.write, b"" if "b" in kwargs['mode'] else u"")
+ raises(ValueError, f.writelines, [])
+ raises(ValueError, next, f)
diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py
--- a/pypy/module/_socket/__init__.py
+++ b/pypy/module/_socket/__init__.py
@@ -18,6 +18,10 @@
from rpython.rlib.rsocket import rsocket_startup
rsocket_startup()
+ def shutdown(self, space):
+ from pypy.module._socket.interp_socket import close_all_sockets
+ close_all_sockets(space)
+
def buildloaders(cls):
from rpython.rlib import rsocket
for name in """
diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py
--- a/pypy/module/_socket/interp_func.py
+++ b/pypy/module/_socket/interp_func.py
@@ -2,7 +2,7 @@
from rpython.rlib.rsocket import SocketError, INVALID_SOCKET
from rpython.rlib.rarithmetic import intmask
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from pypy.module._socket.interp_socket import (
converted_error, W_Socket, addr_as_object, fill_from_object, get_error,
@@ -147,6 +147,19 @@
newfd = rsocket.dup(fd)
return space.wrap(newfd)
+ at unwrap_spec(fd=int, family=int, type=int, proto=int)
+def fromfd(space, fd, family, type, proto=0):
+ """fromfd(fd, family, type[, proto]) -> socket object
+
+ Create a socket object from the given file descriptor.
+ The remaining arguments are the same as for socket().
+ """
+ try:
+ sock = rsocket.fromfd(fd, family, type, proto)
+ except SocketError, e:
+ raise converted_error(space, e)
+ return space.wrap(W_Socket(space, sock))
+
@unwrap_spec(family=int, type=int, proto=int)
def socketpair(space, family=rsocket.socketpair_default_family,
type =rsocket.SOCK_STREAM,
@@ -163,8 +176,8 @@
except SocketError, e:
raise converted_error(space, e)
return space.newtuple([
- space.wrap(W_Socket(sock1)),
- space.wrap(W_Socket(sock2))
+ space.wrap(W_Socket(space, sock1)),
+ space.wrap(W_Socket(space, sock2))
])
# The following 4 functions refuse all negative numbers, like CPython 2.6.
@@ -250,9 +263,9 @@
ip = rsocket.inet_ntop(family, packed)
except SocketError, e:
raise converted_error(space, e)
- except ValueError, e: # XXX the message is lost in RPython
- raise OperationError(space.w_ValueError,
- space.wrap(str(e)))
+ except ValueError:
+ raise oefmt(space.w_ValueError,
+ "invalid length of packed IP address string")
return space.wrap(ip)
@unwrap_spec(family=int, type=int, proto=int, flags=int)
diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py
--- a/pypy/module/_socket/interp_socket.py
+++ b/pypy/module/_socket/interp_socket.py
@@ -1,4 +1,5 @@
-from rpython.rlib import rsocket
+import sys
+from rpython.rlib import rsocket, rweaklist
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.rsocket import (
RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno,
@@ -158,12 +159,10 @@
class W_Socket(W_Root):
-
- # for _dealloc_warn
- space = None
-
- def __init__(self, sock):
+ def __init__(self, space, sock):
+ self.space = space
self.sock = sock
+ register_socket(space, sock)
def descr_new(space, w_subtype, __args__):
sock = space.allocate_instance(W_Socket, w_subtype)
@@ -179,8 +178,7 @@
fd=space.c_filedescriptor_w(w_fileno))
else:
sock = RSocket(family, type, proto)
- W_Socket.__init__(self, sock)
- self.space = space
+ W_Socket.__init__(self, space, sock)
except SocketError, e:
raise converted_error(space, e)
@@ -617,6 +615,45 @@
# ____________________________________________________________
+# Automatic shutdown()/close()
+
+# On some systems, the C library does not guarantee that when the program
+# finishes, all data sent so far is really sent even if the socket is not
+# explicitly closed. This behavior has been observed on Windows but not
+# on Linux, so far.
+NEED_EXPLICIT_CLOSE = (sys.platform == 'win32')
+
+class OpenRSockets(rweaklist.RWeakListMixin):
+ pass
+class OpenRSocketsState:
+ def __init__(self, space):
+ self.openrsockets = OpenRSockets()
+ self.openrsockets.initialize()
+
+def getopenrsockets(space):
+ if NEED_EXPLICIT_CLOSE and space.config.translation.rweakref:
+ return space.fromcache(OpenRSocketsState).openrsockets
+ else:
+ return None
+
+def register_socket(space, socket):
+ openrsockets = getopenrsockets(space)
+ if openrsockets is not None:
+ openrsockets.add_handle(socket)
+
+def close_all_sockets(space):
+ openrsockets = getopenrsockets(space)
+ if openrsockets is not None:
+ for sock_wref in openrsockets.get_all_handles():
+ sock = sock_wref()
+ if sock is not None:
+ try:
+ sock.close()
+ except SocketError:
+ pass
+
+
+# ____________________________________________________________
# Error handling
class SocketAPI:
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -301,10 +301,16 @@
class AppTestSocket:
+ spaceconfig = dict(usemodules=['_socket', '_weakref', 'struct'])
+
def setup_class(cls):
cls.space = space
cls.w_udir = space.wrap(str(udir))
+ def teardown_class(cls):
+ if not cls.runappdirect:
+ cls.space.sys.getmodule('_socket').shutdown(cls.space)
+
def test_module(self):
import _socket
assert _socket.socket.__name__ == 'socket'
@@ -602,6 +608,12 @@
finally:
os.chdir(oldcwd)
+ def test_automatic_shutdown(self):
+ # doesn't really test anything, but at least should not explode
+ # in close_all_sockets()
+ import _socket
+ self.foo = _socket.socket()
+
def test_subclass(self):
# Socket is not created in __new__, but in __init__.
import socket
diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
--- a/pypy/module/cpyext/test/test_version.py
+++ b/pypy/module/cpyext/test/test_version.py
@@ -24,7 +24,7 @@
}
"""
module = self.import_module(name='foo', init=init)
- assert module.py_version == sys.version[:5]
+ assert module.py_version == '%d.%d.%d' % sys.version_info[:3]
assert module.py_major_version == sys.version_info.major
assert module.py_minor_version == sys.version_info.minor
assert module.py_micro_version == sys.version_info.micro
diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py
--- a/pypy/module/math/interp_math.py
+++ b/pypy/module/math/interp_math.py
@@ -361,7 +361,7 @@
else:
partials.append(v)
if special_sum != 0.0:
- if rfloat.isnan(special_sum):
+ if rfloat.isnan(inf_sum):
raise OperationError(space.w_ValueError, space.wrap("-inf + inf"))
return space.wrap(special_sum)
hi = 0.0
diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py
--- a/pypy/module/math/test/test_math.py
+++ b/pypy/module/math/test/test_math.py
@@ -1,5 +1,6 @@
from __future__ import with_statement
+import py
from pypy.interpreter.function import Function
from pypy.interpreter.gateway import BuiltinCode
from pypy.module.math.test import test_direct
@@ -113,6 +114,10 @@
([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
[-2.**1022],
float.fromhex('0x1.5555555555555p+970')),
+ # infinity and nans
+ ([float("inf")], float("inf")),
+ ([float("-inf")], float("-inf")),
+ ([float("nan")], float("nan")),
]
for i, (vals, expected) in enumerate(test_values):
@@ -124,7 +129,8 @@
except ValueError:
py.test.fail("test %d failed: got ValueError, expected %r "
"for math.fsum(%.100r)" % (i, expected, vals))
- assert actual == expected
+ assert actual == expected or (
+ math.isnan(actual) and math.isnan(expected))
def test_factorial(self):
import math, sys
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -266,6 +266,15 @@
""")
lib.aa = 5
assert dir(lib) == ['aa', 'ff', 'my_constant']
+ #
+ aaobj = lib.__dict__['aa']
+ assert not isinstance(aaobj, int) # some internal object instead
+ assert lib.__dict__ == {
+ 'ff': lib.ff,
+ 'aa': aaobj,
+ 'my_constant': -45}
+ lib.__dict__['ff'] = "??"
+ assert lib.ff(10) == 15
def test_verify_opaque_struct():
ffi = FFI()
@@ -1053,5 +1062,5 @@
assert sys.modules['_CFFI_test_import_from_lib.lib'] is lib
from _CFFI_test_import_from_lib.lib import MYFOO
assert MYFOO == 42
- assert not hasattr(lib, '__dict__')
+ assert hasattr(lib, '__dict__')
assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar'
diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/build_cffi_imports.py
@@ -0,0 +1,75 @@
+import sys, shutil
+from rpython.tool.runsubprocess import run_subprocess
+
+class MissingDependenciesError(Exception):
+ pass
+
+
+cffi_build_scripts = {
+ "sqlite3": "_sqlite3_build.py",
+ "audioop": "_audioop_build.py",
+ "tk": "_tkinter/tklib_build.py",
+ "curses": "_curses_build.py" if sys.platform != "win32" else None,
+ "syslog": "_syslog_build.py" if sys.platform != "win32" else None,
+ "_gdbm": "_gdbm_build.py" if sys.platform != "win32" else None,
+ "pwdgrp": "_pwdgrp_build.py" if sys.platform != "win32" else None,
+ "xx": None, # for testing: 'None' should be completely ignored
+ }
+
+def create_cffi_import_libraries(pypy_c, options, basedir):
+ shutil.rmtree(str(basedir.join('lib_pypy', '__pycache__')),
+ ignore_errors=True)
+ failures = []
+ for key, module in sorted(cffi_build_scripts.items()):
+ if module is None or getattr(options, 'no_' + key, False):
+ continue
+ if module.endswith('.py'):
+ args = [module]
+ cwd = str(basedir.join('lib_pypy'))
+ else:
+ args = ['-c', 'import ' + module]
+ cwd = None
+ print >> sys.stderr, '*', ' '.join(args)
+ try:
+ status, stdout, stderr = run_subprocess(str(pypy_c), args, cwd=cwd)
+ if status != 0:
+ print >> sys.stderr, stdout, stderr
+ failures.append((key, module))
+ except:
+ import traceback;traceback.print_exc()
+ failures.append((key, module))
+ return failures
+
+if __name__ == '__main__':
+ import py, os
+ if '__pypy__' not in sys.builtin_module_names:
+ print 'Call with a pypy interpreter'
+ sys.exit(-1)
+
+ class Options(object):
+ pass
+
+ exename = py.path.local(sys.executable)
+ basedir = exename
+ while not basedir.join('include').exists():
+ _basedir = basedir.dirpath()
+ if _basedir == basedir:
+ raise ValueError('interpreter %s not inside pypy repo',
+ str(exename))
+ basedir = _basedir
+ options = Options()
+ print >> sys.stderr, "There should be no failures here"
+ failures = create_cffi_import_libraries(exename, options, basedir)
+ if len(failures) > 0:
+ print 'failed to build', [f[1] for f in failures]
+ assert False
+
+ # monkey patch a failure, just to test
+ print >> sys.stderr, 'This line should be followed by a traceback'
+ for k in cffi_build_scripts:
+ setattr(options, 'no_' + k, True)
+ must_fail = '_missing_build_script.py'
+ assert not os.path.exists(str(basedir.join('lib_pypy').join(must_fail)))
+ cffi_build_scripts['should_fail'] = must_fail
+ failures = create_cffi_import_libraries(exename, options, basedir)
+ assert len(failures) == 1
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
--- a/pypy/tool/release/package.py
+++ b/pypy/tool/release/package.py
@@ -29,6 +29,9 @@
# XXX: don't hardcode the version
POSIX_EXE = 'pypy3.2'
+from pypy.tool.build_cffi_imports import (create_cffi_import_libraries,
+ MissingDependenciesError, cffi_build_scripts)
+
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
@@ -44,48 +47,12 @@
class PyPyCNotFound(Exception):
pass
-class MissingDependenciesError(Exception):
- pass
-
def fix_permissions(dirname):
if sys.platform != 'win32':
os.system("chmod -R a+rX %s" % dirname)
os.system("chmod -R g-w %s" % dirname)
-cffi_build_scripts = {
- "sqlite3": "_sqlite3_build.py",
- "audioop": "_audioop_build.py",
- "tk": "_tkinter/tklib_build.py",
- "curses": "_curses_build.py" if sys.platform != "win32" else None,
- "syslog": "_syslog_build.py" if sys.platform != "win32" else None,
- "_gdbm": "_gdbm_build.py" if sys.platform != "win32" else None,
- "pwdgrp": "_pwdgrp_build.py" if sys.platform != "win32" else None,
- "xx": None, # for testing: 'None' should be completely ignored
- }
-
-def create_cffi_import_libraries(pypy_c, options, basedir):
- shutil.rmtree(str(basedir.join('lib_pypy', '__pycache__')),
- ignore_errors=True)
- for key, module in sorted(cffi_build_scripts.items()):
- if module is None or getattr(options, 'no_' + key):
- continue
- if module.endswith('.py'):
- args = [str(pypy_c), module]
- cwd = str(basedir.join('lib_pypy'))
- else:
- args = [str(pypy_c), '-c', 'import ' + module]
- cwd = None
- print >> sys.stderr, '*', ' '.join(args)
- try:
- subprocess.check_call(args, cwd=cwd)
- except subprocess.CalledProcessError:
- print >>sys.stderr, """!!!!!!!!!!\nBuilding {0} bindings failed.
-You can either install development headers package,
-add the --without-{0} option to skip packaging this
-binary CFFI extension, or say --without-cffi.""".format(key)
- raise MissingDependenciesError(module)
-
def pypy_runs(pypy_c, quiet=False):
kwds = {}
if quiet:
@@ -117,9 +84,13 @@
if not _fake and not pypy_runs(pypy_c):
raise OSError("Running %r failed!" % (str(pypy_c),))
if not options.no_cffi:
- try:
- create_cffi_import_libraries(pypy_c, options, basedir)
- except MissingDependenciesError:
+ failures = create_cffi_import_libraries(pypy_c, options, basedir)
+ for key, module in failures:
+ print >>sys.stderr, """!!!!!!!!!!\nBuilding {0} bindings failed.
+ You can either install development headers package,
+ add the --without-{0} option to skip packaging this
+ binary CFFI extension, or say --without-cffi.""".format(key)
+ if len(failures) > 0:
return 1, None
if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'):
diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -1207,7 +1207,8 @@
def nomoreblocks(self, ctx):
w_exc = self.w_exc
if w_exc.w_type == const(ImportError):
- msg = 'import statement always raises %s' % self
+ msg = 'ImportError is raised in RPython: %s' % (
+ getattr(w_exc.w_value, 'value', '<not a constant message>'),)
raise ImportError(msg)
link = Link([w_exc.w_type, w_exc.w_value], ctx.graph.exceptblock)
ctx.recorder.crnt_block.closeblock(link)
diff --git a/rpython/flowspace/test/cant_import.py b/rpython/flowspace/test/cant_import.py
new file mode 100644
--- /dev/null
+++ b/rpython/flowspace/test/cant_import.py
@@ -0,0 +1,1 @@
+raise ImportError("some explanation here")
diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py
--- a/rpython/flowspace/test/test_objspace.py
+++ b/rpython/flowspace/test/test_objspace.py
@@ -816,6 +816,12 @@
from rpython import this_does_not_exist
py.test.raises(ImportError, 'self.codetest(f)')
+ def test_importerror_3(self):
+ def f():
+ import rpython.flowspace.test.cant_import
+ e = py.test.raises(ImportError, 'self.codetest(f)')
+ assert "some explanation here" in str(e.value)
+
def test_relative_import(self):
def f():
from ..objspace import build_flow
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -73,8 +73,6 @@
self.emit_pending_zeros()
elif op.can_malloc():
self.emitting_an_operation_that_can_collect()
- elif op.getopnum() == rop.DEBUG_MERGE_POINT:
- continue # ignore debug_merge_points
elif op.getopnum() == rop.LABEL:
self.emitting_an_operation_that_can_collect()
self.known_lengths.clear()
diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py
--- a/rpython/jit/metainterp/heapcache.py
+++ b/rpython/jit/metainterp/heapcache.py
@@ -60,6 +60,26 @@
if not value.is_unescaped:
del d[value]
+
+class FieldUpdater(object):
+ def __init__(self, heapcache, value, cache, fieldvalue):
+ self.heapcache = heapcache
+ self.value = value
+ self.cache = cache
+ if fieldvalue is not None:
+ self.currfieldbox = fieldvalue.box
+ else:
+ self.currfieldbox = None
+
+ def getfield_now_known(self, fieldbox):
+ fieldvalue = self.heapcache.getvalue(fieldbox)
+ self.cache.read_now_known(self.value, fieldvalue)
+
+ def setfield(self, fieldbox):
+ fieldvalue = self.heapcache.getvalue(fieldbox)
+ self.cache.do_write_with_aliasing(self.value, fieldvalue)
+
+
class HeapCache(object):
def __init__(self):
self.reset()
@@ -98,9 +118,9 @@
self.heap_cache = {}
self.heap_array_cache = {}
- def getvalue(self, box):
+ def getvalue(self, box, create=True):
value = self.values.get(box, None)
- if not value:
+ if not value and create:
value = self.values[box] = HeapCacheValue(box)
return value
@@ -111,25 +131,26 @@
self.mark_escaped(opnum, descr, argboxes)
self.clear_caches(opnum, descr, argboxes)
+ def _escape_from_write(self, box, fieldbox):
+ value = self.getvalue(box, create=False)
+ fieldvalue = self.getvalue(fieldbox, create=False)
+ if (value is not None and value.is_unescaped and
+ fieldvalue is not None and fieldvalue.is_unescaped):
+ if value.dependencies is None:
+ value.dependencies = []
+ value.dependencies.append(fieldvalue)
+ elif fieldvalue is not None:
+ self._escape(fieldvalue)
+
def mark_escaped(self, opnum, descr, argboxes):
if opnum == rop.SETFIELD_GC:
assert len(argboxes) == 2
- value, fieldvalue = self.getvalues(argboxes)
- if value.is_unescaped and fieldvalue.is_unescaped:
- if value.dependencies is None:
- value.dependencies = []
- value.dependencies.append(fieldvalue)
- else:
- self._escape(fieldvalue)
+ box, fieldbox = argboxes
+ self._escape_from_write(box, fieldbox)
elif opnum == rop.SETARRAYITEM_GC:
assert len(argboxes) == 3
- value, indexvalue, fieldvalue = self.getvalues(argboxes)
- if value.is_unescaped and fieldvalue.is_unescaped:
- if value.dependencies is None:
- value.dependencies = []
- value.dependencies.append(fieldvalue)
- else:
- self._escape(fieldvalue)
+ box, indexbox, fieldbox = argboxes
+ self._escape_from_write(box, fieldbox)
elif (opnum == rop.CALL and
descr.get_extra_info().oopspecindex == descr.get_extra_info().OS_ARRAYCOPY and
isinstance(argboxes[3], ConstInt) and
@@ -153,7 +174,7 @@
self._escape_box(box)
def _escape_box(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if not value:
return
self._escape(value)
@@ -261,7 +282,7 @@
self.reset_keep_likely_virtuals()
def is_class_known(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.known_class
return False
@@ -270,7 +291,7 @@
self.getvalue(box).known_class = True
def is_nonstandard_virtualizable(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.nonstandard_virtualizable
return False
@@ -279,13 +300,13 @@
self.getvalue(box).nonstandard_virtualizable = True
def is_unescaped(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.is_unescaped
return False
def is_likely_virtual(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
return value.likely_virtual
return False
@@ -301,7 +322,7 @@
self.arraylen_now_known(box, lengthbox)
def getfield(self, box, descr):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value:
cache = self.heap_cache.get(descr, None)
if cache:
@@ -310,26 +331,28 @@
return tovalue.box
return None
- def getfield_now_known(self, box, descr, fieldbox):
+ def get_field_updater(self, box, descr):
value = self.getvalue(box)
- fieldvalue = self.getvalue(fieldbox)
cache = self.heap_cache.get(descr, None)
if cache is None:
cache = self.heap_cache[descr] = CacheEntry()
- cache.read_now_known(value, fieldvalue)
+ fieldvalue = None
+ else:
+ fieldvalue = cache.read(value)
+ return FieldUpdater(self, value, cache, fieldvalue)
+
+ def getfield_now_known(self, box, descr, fieldbox):
+ upd = self.get_field_updater(box, descr)
+ upd.getfield_now_known(fieldbox)
def setfield(self, box, fieldbox, descr):
- cache = self.heap_cache.get(descr, None)
- if cache is None:
- cache = self.heap_cache[descr] = CacheEntry()
- value = self.getvalue(box)
- fieldvalue = self.getvalue(fieldbox)
- cache.do_write_with_aliasing(value, fieldvalue)
+ upd = self.get_field_updater(box, descr)
+ upd.setfield(fieldbox)
def getarrayitem(self, box, indexbox, descr):
if not isinstance(indexbox, ConstInt):
return None
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value is None:
return None
index = indexbox.getint()
@@ -373,7 +396,7 @@
indexcache.do_write_with_aliasing(value, fieldvalue)
def arraylen(self, box):
- value = self.values.get(box, None)
+ value = self.getvalue(box, create=False)
if value and value.length:
return value.length.box
return None
@@ -383,7 +406,7 @@
value.length = self.getvalue(lengthbox)
def replace_box(self, oldbox, newbox):
- value = self.values.get(oldbox, None)
+ value = self.getvalue(oldbox, create=False)
if value is None:
return
value.box = newbox
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -649,16 +649,16 @@
@specialize.arg(1)
def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr):
- tobox = self.metainterp.heapcache.getfield(box, fielddescr)
- if tobox is not None:
+ upd = self.metainterp.heapcache.get_field_updater(box, fielddescr)
+ if upd.currfieldbox is not None:
# sanity check: see whether the current struct value
# corresponds to what the cache thinks the value is
resbox = executor.execute(self.metainterp.cpu, self.metainterp,
rop.GETFIELD_GC, fielddescr, box)
- assert resbox.constbox().same_constant(tobox.constbox())
- return tobox
+ assert resbox.constbox().same_constant(upd.currfieldbox.constbox())
+ return upd.currfieldbox
resbox = self.execute_with_descr(opnum, fielddescr, box)
- self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox)
+ upd.getfield_now_known(resbox)
return resbox
@arguments("box", "descr", "orgpc")
@@ -679,10 +679,11 @@
@arguments("box", "box", "descr")
def _opimpl_setfield_gc_any(self, box, valuebox, fielddescr):
- tobox = self.metainterp.heapcache.getfield(box, fielddescr)
- if tobox is valuebox:
+ upd = self.metainterp.heapcache.get_field_updater(box, fielddescr)
+ if upd.currfieldbox is valuebox:
return
- self.metainterp.execute_setfield_gc(fielddescr, box, valuebox)
+ self.metainterp.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox)
+ upd.setfield(valuebox)
# The following logic is disabled because buggy. It is supposed
# to be: not(we're writing null into a freshly allocated object)
# but the bug is that is_unescaped() can be True even after the
@@ -1922,9 +1923,10 @@
resbox = executor.execute(self.cpu, self, opnum, descr, *argboxes)
if rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST:
return self._record_helper_pure(opnum, resbox, descr, *argboxes)
- else:
- return self._record_helper_nonpure_varargs(opnum, resbox, descr,
- list(argboxes))
+ if rop._OVF_FIRST <= opnum <= rop._OVF_LAST:
+ return self._record_helper_ovf(opnum, resbox, descr, *argboxes)
+ return self._record_helper_nonpure_varargs(opnum, resbox, descr,
+ list(argboxes))
@specialize.arg(1)
def execute_and_record_varargs(self, opnum, argboxes, descr=None):
@@ -1951,6 +1953,12 @@
resbox = resbox.nonconstbox() # ensure it is a Box
return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
+ def _record_helper_ovf(self, opnum, resbox, descr, *argboxes):
+ if (self.last_exc_value_box is None and
+ self._all_constants(*argboxes)):
+ return resbox.constbox()
+ return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
+
def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
canfold = self._all_constants_varargs(argboxes)
if canfold:
@@ -1962,10 +1970,6 @@
def _record_helper_nonpure_varargs(self, opnum, resbox, descr, argboxes):
assert resbox is None or isinstance(resbox, Box)
- if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST and
- self.last_exc_value_box is None and
- self._all_constants_varargs(argboxes)):
- return resbox.constbox()
# record the operation
profiler = self.staticdata.profiler
profiler.count_ops(opnum, Counters.RECORDED_OPS)
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -52,21 +52,22 @@
return (op.opname in LL_OPERATIONS and
LL_OPERATIONS[op.opname].canmallocgc)
-def find_initializing_stores(collect_analyzer, graph):
- from rpython.flowspace.model import mkentrymap
- entrymap = mkentrymap(graph)
- # a bit of a hackish analysis: if a block contains a malloc and check that
- # the result is not zero, then the block following the True link will
- # usually initialize the newly allocated object
- result = set()
- def find_in_block(block, mallocvars):
+def propagate_no_write_barrier_needed(result, block, mallocvars,
+ collect_analyzer, entrymap,
+ startindex=0):
+ # We definitely know that no write barrier is needed in the 'block'
+ # for any of the variables in 'mallocvars'. Propagate this information
+ # forward. Note that "definitely know" implies that we just did either
+ # a fixed-size malloc (variable-size might require card marking), or
+ # that we just did a full write barrier (not just for card marking).
+ if 1: # keep indentation
for i, op in enumerate(block.operations):
+ if i < startindex:
+ continue
if op.opname in ("cast_pointer", "same_as"):
if op.args[0] in mallocvars:
mallocvars[op.result] = True
elif op.opname in ("setfield", "setarrayitem", "setinteriorfield"):
- # note that 'mallocvars' only tracks fixed-size mallocs,
- # so no risk that they use card marking
TYPE = op.args[-1].concretetype
if (op.args[0] in mallocvars and
isinstance(TYPE, lltype.Ptr) and
@@ -83,7 +84,15 @@
if var in mallocvars:
newmallocvars[exit.target.inputargs[i]] = True
if newmallocvars:
- find_in_block(exit.target, newmallocvars)
+ propagate_no_write_barrier_needed(result, exit.target,
+ newmallocvars,
+ collect_analyzer, entrymap)
+
+def find_initializing_stores(collect_analyzer, graph, entrymap):
+ # a bit of a hackish analysis: if a block contains a malloc and check that
+ # the result is not zero, then the block following the True link will
+ # usually initialize the newly allocated object
+ result = set()
mallocnum = 0
blockset = set(graph.iterblocks())
while blockset:
@@ -113,7 +122,8 @@
target = exit.target
mallocvars = {target.inputargs[index]: True}
mallocnum += 1
- find_in_block(target, mallocvars)
+ propagate_no_write_barrier_needed(result, target, mallocvars,
+ collect_analyzer, entrymap)
#if result:
# print "found %s initializing stores in %s" % (len(result), graph.name)
return result
@@ -698,8 +708,11 @@
" %s" % func)
if self.write_barrier_ptr:
+ from rpython.flowspace.model import mkentrymap
+ self._entrymap = mkentrymap(graph)
self.clean_sets = (
- find_initializing_stores(self.collect_analyzer, graph))
+ find_initializing_stores(self.collect_analyzer, graph,
+ self._entrymap))
if self.gcdata.gc.can_optimize_clean_setarrayitems():
self.clean_sets = self.clean_sets.union(
find_clean_setarrayitems(self.collect_analyzer, graph))
@@ -1269,6 +1282,17 @@
hop.genop("direct_call", [self.write_barrier_ptr,
self.c_const_gc,
v_structaddr])
+ # we just did a full write barrier here, so we can use
+ # this helper to propagate this knowledge forward and
+ # avoid to repeat the write barrier.
+ if self.curr_block is not None: # for tests
+ assert self.curr_block.operations[hop.index] is hop.spaceop
+ propagate_no_write_barrier_needed(self.clean_sets,
+ self.curr_block,
+ {v_struct: True},
+ self.collect_analyzer,
+ self._entrymap,
+ hop.index + 1)
hop.rename('bare_' + opname)
def transform_getfield_typeptr(self, hop):
diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py
--- a/rpython/memory/gctransform/test/test_framework.py
+++ b/rpython/memory/gctransform/test/test_framework.py
@@ -1,6 +1,6 @@
from rpython.annotator.listdef import s_list_of_strings
from rpython.annotator.model import SomeInteger
-from rpython.flowspace.model import Constant, SpaceOperation
+from rpython.flowspace.model import Constant, SpaceOperation, mkentrymap
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.memory.gc.semispace import SemiSpaceGC
@@ -231,6 +231,33 @@
Constant('b', lltype.Void), varoftype(PTR_TYPE2)],
varoftype(lltype.Void)))
+def test_remove_duplicate_write_barrier():
+ from rpython.translator.c.genc import CStandaloneBuilder
+ from rpython.flowspace.model import summary
+
+ class A(object):
+ pass
+ glob_a_1 = A()
+ glob_a_2 = A()
+
+ def f(a, cond):
+ a.x = a
+ a.z = a
+ if cond:
+ a.y = a
+ def g():
+ f(glob_a_1, 5)
+ f(glob_a_2, 0)
+ t = rtype(g, [])
+ t.config.translation.gc = "minimark"
+ cbuild = CStandaloneBuilder(t, g, t.config,
+ gcpolicy=FrameworkGcPolicy2)
+ db = cbuild.generate_graphs_for_llinterp()
+
+ ff = graphof(t, f)
+ #ff.show()
+ assert summary(ff)['direct_call'] == 1 # only one remember_young_pointer
+
def test_find_initializing_stores():
class A(object):
@@ -246,7 +273,8 @@
etrafo = ExceptionTransformer(t)
graphs = etrafo.transform_completely()
collect_analyzer = CollectAnalyzer(t)
- init_stores = find_initializing_stores(collect_analyzer, t.graphs[0])
+ init_stores = find_initializing_stores(collect_analyzer, t.graphs[0],
+ mkentrymap(t.graphs[0]))
assert len(init_stores) == 1
def test_find_initializing_stores_across_blocks():
@@ -271,7 +299,8 @@
etrafo = ExceptionTransformer(t)
graphs = etrafo.transform_completely()
collect_analyzer = CollectAnalyzer(t)
- init_stores = find_initializing_stores(collect_analyzer, t.graphs[0])
+ init_stores = find_initializing_stores(collect_analyzer, t.graphs[0],
+ mkentrymap(t.graphs[0]))
assert len(init_stores) == 5
def test_find_clean_setarrayitems():
diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py
--- a/rpython/memory/gctransform/transform.py
+++ b/rpython/memory/gctransform/transform.py
@@ -83,6 +83,7 @@
class BaseGCTransformer(object):
finished_helpers = False
+ curr_block = None
def __init__(self, translator, inline=False):
self.translator = translator
@@ -159,7 +160,7 @@
def transform_block(self, block, is_borrowed):
llops = LowLevelOpList()
- #self.curr_block = block
+ self.curr_block = block
self.livevars = [var for var in block.inputargs
if var_needsgc(var) and not is_borrowed(var)]
allvars = [var for var in block.getvariables() if var_needsgc(var)]
@@ -205,6 +206,7 @@
block.operations[:] = llops
self.livevars = None
self.var_last_needed_in = None
+ self.curr_block = None
def transform_graph(self, graph):
if graph in self.minimal_transform:
diff --git a/rpython/rlib/jit_libffi.py b/rpython/rlib/jit_libffi.py
--- a/rpython/rlib/jit_libffi.py
+++ b/rpython/rlib/jit_libffi.py
@@ -109,6 +109,11 @@
def jit_ffi_call(cif_description, func_addr, exchange_buffer):
"""Wrapper around ffi_call(). Must receive a CIF_DESCRIPTION_P that
describes the layout of the 'exchange_buffer'.
+
+ Note that this cannot be optimized if 'cif_description' is not
+ a constant for the JIT, so if it is ever possible, consider promoting
+ it. The promotion of 'cif_description' must be done earlier, before
+ the raw malloc of 'exchange_buffer'.
"""
reskind = types.getkind(cif_description.rtype)
if reskind == 'v':
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -485,7 +485,7 @@
else:
mk.definition('DEBUGFLAGS', '-O1 -g')
if self.translator.platform.name == 'msvc':
- mk.rule('debug_target', 'debugmode_$(DEFAULT_TARGET)', 'rem')
+ mk.rule('debug_target', '$(DEFAULT_TARGET)', 'rem')
else:
mk.rule('debug_target', '$(DEFAULT_TARGET)', '#')
mk.write()
More information about the pypy-commit
mailing list