[pypy-commit] pypy keys_with_hash: hg merge default
arigo
noreply at buildbot.pypy.org
Tue Sep 1 10:49:11 CEST 2015
Author: Armin Rigo <arigo at tunes.org>
Branch: keys_with_hash
Changeset: r79341:2e2ff01da2a9
Date: 2015-09-01 10:49 +0200
http://bitbucket.org/pypy/pypy/changeset/2e2ff01da2a9/
Log: hg merge default
diff too long, truncating to 2000 out of 16833 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -15,3 +15,4 @@
e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0
e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0
295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0
+f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -168,7 +168,6 @@
Michael Twomey
Lucian Branescu Mihaila
Yichao Yu
- Anton Gulenko
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -215,6 +214,7 @@
Carl Meyer
Karl Ramm
Pieter Zieschang
+ Anton Gulenko
Gabriel
Lukas Vacek
Andrew Dalke
@@ -247,6 +247,7 @@
Toni Mattis
Lucas Stadler
Julian Berman
+ Markus Holtermann
roberto at goyle
Yury V. Zaytsev
Anna Katrina Dominguez
@@ -352,8 +353,7 @@
Except when otherwise stated (look for LICENSE files or copyright/license
information at the beginning of each file) the files in the 'lib-python/2.7'
directory are all copyrighted by the Python Software Foundation and licensed
-under the Python Software License of which you can find a copy here:
-http://www.python.org/doc/Copyright.html
+under the terms that you can find here: https://docs.python.org/2/license.html
License for 'pypy/module/unicodedata/'
======================================
@@ -435,4 +435,4 @@
The code is based on gperftools. You may see a copy of the License for it at
- https://code.google.com/p/gperftools/source/browse/COPYING
+ https://github.com/gperftools/gperftools/blob/master/COPYING
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -308,7 +308,10 @@
if (len(data) != 8 or data[:4] != imp.get_magic() or
struct.unpack("<l", data[4:])[0] != mtime):
return None
- co = marshal.load(fp)
+ try:
+ co = marshal.load(fp)
+ except ValueError:
+ return None # e.g. bad marshal data because of pypy/cpython mix
if not isinstance(co, types.CodeType):
# That's interesting....
return None
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -345,7 +345,10 @@
def _finalize_raw_statement(self, _statement):
if self.__rawstatements is not None:
- self.__rawstatements.remove(_statement)
+ try:
+ self.__rawstatements.remove(_statement)
+ except KeyError:
+ return # rare case: already finalized, see issue #2097
_lib.sqlite3_finalize(_statement)
def __do_all_statements(self, action, reset_cursors):
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.1.2
+Version: 1.3.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.1.2"
-__version_info__ = (1, 1, 2)
+__version__ = "1.3.0"
+__version_info__ = (1, 3, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -46,7 +46,7 @@
# endif
#else
# include <stdint.h>
-# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
@@ -214,6 +214,12 @@
(size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \
_CFFI__UNKNOWN_PRIM)
+#define _cffi_prim_float(size) \
+ ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \
+ (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \
+ (size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE : \
+ _CFFI__UNKNOWN_FLOAT_PRIM)
+
#define _cffi_check_int(got, got_nonpos, expected) \
((got_nonpos) == (expected <= 0) && \
(got) == (unsigned long long)expected)
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -236,6 +236,30 @@
cdecl = self._typeof(cdecl)
return self._backend.newp(cdecl, init)
+ def new_allocator(self, alloc=None, free=None,
+ should_clear_after_alloc=True):
+ """Return a new allocator, i.e. a function that behaves like ffi.new()
+ but uses the provided low-level 'alloc' and 'free' functions.
+
+ 'alloc' is called with the size as argument. If it returns NULL, a
+ MemoryError is raised. 'free' is called with the result of 'alloc'
+ as argument. Both can be either Python function or directly C
+ functions. If 'free' is None, then no free function is called.
+ If both 'alloc' and 'free' are None, the default is used.
+
+ If 'should_clear_after_alloc' is set to False, then the memory
+ returned by 'alloc' is assumed to be already cleared (or you are
+ fine with garbage); otherwise CFFI will clear it.
+ """
+ compiled_ffi = self._backend.FFI()
+ allocator = compiled_ffi.new_allocator(alloc, free,
+ should_clear_after_alloc)
+ def allocate(cdecl, init=None):
+ if isinstance(cdecl, basestring):
+ cdecl = self._typeof(cdecl)
+ return allocator(cdecl, init)
+ return allocate
+
def cast(self, cdecl, source):
"""Similar to a C cast: returns an instance of the named C
type initialized with the given 'source'. The source is
@@ -286,7 +310,7 @@
"""
return self._backend.from_buffer(self.BCharA, python_buffer)
- def callback(self, cdecl, python_callable=None, error=None):
+ def callback(self, cdecl, python_callable=None, error=None, onerror=None):
"""Return a callback object or a decorator making such a
callback object. 'cdecl' must name a C function pointer type.
The callback invokes the specified 'python_callable' (which may
@@ -298,7 +322,8 @@
if not callable(python_callable):
raise TypeError("the 'python_callable' argument "
"is not callable")
- return self._backend.callback(cdecl, python_callable, error)
+ return self._backend.callback(cdecl, python_callable,
+ error, onerror)
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl, consider_function_as_funcptr=True)
if python_callable is None:
@@ -327,6 +352,13 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
+ try:
+ gcp = self._backend.gcp
+ except AttributeError:
+ pass
+ else:
+ return gcp(cdata, destructor)
+ #
with self._lock:
try:
gc_weakrefs = self.gc_weakrefs
@@ -428,6 +460,8 @@
raise TypeError("ffi.include() expects an argument that is also of"
" type cffi.FFI, not %r" % (
type(ffi_to_include).__name__,))
+ if ffi_to_include is self:
+ raise ValueError("self.include(self)")
with ffi_to_include._lock:
with self._lock:
self._parser.include(ffi_to_include._parser)
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -989,7 +989,8 @@
def cast(self, BType, source):
return BType._cast_from(source)
- def callback(self, BType, source, error):
+ def callback(self, BType, source, error, onerror):
+ assert onerror is None # XXX not implemented
return BType(source, error)
typeof = type
diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py
--- a/lib_pypy/cffi/cffi_opcode.py
+++ b/lib_pypy/cffi/cffi_opcode.py
@@ -53,6 +53,7 @@
OP_GLOBAL_VAR = 33
OP_DLOPEN_FUNC = 35
OP_DLOPEN_CONST = 37
+OP_GLOBAL_VAR_F = 39
PRIM_VOID = 0
PRIM_BOOL = 1
@@ -105,7 +106,9 @@
PRIM_UINTMAX = 47
_NUM_PRIM = 48
-_UNKNOWN_PRIM = -1
+_UNKNOWN_PRIM = -1
+_UNKNOWN_FLOAT_PRIM = -2
+_UNKNOWN_LONG_DOUBLE = -3
PRIMITIVE_TO_INDEX = {
'char': PRIM_CHAR,
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -15,9 +15,11 @@
except ImportError:
lock = None
-_r_comment = re.compile(r"/\*.*?\*/|//.*?$", re.DOTALL | re.MULTILINE)
-_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)\s+(.*?)$",
- re.MULTILINE)
+_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$",
+ re.DOTALL | re.MULTILINE)
+_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)"
+ r"\b((?:[^\n\\]|\\.)*?)$",
+ re.DOTALL | re.MULTILINE)
_r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}")
_r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$")
_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]")
@@ -39,6 +41,7 @@
macros = {}
for match in _r_define.finditer(csource):
macroname, macrovalue = match.groups()
+ macrovalue = macrovalue.replace('\\\n', '').strip()
macros[macroname] = macrovalue
csource = _r_define.sub('', csource)
# Replace "[...]" with "[__dotdotdotarray__]"
@@ -423,13 +426,10 @@
raise api.CDefError(
"%s: a function with only '(...)' as argument"
" is not correct C" % (funcname or 'in expression'))
- elif (len(params) == 1 and
- isinstance(params[0].type, pycparser.c_ast.TypeDecl) and
- isinstance(params[0].type.type, pycparser.c_ast.IdentifierType)
- and list(params[0].type.type.names) == ['void']):
- del params[0]
args = [self._as_func_arg(self._get_type(argdeclnode.type))
for argdeclnode in params]
+ if not ellipsis and args == [model.void_type]:
+ args = []
result = self._get_type(typenode.type)
return model.RawFunctionType(tuple(args), result, ellipsis)
@@ -633,6 +633,8 @@
def include(self, other):
for name, tp in other._declarations.items():
+ if name.startswith('anonymous $enum_$'):
+ continue # fix for test_anonymous_enum_include
kind = name.split(' ', 1)[0]
if kind in ('struct', 'union', 'enum', 'anonymous'):
self._declare(name, tp, included=True)
@@ -646,10 +648,21 @@
assert typenames[-1] == '__dotdotdot__'
if len(typenames) == 1:
return model.unknown_type(decl.name)
- for t in typenames[:-1]:
- if t not in ['int', 'short', 'long', 'signed', 'unsigned', 'char']:
- raise api.FFIError(':%d: bad usage of "..."' % decl.coord.line)
+
+ if (typenames[:-1] == ['float'] or
+ typenames[:-1] == ['double']):
+ # not for 'long double' so far
+ result = model.UnknownFloatType(decl.name)
+ else:
+ for t in typenames[:-1]:
+ if t not in ['int', 'short', 'long', 'signed',
+ 'unsigned', 'char']:
+ raise api.FFIError(':%d: bad usage of "..."' %
+ decl.coord.line)
+ result = model.UnknownIntegerType(decl.name)
+
if self._uses_new_feature is None:
self._uses_new_feature = "'typedef %s... %s'" % (
' '.join(typenames[:-1]), decl.name)
- return model.UnknownIntegerType(decl.name)
+
+ return result
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -35,9 +35,6 @@
def is_integer_type(self):
return False
- def sizeof_enabled(self):
- return False
-
def get_cached_btype(self, ffi, finishlist, can_delay=False):
try:
BType = ffi._cached_btypes[self]
@@ -80,8 +77,7 @@
class BasePrimitiveType(BaseType):
- def sizeof_enabled(self):
- return True
+ pass
class PrimitiveType(BasePrimitiveType):
@@ -162,12 +158,23 @@
self.c_name_with_marker = name + '&'
def is_integer_type(self):
- return True # for now
+ return True
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("integer type '%s' can only be used after "
"compilation" % self.name)
+class UnknownFloatType(BasePrimitiveType):
+ _attrs_ = ('name', )
+
+ def __init__(self, name):
+ self.name = name
+ self.c_name_with_marker = name + '&'
+
+ def build_backend_type(self, ffi, finishlist):
+ raise NotImplementedError("float type '%s' can only be used after "
+ "compilation" % self.name)
+
class BaseFunctionType(BaseType):
_attrs_ = ('args', 'result', 'ellipsis')
@@ -205,9 +212,6 @@
class FunctionPtrType(BaseFunctionType):
_base_pattern = '(*&)(%s)'
- def sizeof_enabled(self):
- return True
-
def build_backend_type(self, ffi, finishlist):
result = self.result.get_cached_btype(ffi, finishlist)
args = []
@@ -233,9 +237,6 @@
extra = self._base_pattern
self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
- def sizeof_enabled(self):
- return True
-
def build_backend_type(self, ffi, finishlist):
BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
return global_cache(self, ffi, 'new_pointer_type', BItem)
@@ -276,9 +277,6 @@
self.c_name_with_marker = (
self.item.c_name_with_marker.replace('&', brackets))
- def sizeof_enabled(self):
- return self.item.sizeof_enabled() and self.length is not None
-
def resolve_length(self, newlength):
return ArrayType(self.item, newlength)
@@ -433,9 +431,6 @@
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
- def sizeof_enabled(self):
- return self.fldtypes is not None
-
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
finishlist.append(self)
@@ -464,9 +459,6 @@
self.baseinttype = baseinttype
self.build_c_name_with_marker()
- def sizeof_enabled(self):
- return True # not strictly true, but external enums are obscure
-
def force_the_name(self, forcename):
StructOrUnionOrEnum.force_the_name(self, forcename)
if self.forcename is None:
diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h
--- a/lib_pypy/cffi/parse_c_type.h
+++ b/lib_pypy/cffi/parse_c_type.h
@@ -26,6 +26,7 @@
#define _CFFI_OP_GLOBAL_VAR 33
#define _CFFI_OP_DLOPEN_FUNC 35
#define _CFFI_OP_DLOPEN_CONST 37
+#define _CFFI_OP_GLOBAL_VAR_F 39
#define _CFFI_PRIM_VOID 0
#define _CFFI_PRIM_BOOL 1
@@ -78,7 +79,9 @@
#define _CFFI_PRIM_UINTMAX 47
#define _CFFI__NUM_PRIM 48
-#define _CFFI__UNKNOWN_PRIM (-1)
+#define _CFFI__UNKNOWN_PRIM (-1)
+#define _CFFI__UNKNOWN_FLOAT_PRIM (-2)
+#define _CFFI__UNKNOWN_LONG_DOUBLE (-3)
struct _cffi_global_s {
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -4,11 +4,6 @@
VERSION = "0x2601"
-try:
- int_type = (int, long)
-except NameError: # Python 3
- int_type = int
-
class GlobalExpr:
def __init__(self, name, address, type_op, size=0, check_value=0):
@@ -473,6 +468,10 @@
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
+ elif isinstance(tp, model.UnknownFloatType):
+ # don't check with is_float_type(): it may be a 'long
+        # double' here, and _cffi_to_c_double would lose precision
+ converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),)
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
@@ -527,6 +526,8 @@
if isinstance(tp, model.BasePrimitiveType):
if tp.is_integer_type():
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
+ elif isinstance(tp, model.UnknownFloatType):
+ return '_cffi_from_c_double(%s)' % (var,)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
@@ -981,10 +982,6 @@
if not self.target_is_python and tp.is_integer_type():
type_op = CffiOp(OP_CONSTANT_INT, -1)
else:
- if not tp.sizeof_enabled():
- raise ffiplatform.VerificationError(
- "constant '%s' is of type '%s', whose size is not known"
- % (name, tp._get_c_name()))
if self.target_is_python:
const_kind = OP_DLOPEN_CONST
else:
@@ -1069,18 +1066,36 @@
self._do_collect_type(self._global_type(tp, name))
def _generate_cpy_variable_decl(self, tp, name):
- pass
+ prnt = self._prnt
+ tp = self._global_type(tp, name)
+ if isinstance(tp, model.ArrayType) and tp.length is None:
+ tp = tp.item
+ ampersand = ''
+ else:
+ ampersand = '&'
+    # This code assumes that a cast from "tp *" to "void *" is a
+ # no-op, i.e. a function that returns a "tp *" can be called
+ # as if it returned a "void *". This should be generally true
+ # on any modern machine. The only exception to that rule (on
+ # uncommon architectures, and as far as I can tell) might be
+ # if 'tp' were a function type, but that is not possible here.
+ # (If 'tp' is a function _pointer_ type, then casts from "fn_t
+ # **" to "void *" are again no-ops, as far as I can tell.)
+ prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,)))
+ prnt('{')
+ prnt(' return %s(%s);' % (ampersand, name))
+ prnt('}')
+ prnt()
def _generate_cpy_variable_ctx(self, tp, name):
tp = self._global_type(tp, name)
type_index = self._typesdict[tp]
- type_op = CffiOp(OP_GLOBAL_VAR, type_index)
- if tp.sizeof_enabled():
- size = "sizeof(%s)" % (name,)
+ if self.target_is_python:
+ op = OP_GLOBAL_VAR
else:
- size = 0
+ op = OP_GLOBAL_VAR_F
self._lsts["global"].append(
- GlobalExpr(name, '&%s' % name, type_op, size))
+ GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index)))
# ----------
# emitting the opcodes for individual types
@@ -1098,6 +1113,12 @@
' ) <= 0)' % (tp.name, tp.name, tp.name))
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
+ def _emit_bytecode_UnknownFloatType(self, tp, index):
+ s = ('_cffi_prim_float(sizeof(%s) *\n'
+ ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n'
+ ' )' % (tp.name, tp.name))
+ self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
+
def _emit_bytecode_RawFunctionType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result])
index += 1
diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py
--- a/lib_pypy/cffi/setuptools_ext.py
+++ b/lib_pypy/cffi/setuptools_ext.py
@@ -81,10 +81,16 @@
allsources.extend(kwds.pop('sources', []))
ext = Extension(name=module_name, sources=allsources, **kwds)
- def make_mod(tmpdir):
+ def make_mod(tmpdir, pre_run=None):
c_file = os.path.join(tmpdir, module_name + source_extension)
log.info("generating cffi module %r" % c_file)
mkpath(tmpdir)
+ # a setuptools-only, API-only hook: called with the "ext" and "ffi"
+ # arguments just before we turn the ffi into C code. To use it,
+ # subclass the 'distutils.command.build_ext.build_ext' class and
+ # add a method 'def pre_run(self, ext, ffi)'.
+ if pre_run is not None:
+ pre_run(ext, ffi)
updated = recompiler.make_c_source(ffi, module_name, source, c_file)
if not updated:
log.info("already up-to-date")
@@ -98,7 +104,8 @@
class build_ext_make_mod(base_class):
def run(self):
if ext.sources[0] == '$PLACEHOLDER':
- ext.sources[0] = make_mod(self.build_temp)
+ pre_run = getattr(self, 'pre_run', None)
+ ext.sources[0] = make_mod(self.build_temp, pre_run)
base_class.run(self)
dist.cmdclass['build_ext'] = build_ext_make_mod
# NB. multiple runs here will create multiple 'build_ext_make_mod'
diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py
--- a/lib_pypy/ctypes_support.py
+++ b/lib_pypy/ctypes_support.py
@@ -28,7 +28,7 @@
def _where_is_errno():
return standard_c_lib.__errno_location()
-elif sys.platform in ('darwin', 'freebsd7', 'freebsd8', 'freebsd9'):
+elif sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
standard_c_lib.__error.restype = ctypes.POINTER(ctypes.c_int)
standard_c_lib.__error.argtypes = None
def _where_is_errno():
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: greenlet
-Version: 0.4.7
+Version: 0.4.9
Summary: Lightweight in-process concurrent programming
Home-page: https://github.com/python-greenlet/greenlet
Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -1,7 +1,7 @@
import sys
import _continuation
-__version__ = "0.4.7"
+__version__ = "0.4.9"
# ____________________________________________________________
# Exceptions
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -23,14 +23,14 @@
default_modules.update([
"_codecs", "gc", "_weakref", "marshal", "errno", "imp", "math", "cmath",
"_sre", "_pickle_support", "operator", "parser", "symbol", "token", "_ast",
- "_io", "_random", "__pypy__", "_testing"
+ "_io", "_random", "__pypy__", "_testing", "time"
])
# --allworkingmodules
working_modules = default_modules.copy()
working_modules.update([
- "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "time" ,
+ "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd",
"select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios",
"zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses",
"cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array",
@@ -39,7 +39,8 @@
"_csv", "cppyy", "_pypyjson"
])
-if sys.platform.startswith('linux') and os.uname()[4] == 'x86_64':
+if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64'
+ and sys.maxint > 2**32): # it's not enough that we get x86_64
working_modules.add('_vmprof')
translation_modules = default_modules.copy()
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -67,7 +67,7 @@
# The short X.Y version.
version = '2.6'
# The full version, including alpha/beta/rc tags.
-release = '2.6.0'
+release = '2.6.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -32,6 +32,7 @@
Lukas Diekmann
Sven Hager
Anders Lehmann
+ Richard Plangger
Aurelien Campeas
Remi Meier
Niklaus Haldimann
@@ -57,7 +58,6 @@
Ludovic Aubry
Jacob Hallen
Jason Creighton
- Richard Plangger
Alex Martelli
Michal Bendowski
stian
@@ -138,7 +138,6 @@
Michael Twomey
Lucian Branescu Mihaila
Yichao Yu
- Anton Gulenko
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -185,6 +184,7 @@
Carl Meyer
Karl Ramm
Pieter Zieschang
+ Anton Gulenko
Gabriel
Lukas Vacek
Andrew Dalke
@@ -217,6 +217,7 @@
Toni Mattis
Lucas Stadler
Julian Berman
+ Markus Holtermann
roberto at goyle
Yury V. Zaytsev
Anna Katrina Dominguez
@@ -252,6 +253,7 @@
shoma hosaka
Daniel Neuhäuser
Ben Mather
+ Niclas Olofsson
halgari
Boglarka Vezer
Chris Pressey
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -46,7 +46,11 @@
source. It'll acquire the GIL.
Note: this is meant to be called *only once* or a few times at most. See
- the `more complete example`_ below.
+ the `more complete example`_ below. In PyPy <= 2.6.0, the globals
+ dictionary is *reused* across multiple calls, giving potentially
+ strange results (e.g. objects dying too early). In PyPy >= 2.6.1,
+ you get a new globals dictionary for every call (but then, all globals
+ dictionaries are kept alive forever, in ``sys._pypy_execute_source``).
.. function:: int pypy_execute_source_ptr(char* source, void* ptr);
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -70,6 +70,20 @@
.. _`use virtualenv (as documented here)`: getting-started.html#installing-using-virtualenv
+Module xyz does not work in the sandboxed PyPy?
+-----------------------------------------------
+
+You cannot import *any* extension module in a `sandboxed PyPy`_,
+sorry. Even the built-in modules available are very limited.
+Sandboxing in PyPy is a good proof of concept, really safe IMHO, but
+it is only a proof of concept. It seriously requires someone working
+on it. Before this occurs, it can only be used for "pure Python"
+examples: programs that import mostly nothing (or only pure Python
+modules, recursively).
+
+.. _`sandboxed PyPy`: sandbox.html
+
+
.. _`See below.`:
Do CPython Extension modules work with PyPy?
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -31,15 +31,14 @@
and add the new file to pypy/doc/index-of-whatsnew.rst
* go to pypy/tool/release and run
``force-builds.py <release branch>``
- The following binaries should be built, however, we need more buildbots
- - JIT: windows, linux, os/x, armhf, armel
- - no JIT: windows, linux, os/x
- - sandbox: linux, os/x
+ The following JIT binaries should be built, however, we need more buildbots
+ windows, linux-32, linux-64, osx64, armhf-raring, armhf-raspberrian, armel,
+ freebsd64
* wait for builds to complete, make sure there are no failures
* download the builds, repackage binaries. Tag the release version
and download and repackage source from bitbucket. You may find it
- convenient to use the ``repackage.sh`` script in pypy/tools to do this.
+ convenient to use the ``repackage.sh`` script in pypy/tool/release to do this.
Otherwise repackage and upload source "-src.tar.bz2" to bitbucket
and to cobra, as some packagers prefer a clearly labeled source package
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-2.6.1.rst
release-2.6.0.rst
release-2.5.1.rst
release-2.5.0.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
.. toctree::
whatsnew-head.rst
+ whatsnew-2.6.1.rst
whatsnew-2.6.0.rst
whatsnew-2.5.1.rst
whatsnew-2.5.0.rst
diff --git a/pypy/doc/release-2.6.1.rst b/pypy/doc/release-2.6.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-2.6.1.rst
@@ -0,0 +1,129 @@
+==========
+PyPy 2.6.1
+==========
+
+We're pleased to announce PyPy 2.6.1, an update to PyPy 2.6.0 released June 1.
+We have updated stdlib to 2.7.10, `cffi`_ to version 1.3, extended support for
+the new vmprof_ statistical profiler for multiple threads, and increased
+functionality of numpy.
+
+You can download the PyPy 2.6.1 release here:
+
+ http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project, and our volunteers and contributors.
+
+.. _`cffi`: https://cffi.readthedocs.org
+
+We would also like to encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ with making
+RPython's JIT even better.
+
+.. _`PyPy`: http://doc.pypy.org
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports **x86** machines on most common operating systems
+(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_),
+as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+.. _`pypy and cpython 2.7.x`: http://speed.pypy.org
+.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy
+.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/
+.. _`dynamic languages`: http://pypyjs.org
+
+Highlights
+===========
+
+* Bug Fixes
+
+ * Revive non-SSE2 support
+
+ * Fixes for detaching _io.Buffer*
+
+ * On Windows, close (and flush) all open sockets on exiting
+
+ * Drop support for ancient macOS v10.4 and before
+
+ * Clear up contention in the garbage collector between trace-me-later and pinning
+
+ * Issues reported with our previous release were resolved_ after reports from users on
+ our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
+ #pypy.
+
+* New features:
+
+ * cffi was updated to version 1.3
+
+ * The python stdlib was updated to 2.7.10 from 2.7.9
+
+ * vmprof now supports multiple threads and OS X
+
+ * The translation process builds cffi import libraries for some stdlib
+ packages, which should prevent confusion when package.py is not used
+
+ * better support for gdb debugging
+
+ * freebsd should be able to translate PyPy "out of the box" with no patches
+
+* Numpy:
+
+ * Better support for record dtypes, including the ``align`` keyword
+
+ * Implement casting and create output arrays accordingly (still missing some corner cases)
+
+ * Support creation of unicode ndarrays
+
+ * Better support ndarray.flags
+
+ * Support ``axis`` argument in more functions
+
+ * Refactor array indexing to support ellipses
+
+ * Allow the docstrings of built-in numpy objects to be set at run-time
+
+ * Support the ``buffered`` nditer creation keyword
+
+* Performance improvements:
+
+ * Delay recursive calls to make them non-recursive
+
+ * Skip loop unrolling if it compiles too much code
+
+ * Tweak the heapcache
+
+ * Add a list strategy for lists that store both floats and 32-bit integers.
+ The latter are encoded as nonstandard NaNs. Benchmarks show that the speed
+ of such lists is now very close to the speed of purely-int or purely-float
+ lists.
+
+ * Simplify implementation of ffi.gc() to avoid most weakrefs
+
+ * Massively improve the performance of map() with more than
+ one sequence argument
+
+.. _`vmprof`: https://vmprof.readthedocs.org
+.. _resolved: http://doc.pypy.org/en/latest/whatsnew-2.6.1.html
+
+Please try it out and let us know what you think. We welcome
+success stories, `experiments`_, or `benchmarks`_; we know you are using PyPy — please tell us about it!
+
+Cheers
+
+The PyPy Team
+
+.. _`experiments`: https://morepypy.blogspot.com/2015/02/experiments-in-pyrlang-with-rpython.html
+.. _`benchmarks`: https://mithrandi.net/blog/2015/03/axiom-benchmark-results-on-pypy-2-5-0
diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst
--- a/pypy/doc/sandbox.rst
+++ b/pypy/doc/sandbox.rst
@@ -103,12 +103,15 @@
Howto
-----
-In pypy/goal::
+Grab a copy of the pypy repository_. In the directory pypy/goal, run::
../../rpython/bin/rpython -O2 --sandbox targetpypystandalone.py
If you don't have a regular PyPy installed, you should, because it's
-faster to translate, but you can also run ``python translate.py`` instead.
+faster to translate; but you can also run the same line with ``python``
+in front.
+
+.. _repository: https://bitbucket.org/pypy/pypy
To run it, use the tools in the pypy/sandbox directory::
@@ -136,8 +139,6 @@
Not all operations are supported; e.g. if you type os.readlink('...'),
the controller crashes with an exception and the subprocess is killed.
Other operations make the subprocess die directly with a "Fatal RPython
-error". None of this is a security hole; it just means that if you try
-to run some random program, it risks getting killed depending on the
-Python built-in functions it tries to call. This is a matter of the
-sandboxing layer being incomplete so far, but it should not really be
-a problem in practice.
+error". None of this is a security hole. More importantly, *most other
+built-in modules are not enabled. Please read all the warnings in this
+page before complaining about this. Contributions welcome.*
diff --git a/pypy/doc/whatsnew-2.6.1.rst b/pypy/doc/whatsnew-2.6.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-2.6.1.rst
@@ -0,0 +1,76 @@
+========================
+What's new in PyPy 2.6.1
+========================
+
+.. this is a revision shortly after release-2.6.0
+.. startrev: 91904d5c5188
+
+.. branch: use_min_scalar
+Correctly resolve the output dtype of ufunc(array, scalar) calls.
+
+.. branch: stdlib-2.7.10
+
+Update stdlib to version 2.7.10
+
+.. branch: issue2062
+
+.. branch: disable-unroll-for-short-loops
+The JIT no longer performs loop unrolling if the loop compiles to too much code.
+
+.. branch: run-create_cffi_imports
+
+Build cffi import libraries as part of translation by monkey-patching an
+additional task into translation
+
+.. branch: int-float-list-strategy
+
+Use a compact strategy for Python lists that mix integers and floats,
+at least if the integers fit inside 32 bits. These lists are now
+stored as an array of floats, like lists that contain only floats; the
+difference is that integers are stored as tagged NaNs. (This should
+have no visible effect! After ``lst = [42, 42.5]``, the value of
+``lst[0]`` is still *not* the float ``42.0`` but the integer ``42``.)
+
+.. branch: cffi-callback-onerror
+Part of cffi 1.2.
+
+.. branch: cffi-new-allocator
+Part of cffi 1.2.
+
+.. branch: unicode-dtype
+
+Partial implementation of unicode dtype and unicode scalars.
+
+.. branch: dtypes-compatability
+
+Improve compatibility with numpy dtypes; handle offsets to create unions,
+fix str() and repr(), allow specifying itemsize, metadata and titles, add flags,
+allow subclassing dtype
+
+.. branch: indexing
+
+Refactor array indexing to support ellipses.
+
+.. branch: numpy-docstrings
+
+Allow the docstrings of built-in numpy objects to be set at run-time.
+
+.. branch: nditer-revisited
+
+Implement nditer 'buffered' flag and fix some edge cases
+
+.. branch: ufunc-reduce
+
+Allow multiple axes in ufunc.reduce()
+
+.. branch: fix-tinylang-goals
+
+Update tinylang goals to match current rpython
+
+.. branch: vmprof-review
+
+Clean up of vmprof, notably to handle correctly multiple threads
+
+.. branch: no_boehm_dl
+
+Remove extra link library from Boehm GC
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -2,23 +2,6 @@
What's new in PyPy 2.6+
=======================
-.. this is a revision shortly after release-2.6.0
-.. startrev: 91904d5c5188
+.. this is a revision shortly after release-2.6.1
+.. startrev: 07769be4057b
-.. branch: use_min_scalar
-Correctly resolve the output dtype of ufunc(array, scalar) calls.
-
-.. branch: stdlib-2.7.10
-
-Update stdlib to version 2.7.10
-
-.. branch: issue2062
-
-.. branch: disable-unroll-for-short-loops
-The JIT no longer performs loop unrolling if the loop compiles to too much code.
-
-.. branch: run-create_cffi_imports
-
-Build cffi import libraries as part of translation by monkey-patching an
-aditional task into translation
-
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -128,13 +128,7 @@
@entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source')
def pypy_execute_source(ll_source):
- after = rffi.aroundstate.after
- if after: after()
- source = rffi.charp2str(ll_source)
- res = _pypy_execute_source(source)
- before = rffi.aroundstate.before
- if before: before()
- return rffi.cast(rffi.INT, res)
+ return pypy_execute_source_ptr(ll_source, 0)
@entrypoint('main', [rffi.CCHARP, lltype.Signed],
c_name='pypy_execute_source_ptr')
@@ -142,9 +136,7 @@
after = rffi.aroundstate.after
if after: after()
source = rffi.charp2str(ll_source)
- space.setitem(w_globals, space.wrap('c_argument'),
- space.wrap(ll_ptr))
- res = _pypy_execute_source(source)
+ res = _pypy_execute_source(source, ll_ptr)
before = rffi.aroundstate.before
if before: before()
return rffi.cast(rffi.INT, res)
@@ -169,15 +161,21 @@
before = rffi.aroundstate.before
if before: before()
- w_globals = space.newdict()
- space.setitem(w_globals, space.wrap('__builtins__'),
- space.builtin_modules['__builtin__'])
-
- def _pypy_execute_source(source):
+ def _pypy_execute_source(source, c_argument):
try:
- compiler = space.createcompiler()
- stmt = compiler.compile(source, 'c callback', 'exec', 0)
- stmt.exec_code(space, w_globals, w_globals)
+ w_globals = space.newdict(module=True)
+ space.setitem(w_globals, space.wrap('__builtins__'),
+ space.builtin_modules['__builtin__'])
+ space.setitem(w_globals, space.wrap('c_argument'),
+ space.wrap(c_argument))
+ space.appexec([space.wrap(source), w_globals], """(src, glob):
+ import sys
+ stmt = compile(src, 'c callback', 'exec')
+ if not hasattr(sys, '_pypy_execute_source'):
+ sys._pypy_execute_source = []
+ sys._pypy_execute_source.append(glob)
+ exec stmt in glob
+ """)
except OperationError, e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -297,7 +295,12 @@
options = make_dict(config)
wrapstr = 'space.wrap(%r)' % (options)
pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr
+ if config.objspace.usemodules._cffi_backend:
+ self.hack_for_cffi_modules(driver)
+ return self.get_entry_point(config)
+
+ def hack_for_cffi_modules(self, driver):
# HACKHACKHACK
# ugly hack to modify target goal from compile_c to build_cffi_imports
# this should probably get cleaned up and merged with driver.create_exe
@@ -336,8 +339,6 @@
driver.default_goal = 'build_cffi_imports'
# HACKHACKHACK end
- return self.get_entry_point(config)
-
def jitpolicy(self, driver):
from pypy.module.pypyjit.policy import PyPyJitPolicy
from pypy.module.pypyjit.hooks import pypy_hooks
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction, CodeUniqueIds)
+ UserDelAction)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -200,7 +200,7 @@
w_result = space.get_and_call_function(w_impl, self)
if space.isinstance_w(w_result, space.w_buffer):
return w_result.buffer_w(space, flags)
- raise TypeError
+ raise BufferInterfaceNotFound
def readbuf_w(self, space):
w_impl = space.lookup(self, '__buffer__')
@@ -208,7 +208,7 @@
w_result = space.get_and_call_function(w_impl, self)
if space.isinstance_w(w_result, space.w_buffer):
return w_result.readbuf_w(space)
- raise TypeError
+ raise BufferInterfaceNotFound
def writebuf_w(self, space):
w_impl = space.lookup(self, '__buffer__')
@@ -216,7 +216,7 @@
w_result = space.get_and_call_function(w_impl, self)
if space.isinstance_w(w_result, space.w_buffer):
return w_result.writebuf_w(space)
- raise TypeError
+ raise BufferInterfaceNotFound
def charbuf_w(self, space):
w_impl = space.lookup(self, '__buffer__')
@@ -224,7 +224,7 @@
w_result = space.get_and_call_function(w_impl, self)
if space.isinstance_w(w_result, space.w_buffer):
return w_result.charbuf_w(space)
- raise TypeError
+ raise BufferInterfaceNotFound
def str_w(self, space):
self._typed_unwrap_error(space, "string")
@@ -354,6 +354,9 @@
class DescrMismatch(Exception):
pass
+class BufferInterfaceNotFound(Exception):
+ pass
+
def wrappable_class_name(Class):
try:
return Class.typedef.name
@@ -388,7 +391,6 @@
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
self.user_del_action = UserDelAction(self)
- self.code_unique_ids = CodeUniqueIds()
self._code_of_sys_exc_info = None
# can be overridden to a subclass
@@ -667,16 +669,6 @@
assert ec is not None
return ec
- def register_code_callback(self, callback):
- cui = self.code_unique_ids
- cui.code_callback = callback
-
- def register_code_object(self, pycode):
- cui = self.code_unique_ids
- if cui.code_callback is None:
- return
- cui.code_callback(self, pycode)
-
def _freeze_(self):
return True
@@ -1403,7 +1395,7 @@
# New buffer interface, returns a buffer based on flags (PyObject_GetBuffer)
try:
return w_obj.buffer_w(self, flags)
- except TypeError:
+ except BufferInterfaceNotFound:
raise oefmt(self.w_TypeError,
"'%T' does not have the buffer interface", w_obj)
@@ -1411,7 +1403,7 @@
# Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer)
try:
return w_obj.readbuf_w(self)
- except TypeError:
+ except BufferInterfaceNotFound:
raise oefmt(self.w_TypeError,
"expected a readable buffer object")
@@ -1419,7 +1411,7 @@
# Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer)
try:
return w_obj.writebuf_w(self)
- except TypeError:
+ except BufferInterfaceNotFound:
raise oefmt(self.w_TypeError,
"expected a writeable buffer object")
@@ -1427,7 +1419,7 @@
# Old buffer interface, returns a character buffer (PyObject_AsCharBuffer)
try:
return w_obj.charbuf_w(self)
- except TypeError:
+ except BufferInterfaceNotFound:
raise oefmt(self.w_TypeError,
"expected a character buffer object")
@@ -1451,11 +1443,11 @@
return self.str(w_obj).readbuf_w(self)
try:
return w_obj.buffer_w(self, 0)
- except TypeError:
+ except BufferInterfaceNotFound:
pass
try:
return w_obj.readbuf_w(self)
- except TypeError:
+ except BufferInterfaceNotFound:
self._getarg_error("string or buffer", w_obj)
elif code == 's#':
if self.isinstance_w(w_obj, self.w_str):
@@ -1464,24 +1456,23 @@
return self.str(w_obj).str_w(self)
try:
return w_obj.readbuf_w(self).as_str()
- except TypeError:
+ except BufferInterfaceNotFound:
self._getarg_error("string or read-only buffer", w_obj)
elif code == 'w*':
try:
- try:
- return w_obj.buffer_w(self, self.BUF_WRITABLE)
- except OperationError:
- self._getarg_error("read-write buffer", w_obj)
- except TypeError:
+ return w_obj.buffer_w(self, self.BUF_WRITABLE)
+ except OperationError:
+ self._getarg_error("read-write buffer", w_obj)
+ except BufferInterfaceNotFound:
pass
try:
return w_obj.writebuf_w(self)
- except TypeError:
+ except BufferInterfaceNotFound:
self._getarg_error("read-write buffer", w_obj)
elif code == 't#':
try:
return w_obj.charbuf_w(self)
- except TypeError:
+ except BufferInterfaceNotFound:
self._getarg_error("string or read-only character buffer", w_obj)
else:
assert False
@@ -1503,13 +1494,13 @@
raise
try:
buf = w_obj.buffer_w(self, 0)
- except TypeError:
+ except BufferInterfaceNotFound:
pass
else:
return buf.as_str()
try:
buf = w_obj.readbuf_w(self)
- except TypeError:
+ except BufferInterfaceNotFound:
self._getarg_error("string or buffer", w_obj)
else:
return buf.as_str()
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -252,7 +252,8 @@
w_t, w_v, w_tb],
"""(where, objrepr, extra_line, t, v, tb):
import sys, traceback
- sys.stderr.write('From %s%s:\\n' % (where, objrepr))
+ if where or objrepr:
+ sys.stderr.write('From %s%s:\\n' % (where, objrepr))
if extra_line:
sys.stderr.write(extra_line)
traceback.print_exception(t, v, tb)
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -590,11 +590,3 @@
# there is no list of length n: if n is large, then the GC
# will run several times while walking the list, but it will
# see lower and lower memory usage, with no lower bound of n.
-
-class CodeUniqueIds(object):
- def __init__(self):
- if sys.maxint == 2147483647:
- self.code_unique_id = 0 # XXX this is wrong, it won't work on 32bit
- else:
- self.code_unique_id = 0x7000000000000000
- self.code_callback = None
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -15,7 +15,10 @@
self.running = False
def descr__repr__(self, space):
- code_name = self.pycode.co_name
+ if self.pycode is None:
+ code_name = '<finished>'
+ else:
+ code_name = self.pycode.co_name
addrstring = self.getaddrstring(space)
return space.wrap("<generator object %s at 0x%s>" %
(code_name, addrstring))
@@ -45,6 +48,8 @@
w_framestate, w_running = args_w
if space.is_w(w_framestate, space.w_None):
self.frame = None
+ self.space = space
+ self.pycode = None
else:
frame = instantiate(space.FrameClass) # XXX fish
frame.descr__setstate__(space, w_framestate)
@@ -62,9 +67,10 @@
def send_ex(self, w_arg, operr=None):
pycode = self.pycode
- if jit.we_are_jitted() and should_not_inline(pycode):
- generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg,
- operr=operr, pycode=pycode)
+ if pycode is not None:
+ if jit.we_are_jitted() and should_not_inline(pycode):
+ generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg,
+ operr=operr, pycode=pycode)
return self._send_ex(w_arg, operr)
def _send_ex(self, w_arg, operr):
@@ -158,7 +164,10 @@
return self.pycode
def descr__name__(self, space):
- code_name = self.pycode.co_name
+ if self.pycode is None:
+ code_name = '<finished>'
+ else:
+ code_name = self.pycode.co_name
return space.wrap(code_name)
# Results can be either an RPython list of W_Root, or it can be an
diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
--- a/pypy/interpreter/miscutils.py
+++ b/pypy/interpreter/miscutils.py
@@ -9,6 +9,7 @@
implementation for this feature, and patches 'space.threadlocals' when
'thread' is initialized.
"""
+ _immutable_fields_ = ['_value?']
_value = None
def get_ec(self):
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -85,7 +85,7 @@
self.magic = magic
self._signature = cpython_code_signature(self)
self._initialize()
- space.register_code_object(self)
+ self._init_ready()
def _initialize(self):
if self.co_cellvars:
@@ -127,14 +127,8 @@
from pypy.objspace.std.mapdict import init_mapdict_cache
init_mapdict_cache(self)
- cui = self.space.code_unique_ids
- self._unique_id = cui.code_unique_id
- cui.code_unique_id += 4 # so we have two bits that we can mark stuff
- # with
-
- def _get_full_name(self):
- return "py:%s:%d:%s" % (self.co_name, self.co_firstlineno,
- self.co_filename)
+ def _init_ready(self):
+ "This is a hook for the vmprof module, which overrides this method."
def _cleanup_(self):
if (self.magic == cpython_magic and
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -711,11 +711,17 @@
w_item = self.popvalue()
if self.space.is_w(w_stream, self.space.w_None):
w_stream = sys_stdout(self.space) # grumble grumble special cases
- print_item_to(self.space, w_item, w_stream)
+ print_item_to(self.space, self._printable_object(w_item), w_stream)
def PRINT_ITEM(self, oparg, next_instr):
w_item = self.popvalue()
- print_item(self.space, w_item)
+ print_item(self.space, self._printable_object(w_item))
+
+ def _printable_object(self, w_obj):
+ space = self.space
+ if not space.isinstance_w(w_obj, space.w_unicode):
+ w_obj = space.str(w_obj)
+ return w_obj
def PRINT_NEWLINE_TO(self, oparg, next_instr):
w_stream = self.popvalue()
@@ -1535,9 +1541,9 @@
stream.write(" ")
# give to write() an argument which is either a string or a unicode
- # (and let it deals itself with unicode handling)
- if not isinstance(x, unicode):
- x = str(x)
+ # (and let it deals itself with unicode handling). The check "is
+ # unicode" should not use isinstance() at app-level, because that
+ # could be fooled by strange objects, so it is done at interp-level.
stream.write(x)
# add a softspace unless we just printed a string which ends in a '\t'
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -299,6 +299,30 @@
finally:
sys.stdout = save
+ def test_print_strange_object(self):
+ import sys
+
+ class A(object):
+ def __getattribute__(self, name):
+ print "seeing", name
+ def __str__(self):
+ return 'A!!'
+ save = sys.stdout
+ class Out(object):
+ def __init__(self):
+ self.data = []
+ def write(self, x):
+ self.data.append((type(x), x))
+ sys.stdout = out = Out()
+ try:
+ a = A()
+ assert out.data == []
+ print a
+ assert out.data == [(str, 'A!!'),
+ (str, '\n')]
+ finally:
+ sys.stdout = save
+
def test_identity(self):
def f(x): return x
assert f(666) == 666
diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
--- a/pypy/interpreter/test/test_zzpickle_and_slow.py
+++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
@@ -491,6 +491,22 @@
assert pack.mod is result
+ def test_pickle_generator_crash(self):
+ import pickle
+
+ def f():
+ yield 0
+
+ x = f()
+ x.next()
+ try:
+ x.next()
+ except StopIteration:
+ y = pickle.loads(pickle.dumps(x))
+ assert 'finished' in y.__name__
+ assert 'finished' in repr(y)
+ assert y.gi_code is None
+
class AppTestGeneratorCloning:
def setup_class(cls):
diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py
--- a/pypy/module/__builtin__/app_functional.py
+++ b/pypy/module/__builtin__/app_functional.py
@@ -53,6 +53,33 @@
last = last + x
return last
+
+class _Cons(object):
+ def __init__(self, prev, iter):
+ self.prev = prev
+ self.iter = iter
+
+ def fetch(self):
+ # recursive, loop-less version of the algorithm: works best for a
+ # fixed number of "collections" in the call to map(func, *collections)
+ prev = self.prev
+ if prev is None:
+ args1 = ()
+ stop = True
+ else:
+ args1, stop = prev.fetch()
+ iter = self.iter
+ if iter is None:
+ val = None
+ else:
+ try:
+ val = next(iter)
+ stop = False
+ except StopIteration:
+ self.iter = None
+ val = None
+ return args1 + (val,), stop
+
def map(func, *collections):
"""map(function, sequence[, sequence, ...]) -> list
@@ -69,45 +96,30 @@
if num_collections == 1:
if none_func:
return list(collections[0])
- # Special case for the really common case of a single collection,
- # this can be eliminated if we could unroll that loop that creates
- # `args` based on whether or not len(collections) was constant
+ # Special case for the really common case of a single collection
seq = collections[0]
with _ManagedNewlistHint(operator._length_hint(seq, 0)) as result:
for item in seq:
result.append(func(item))
return result
- # Gather the iterators (pair of (iter, has_finished)) and guess the
+ # Gather the iterators into _Cons objects and guess the
# result length (the max of the input lengths)
- iterators = []
+ c = None
max_hint = 0
for seq in collections:
- iterators.append((iter(seq), False))
+ c = _Cons(c, iter(seq))
max_hint = max(max_hint, operator._length_hint(seq, 0))
with _ManagedNewlistHint(max_hint) as result:
while True:
- cont = False
- args = []
- for idx, (iterator, has_finished) in enumerate(iterators):
- val = None
- if not has_finished:
- try:
- val = next(iterator)
- except StopIteration:
- iterators[idx] = (None, True)
- else:
- cont = True
- args.append(val)
- args = tuple(args)
- if cont:
- if none_func:
- result.append(args)
- else:
- result.append(func(*args))
+ args, stop = c.fetch()
+ if stop:
+ return result
+ if none_func:
+ result.append(args)
else:
- return result
+ result.append(func(*args))
class _ManagedNewlistHint(object):
""" Context manager returning a newlist_hint upon entry.
diff --git a/pypy/module/__builtin__/test/test_abstractinst.py b/pypy/module/__builtin__/test/test_abstractinst.py
--- a/pypy/module/__builtin__/test/test_abstractinst.py
+++ b/pypy/module/__builtin__/test/test_abstractinst.py
@@ -202,3 +202,17 @@
__subclass__ = set([int])
assert issubclass(int, Integer)
assert issubclass(int, (Integer,))
+
+ def test_dont_call_instancecheck_fast_path(self):
+ called = []
+
+ class M(type):
+ def __instancecheck__(self, obj):
+ called.append("called")
+
+ class C:
+ __metaclass__ = M
+
+ c = C()
+ assert isinstance(c, C)
+ assert not called
diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py
--- a/pypy/module/__builtin__/test/test_functional.py
+++ b/pypy/module/__builtin__/test/test_functional.py
@@ -57,6 +57,11 @@
b = []
assert map(lambda x, y: x, a, b) == a
+ def test_map_second_item(self):
+ a = []
+ b = [1, 2, 3, 4, 5]
+ assert map(lambda x, y: y, a, b) == b
+
def test_map_iterables(self):
class A(object):
def __init__(self, n):
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -62,6 +62,7 @@
}
interpleveldefs = {
+ 'attach_gdb' : 'interp_magic.attach_gdb',
'internal_repr' : 'interp_magic.internal_repr',
'bytebuffer' : 'bytebuffer.bytebuffer',
'identity_dict' : 'interp_identitydict.W_IdentityDict',
@@ -100,8 +101,6 @@
def setup_after_space_initialization(self):
"""NOT_RPYTHON"""
- if not self.space.config.translating:
- self.extra_interpdef('interp_pdb', 'interp_magic.interp_pdb')
if self.space.config.objspace.std.withmethodcachecounter:
self.extra_interpdef('method_cache_counter',
'interp_magic.method_cache_counter')
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -15,12 +15,10 @@
return space.wrap('%r' % (w_object,))
-def interp_pdb(space):
- """Run an interp-level pdb.
- This is not available in translated versions of PyPy."""
- assert not we_are_translated()
- import pdb
- pdb.set_trace()
+def attach_gdb(space):
+ """Run an interp-level gdb (or pdb when untranslated)"""
+ from rpython.rlib.debug import attach_gdb
+ attach_gdb()
@unwrap_spec(name=str)
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -2,7 +2,7 @@
from pypy.interpreter.mixedmodule import MixedModule
from rpython.rlib import rdynload
-VERSION = "1.1.2"
+VERSION = "1.3.0"
class Module(MixedModule):
diff --git a/pypy/module/_cffi_backend/allocator.py b/pypy/module/_cffi_backend/allocator.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_cffi_backend/allocator.py
@@ -0,0 +1,86 @@
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.typedef import TypeDef
+from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
+
+from rpython.rtyper.lltypesystem import lltype, rffi
+
+
+class W_Allocator(W_Root):
+ _immutable_ = True
+
+ def __init__(self, ffi, w_alloc, w_free, should_clear_after_alloc):
+ self.ffi = ffi # may be None
+ self.w_alloc = w_alloc
+ self.w_free = w_free
+ self.should_clear_after_alloc = should_clear_after_alloc
+
+ def allocate(self, space, datasize, ctype, length=-1):
+ from pypy.module._cffi_backend import cdataobj, ctypeptr
+ if self.w_alloc is None:
+ if self.should_clear_after_alloc:
+ ptr = lltype.malloc(rffi.CCHARP.TO, datasize,
+ flavor='raw', zero=True)
+ else:
+ ptr = lltype.malloc(rffi.CCHARP.TO, datasize,
+ flavor='raw', zero=False)
+ return cdataobj.W_CDataNewStd(space, ptr, ctype, length)
+ else:
+ w_raw_cdata = space.call_function(self.w_alloc,
+ space.wrap(datasize))
+ if not isinstance(w_raw_cdata, cdataobj.W_CData):
+ raise oefmt(space.w_TypeError,
+ "alloc() must return a cdata object (got %T)",
+ w_raw_cdata)
+ if not isinstance(w_raw_cdata.ctype, ctypeptr.W_CTypePtrOrArray):
+ raise oefmt(space.w_TypeError,
+ "alloc() must return a cdata pointer, not '%s'",
+ w_raw_cdata.ctype.name)
+ #
+ ptr = w_raw_cdata.unsafe_escaping_ptr()
+ if not ptr:
+ raise oefmt(space.w_MemoryError, "alloc() returned NULL")
+ #
+ if self.should_clear_after_alloc:
+ rffi.c_memset(rffi.cast(rffi.VOIDP, ptr), 0,
+ rffi.cast(rffi.SIZE_T, datasize))
+ #
+ if self.w_free is None:
+ # use this class which does not have a __del__, but still
+ # keeps alive w_raw_cdata
+ res = cdataobj.W_CDataNewNonStdNoFree(space, ptr, ctype, length)
+ else:
+ res = cdataobj.W_CDataNewNonStdFree(space, ptr, ctype, length)
+ res.w_free = self.w_free
+ res.w_raw_cdata = w_raw_cdata
+ return res
+
+ @unwrap_spec(w_init=WrappedDefault(None))
+ def descr_call(self, space, w_arg, w_init):
+ ffi = self.ffi
+ assert ffi is not None
+ w_ctype = ffi.ffi_type(w_arg, ffi.ACCEPT_STRING | ffi.ACCEPT_CTYPE)
+ return w_ctype.newp(w_init, self)
+
+
+W_Allocator.typedef = TypeDef(
+ 'FFIAllocator',
+ __call__ = interp2app(W_Allocator.descr_call),
+ )
+W_Allocator.typedef.acceptable_as_base_class = False
+
+
+def new_allocator(ffi, w_alloc, w_free, should_clear_after_alloc):
+ space = ffi.space
+ if space.is_none(w_alloc):
+ w_alloc = None
+ if space.is_none(w_free):
+ w_free = None
+ if w_alloc is None and w_free is not None:
+ raise oefmt(space.w_TypeError, "cannot pass 'free' without 'alloc'")
+ alloc = W_Allocator(ffi, w_alloc, w_free, bool(should_clear_after_alloc))
+ return space.wrap(alloc)
+
+
+default_allocator = W_Allocator(None, None, None, should_clear_after_alloc=True)
+nonzero_allocator = W_Allocator(None, None, None,should_clear_after_alloc=False)
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -22,8 +22,9 @@
class W_CDataCallback(W_CData):
#_immutable_fields_ = ...
ll_error = lltype.nullptr(rffi.CCHARP.TO)
+ w_onerror = None
- def __init__(self, space, ctype, w_callable, w_error):
+ def __init__(self, space, ctype, w_callable, w_error, w_onerror):
raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc())
W_CData.__init__(self, space, raw_closure, ctype)
#
@@ -31,6 +32,12 @@
raise oefmt(space.w_TypeError,
"expected a callable object, not %T", w_callable)
self.w_callable = w_callable
+ if not space.is_none(w_onerror):
+ if not space.is_true(space.callable(w_onerror)):
+ raise oefmt(space.w_TypeError,
+ "expected a callable object for 'onerror', not %T",
+ w_onerror)
+ self.w_onerror = w_onerror
#
fresult = self.getfunctype().ctitem
size = fresult.size
@@ -161,6 +168,29 @@
STDERR = 2
+ at jit.dont_look_inside
+def _handle_applevel_exception(space, callback, e, ll_res, extra_line):
+ callback.write_error_return_value(ll_res)
+ if callback.w_onerror is None:
+ callback.print_error(e, extra_line)
+ else:
+ try:
+ e.normalize_exception(space)
+ w_t = e.w_type
+ w_v = e.get_w_value(space)
+ w_tb = space.wrap(e.get_traceback())
+ w_res = space.call_function(callback.w_onerror,
+ w_t, w_v, w_tb)
+ if not space.is_none(w_res):
+ callback.convert_result(ll_res, w_res)
+ except OperationError, e2:
+ # double exception! print a double-traceback...
+ callback.print_error(e, extra_line) # original traceback
+ e2.write_unraisable(space, '', with_traceback=True,
+ extra_line="\nDuring the call to 'onerror', "
+ "another exception occurred:\n\n")
+
+
@jit.jit_callback("CFFI")
def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata):
""" Callback specification.
@@ -178,7 +208,7 @@
try:
os.write(STDERR, "SystemError: invoking a callback "
"that was already freed\n")
- except OSError:
+ except:
pass
# In this case, we don't even know how big ll_res is. Let's assume
# it is just a 'ffi_arg', and store 0 there.
@@ -195,9 +225,7 @@
extra_line = "Trying to convert the result back to C:\n"
callback.convert_result(ll_res, w_res)
except OperationError, e:
- # got an app-level exception
- callback.print_error(e, extra_line)
- callback.write_error_return_value(ll_res)
+ _handle_applevel_exception(space, callback, e, ll_res, extra_line)
#
except Exception, e:
# oups! last-level attempt to recover.
@@ -205,7 +233,7 @@
os.write(STDERR, "SystemError: callback raised ")
os.write(STDERR, str(e))
os.write(STDERR, "\n")
- except OSError:
+ except:
pass
callback.write_error_return_value(ll_res)
if must_leave:
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -363,16 +363,19 @@
def _sizeof(self):
return self.ctype.size
+ def with_gc(self, w_destructor):
+ with self as ptr:
+ return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor)
+
class W_CDataMem(W_CData):
- """This is the base class used for cdata objects that own and free
- their memory. Used directly by the results of cffi.cast('int', x)
- or other primitive explicitly-casted types. It is further subclassed
- by W_CDataNewOwning."""
+ """This is used only by the results of cffi.cast('int', x)
+ or other primitive explicitly-casted types."""
_attrs_ = []
- def __init__(self, space, size, ctype):
- cdata = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw', zero=True)
+ def __init__(self, space, ctype):
+ cdata = lltype.malloc(rffi.CCHARP.TO, ctype.size, flavor='raw',
+ zero=False)
W_CData.__init__(self, space, cdata, ctype)
@rgc.must_be_light_finalizer
@@ -380,36 +383,65 @@
lltype.free(self._ptr, flavor='raw')
-class W_CDataNewOwning(W_CDataMem):
- """This is the class used for the cata objects created by newp()."""
- _attrs_ = []
+class W_CDataNewOwning(W_CData):
+ """This is the abstract base class used for cdata objects created
+ by newp(). They create and free their own memory according to an
+ allocator."""
+
+ # the 'length' is either >= 0 for arrays, or -1 for pointers.
+ _attrs_ = ['length']
+ _immutable_fields_ = ['length']
+
+ def __init__(self, space, cdata, ctype, length=-1):
+ W_CData.__init__(self, space, cdata, ctype)
+ self.length = length
def _repr_extra(self):
return self._repr_extra_owning()
-
-class W_CDataNewOwningLength(W_CDataNewOwning):
- """Subclass with an explicit length, for allocated instances of
- the C type 'foo[]'."""
- _attrs_ = ['length']
- _immutable_fields_ = ['length']
-
- def __init__(self, space, size, ctype, length):
- W_CDataNewOwning.__init__(self, space, size, ctype)
- self.length = length
-
def _sizeof(self):
- from pypy.module._cffi_backend import ctypearray
ctype = self.ctype
- assert isinstance(ctype, ctypearray.W_CTypeArray)
- return self.length * ctype.ctitem.size
+ if self.length >= 0:
+ from pypy.module._cffi_backend import ctypearray
+ assert isinstance(ctype, ctypearray.W_CTypeArray)
+ return self.length * ctype.ctitem.size
+ else:
+ return ctype.size
def get_array_length(self):
return self.length
+class W_CDataNewStd(W_CDataNewOwning):
+ """Subclass using the standard allocator, lltype.malloc()/lltype.free()"""
+ _attrs_ = []
+
+ @rgc.must_be_light_finalizer
+ def __del__(self):
+ lltype.free(self._ptr, flavor='raw')
+
+
+class W_CDataNewNonStdNoFree(W_CDataNewOwning):
+ """Subclass using a non-standard allocator, no free()"""
+ _attrs_ = ['w_raw_cdata']
+
+class W_CDataNewNonStdFree(W_CDataNewNonStdNoFree):
+ """Subclass using a non-standard allocator, with a free()"""
+ _attrs_ = ['w_free']
+
+ def __del__(self):
+ self.clear_all_weakrefs()
+ self.enqueue_for_destruction(self.space,
+ W_CDataNewNonStdFree.call_destructor,
+ 'destructor of ')
+
+ def call_destructor(self):
+ assert isinstance(self, W_CDataNewNonStdFree)
More information about the pypy-commit
mailing list