[pypy-commit] pypy py3.5: hg merge default

rlamy pypy.commits at gmail.com
Sat Jan 21 17:38:23 EST 2017


Author: Ronan Lamy <ronan.lamy at gmail.com>
Branch: py3.5
Changeset: r89684:e03a42027bba
Date: 2017-01-21 22:37 +0000
http://bitbucket.org/pypy/pypy/changeset/e03a42027bba/

Log:	hg merge default

diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py
--- a/lib-python/2.7/distutils/unixccompiler.py
+++ b/lib-python/2.7/distutils/unixccompiler.py
@@ -310,6 +310,10 @@
                 static = os.path.join(sysroot, dir[1:], static_f)
                 xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f)
 
+            # PyPy extension here: 'shared' usually ends in something
+            # like '.pypy-41.so'.  Try without the '.pypy-41' part too.
+            shared_no_pypy = re.sub(r'[.]pypy[^.]+([.][^.]+)$', r'\1', shared)
+
             # We're second-guessing the linker here, with not much hard
             # data to go on: GCC seems to prefer the shared library, so I'm
             # assuming that *all* Unix C compilers do.  And of course I'm
@@ -320,6 +324,8 @@
                 return xcode_stub
             elif os.path.exists(shared):
                 return shared
+            elif os.path.exists(shared_no_pypy):
+                return shared_no_pypy
             elif os.path.exists(static):
                 return static
 
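A quick illustration of what the added re.sub() does (the file name below is
made up; the real suffix depends on the PyPy version):

    import re
    shared = 'libfoo.pypy-41.so'          # hypothetical versioned library name
    shared_no_pypy = re.sub(r'[.]pypy[^.]+([.][^.]+)$', r'\1', shared)
    print(shared_no_pypy)                 # prints 'libfoo.so'

With the extra elif above, the linker fallback also picks up libraries that
were installed without the interpreter-specific suffix.
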
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -1,13 +1,18 @@
 """High performance data structures
+
+Note that PyPy also contains a built-in module '_collections' which will hide
+this one if compiled in.
+
+THIS ONE IS BOGUS in the sense that it is NOT THREAD-SAFE!  It is provided
+only as documentation nowadays.  Please don't run a PyPy without the
+'_collections' built-in module in production.  The built-in module is
+correctly thread-safe, like it is on CPython.
 """
 #
 # Copied and completed from the sandbox of CPython
 #   (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
 #
 
-# Note that PyPy also contains a built-in module '_collections' which will hide
-# this one if compiled in.
-
 try:
     from _thread import _get_ident as _thread_ident
 except ImportError:
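A quick way to check which '_collections' is actually in use (a sketch,
relying on the fact that compiled-in modules carry no __file__ attribute):

    import _collections
    # the pure-Python copy from lib_pypy/ reports its path; the built-in,
    # thread-safe module has no __file__ at all
    print(getattr(_collections, '__file__', '<built-in _collections>'))
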
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -382,10 +382,14 @@
 
     def get_c_restype(self, c_writer):
         if self.cdecl:
-            return self.cdecl.split(self.c_name)[0].strip()
+            return self.cdecl.tp.result.get_c_name()
         return c_writer.gettype(self.restype).replace('@', '').strip()
 
     def get_c_args(self, c_writer):
+        if self.cdecl:
+            args = [tp.get_c_name('arg%d' % i) for i, tp in
+                enumerate(self.cdecl.tp.args)]
+            return ', '.join(args) or "void"
         args = []
         for i, argtype in enumerate(self.argtypes):
             if argtype is CONST_STRING:
@@ -482,14 +486,15 @@
         return unwrapper
     return decorate
 
-def api_decl(cdecl, cts, error=_NOT_SPECIFIED, header=DEFAULT_HEADER):
+def api_decl(cdef, cts, error=_NOT_SPECIFIED, header=DEFAULT_HEADER):
     def decorate(func):
         func._always_inline_ = 'try'
-        name, FUNC = cts.parse_func(cdecl)
+        cdecl = cts.parse_func(cdef)
+        RESULT = cdecl.get_llresult(cts)
         api_function = ApiFunction(
-            FUNC.ARGS, FUNC.RESULT, func,
-            error=_compute_error(error, FUNC.RESULT), cdecl=cdecl)
-        FUNCTIONS_BY_HEADER[header][name] = api_function
+            cdecl.get_llargs(cts), RESULT, func,
+            error=_compute_error(error, RESULT), cdecl=cdecl)
+        FUNCTIONS_BY_HEADER[header][cdecl.name] = api_function
         unwrapper = api_function.get_unwrapper()
         unwrapper.func = func
         unwrapper.api_func = api_function
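With cdecl now being a parsed declaration object (see the FunctionDeclaration
class in cparser.py below), get_c_restype() and get_c_args() build the C
prototype directly from the parsed type.  A small sketch of those two calls
using the cmodel classes added below; the int-based signature is chosen
purely for illustration:

    from pypy.module.cpyext import cmodel as model

    int_t = model.PrimitiveType('int')
    tp = model.RawFunctionType([model.PointerType(int_t), int_t], int_t,
                               ellipsis=False)
    print(tp.result.get_c_name())            # prints 'int'
    args = [argtp.get_c_name('arg%d' % i)
            for i, argtp in enumerate(tp.args)]
    print(', '.join(args) or "void")         # prints 'int * arg0, int arg1'
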
diff --git a/pypy/module/cpyext/cmodel.py b/pypy/module/cpyext/cmodel.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/cmodel.py
@@ -0,0 +1,607 @@
+import types
+import weakref
+
+from .error import CDefError, VerificationError, VerificationMissing
+
+# type qualifiers
+Q_CONST    = 0x01
+Q_RESTRICT = 0x02
+Q_VOLATILE = 0x04
+
+def qualify(quals, replace_with):
+    if quals & Q_CONST:
+        replace_with = ' const ' + replace_with.lstrip()
+    if quals & Q_VOLATILE:
+        replace_with = ' volatile ' + replace_with.lstrip()
+    if quals & Q_RESTRICT:
+        # It seems that __restrict is supported by gcc and msvc.
+        # If you hit some different compiler, add a #define in
+        # _cffi_include.h for it (and in its copies, documented there)
+        replace_with = ' __restrict ' + replace_with.lstrip()
+    return replace_with
+
+
+class BaseTypeByIdentity(object):
+    is_array_type = False
+    is_raw_function = False
+
+    def get_c_name(self, replace_with='', context='a C file', quals=0):
+        result = self.c_name_with_marker
+        assert result.count('&') == 1
+        # some logic duplication with ffi.getctype()... :-(
+        replace_with = replace_with.strip()
+        if replace_with:
+            if replace_with.startswith('*') and '&[' in result:
+                replace_with = '(%s)' % replace_with
+            elif not replace_with[0] in '[(':
+                replace_with = ' ' + replace_with
+        replace_with = qualify(quals, replace_with)
+        result = result.replace('&', replace_with)
+        if '$' in result:
+            raise VerificationError(
+                "cannot generate '%s' in %s: unknown type name"
+                % (self._get_c_name(), context))
+        return result
+
+    def _get_c_name(self):
+        return self.c_name_with_marker.replace('&', '')
+
+    def has_c_name(self):
+        return '$' not in self._get_c_name()
+
+    def is_integer_type(self):
+        return False
+
+    def get_cached_btype(self, ffi, finishlist, can_delay=False):
+        try:
+            BType = ffi._cached_btypes[self]
+        except KeyError:
+            BType = self.build_backend_type(ffi, finishlist)
+            BType2 = ffi._cached_btypes.setdefault(self, BType)
+            assert BType2 is BType
+        return BType
+
+    def __repr__(self):
+        return '<%s>' % (self._get_c_name(),)
+
+    def _get_items(self):
+        return [(name, getattr(self, name)) for name in self._attrs_]
+
+
+class BaseType(BaseTypeByIdentity):
+
+    def __eq__(self, other):
+        return (self.__class__ == other.__class__ and
+                self._get_items() == other._get_items())
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash((self.__class__, tuple(self._get_items())))
+
+
+class VoidType(BaseType):
+    _attrs_ = ()
+
+    def __init__(self):
+        self.c_name_with_marker = 'void&'
+
+    def build_backend_type(self, ffi, finishlist):
+        return global_cache(self, ffi, 'new_void_type')
+
+void_type = VoidType()
+
+
+class BasePrimitiveType(BaseType):
+    pass
+
+
+class PrimitiveType(BasePrimitiveType):
+    _attrs_ = ('name',)
+
+    ALL_PRIMITIVE_TYPES = {
+        'char':               'c',
+        'short':              'i',
+        'int':                'i',
+        'long':               'i',
+        'long long':          'i',
+        'signed char':        'i',
+        'unsigned char':      'i',
+        'unsigned short':     'i',
+        'unsigned int':       'i',
+        'unsigned long':      'i',
+        'unsigned long long': 'i',
+        'float':              'f',
+        'double':             'f',
+        'long double':        'f',
+        '_Bool':              'i',
+        # the following types are not primitive in the C sense
+        'wchar_t':            'c',
+        'int8_t':             'i',
+        'uint8_t':            'i',
+        'int16_t':            'i',
+        'uint16_t':           'i',
+        'int32_t':            'i',
+        'uint32_t':           'i',
+        'int64_t':            'i',
+        'uint64_t':           'i',
+        'int_least8_t':       'i',
+        'uint_least8_t':      'i',
+        'int_least16_t':      'i',
+        'uint_least16_t':     'i',
+        'int_least32_t':      'i',
+        'uint_least32_t':     'i',
+        'int_least64_t':      'i',
+        'uint_least64_t':     'i',
+        'int_fast8_t':        'i',
+        'uint_fast8_t':       'i',
+        'int_fast16_t':       'i',
+        'uint_fast16_t':      'i',
+        'int_fast32_t':       'i',
+        'uint_fast32_t':      'i',
+        'int_fast64_t':       'i',
+        'uint_fast64_t':      'i',
+        'intptr_t':           'i',
+        'uintptr_t':          'i',
+        'intmax_t':           'i',
+        'uintmax_t':          'i',
+        'ptrdiff_t':          'i',
+        'size_t':             'i',
+        'ssize_t':            'i',
+        }
+
+    def __init__(self, name):
+        assert name in self.ALL_PRIMITIVE_TYPES
+        self.name = name
+        self.c_name_with_marker = name + '&'
+
+    def is_char_type(self):
+        return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'
+    def is_integer_type(self):
+        return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'
+    def is_float_type(self):
+        return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'
+
+    def build_backend_type(self, ffi, finishlist):
+        return global_cache(self, ffi, 'new_primitive_type', self.name)
+
+
+class UnknownIntegerType(BasePrimitiveType):
+    _attrs_ = ('name',)
+
+    def __init__(self, name):
+        self.name = name
+        self.c_name_with_marker = name + '&'
+
+    def is_integer_type(self):
+        return True
+
+    def build_backend_type(self, ffi, finishlist):
+        raise NotImplementedError("integer type '%s' can only be used after "
+                                  "compilation" % self.name)
+
+class UnknownFloatType(BasePrimitiveType):
+    _attrs_ = ('name', )
+
+    def __init__(self, name):
+        self.name = name
+        self.c_name_with_marker = name + '&'
+
+    def build_backend_type(self, ffi, finishlist):
+        raise NotImplementedError("float type '%s' can only be used after "
+                                  "compilation" % self.name)
+
+class DefinedType(BaseType):
+    _attrs_ = ('name', )
+
+    def __init__(self, name, realtype, quals):
+        self.name = name
+        self.realtype = realtype
+        self.quals = quals
+        self.c_name_with_marker = name + '&'
+
+
+class BaseFunctionType(BaseType):
+    _attrs_ = ('args', 'result', 'ellipsis', 'abi')
+
+    def __init__(self, args, result, ellipsis, abi=None):
+        self.args = args
+        self.result = result
+        self.ellipsis = ellipsis
+        self.abi = abi
+        #
+        reprargs = [arg._get_c_name() for arg in self.args]
+        if self.ellipsis:
+            reprargs.append('...')
+        reprargs = reprargs or ['void']
+        replace_with = self._base_pattern % (', '.join(reprargs),)
+        if abi is not None:
+            replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
+        self.c_name_with_marker = (
+            self.result.c_name_with_marker.replace('&', replace_with))
+
+
+class RawFunctionType(BaseFunctionType):
+    # Corresponds to a C type like 'int(int)', which is the C type of
+    # a function, but not a pointer-to-function.  The backend has no
+    # notion of such a type; it's used temporarily by parsing.
+    _base_pattern = '(&)(%s)'
+    is_raw_function = True
+
+    def build_backend_type(self, ffi, finishlist):
+        raise CDefError("cannot render the type %r: it is a function "
+                        "type, not a pointer-to-function type" % (self,))
+
+    def as_function_pointer(self):
+        return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)
+
+
+class FunctionPtrType(BaseFunctionType):
+    _base_pattern = '(*&)(%s)'
+
+    def build_backend_type(self, ffi, finishlist):
+        result = self.result.get_cached_btype(ffi, finishlist)
+        args = []
+        for tp in self.args:
+            args.append(tp.get_cached_btype(ffi, finishlist))
+        abi_args = ()
+        if self.abi == "__stdcall":
+            if not self.ellipsis:    # __stdcall ignored for variadic funcs
+                try:
+                    abi_args = (ffi._backend.FFI_STDCALL,)
+                except AttributeError:
+                    pass
+        return global_cache(self, ffi, 'new_function_type',
+                            tuple(args), result, self.ellipsis, *abi_args)
+
+    def as_raw_function(self):
+        return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
+
+
+class PointerType(BaseType):
+    _attrs_ = ('totype', 'quals')
+
+    def __init__(self, totype, quals=0):
+        self.totype = totype
+        self.quals = quals
+        extra = qualify(quals, " *&")
+        if totype.is_array_type:
+            extra = "(%s)" % (extra.lstrip(),)
+        self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
+
+    def build_backend_type(self, ffi, finishlist):
+        BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
+        return global_cache(self, ffi, 'new_pointer_type', BItem)
+
+voidp_type = PointerType(void_type)
+
+def ConstPointerType(totype):
+    return PointerType(totype, Q_CONST)
+
+const_voidp_type = ConstPointerType(void_type)
+
+
+class NamedPointerType(PointerType):
+    _attrs_ = ('totype', 'name')
+
+    def __init__(self, totype, name, quals=0):
+        PointerType.__init__(self, totype, quals)
+        self.name = name
+        self.c_name_with_marker = name + '&'
+
+
+class ArrayType(BaseType):
+    _attrs_ = ('item', 'length')
+    is_array_type = True
+
+    def __init__(self, item, length):
+        self.item = item
+        self.length = length
+        #
+        if length is None:
+            brackets = '&[]'
+        elif length == '...':
+            brackets = '&[/*...*/]'
+        else:
+            brackets = '&[%s]' % length
+        self.c_name_with_marker = (
+            self.item.c_name_with_marker.replace('&', brackets))
+
+    def resolve_length(self, newlength):
+        return ArrayType(self.item, newlength)
+
+    def build_backend_type(self, ffi, finishlist):
+        if self.length == '...':
+            raise CDefError("cannot render the type %r: unknown length" %
+                            (self,))
+        self.item.get_cached_btype(ffi, finishlist)   # force the item BType
+        BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist)
+        return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length)
+
+char_array_type = ArrayType(PrimitiveType('char'), None)
+
+
+class StructOrUnionOrEnum(BaseTypeByIdentity):
+    _attrs_ = ('name',)
+    forcename = None
+
+    def build_c_name_with_marker(self):
+        name = self.forcename or '%s %s' % (self.kind, self.name)
+        self.c_name_with_marker = name + '&'
+
+    def force_the_name(self, forcename):
+        self.forcename = forcename
+        self.build_c_name_with_marker()
+
+    def get_official_name(self):
+        assert self.c_name_with_marker.endswith('&')
+        return self.c_name_with_marker[:-1]
+
+
+class StructOrUnion(StructOrUnionOrEnum):
+    fixedlayout = None
+    completed = 0
+    partial = False
+    packed = False
+
+    def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
+        self.name = name
+        self.fldnames = fldnames
+        self.fldtypes = fldtypes
+        self.fldbitsize = fldbitsize
+        self.fldquals = fldquals
+        self.build_c_name_with_marker()
+
+    def has_anonymous_struct_fields(self):
+        if self.fldtypes is None:
+            return False
+        for name, type in zip(self.fldnames, self.fldtypes):
+            if name == '' and isinstance(type, StructOrUnion):
+                return True
+        return False
+
+    def enumfields(self):
+        fldquals = self.fldquals
+        if fldquals is None:
+            fldquals = (0,) * len(self.fldnames)
+        for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
+                                              self.fldbitsize, fldquals):
+            if name == '' and isinstance(type, StructOrUnion):
+                # nested anonymous struct/union
+                for result in type.enumfields():
+                    yield result
+            else:
+                yield (name, type, bitsize, quals)
+
+    def force_flatten(self):
+        # force the struct or union to have a declaration that lists
+        # directly all fields returned by enumfields(), flattening
+        # nested anonymous structs/unions.
+        names = []
+        types = []
+        bitsizes = []
+        fldquals = []
+        for name, type, bitsize, quals in self.enumfields():
+            names.append(name)
+            types.append(type)
+            bitsizes.append(bitsize)
+            fldquals.append(quals)
+        self.fldnames = tuple(names)
+        self.fldtypes = tuple(types)
+        self.fldbitsize = tuple(bitsizes)
+        self.fldquals = tuple(fldquals)
+
+    def get_cached_btype(self, ffi, finishlist, can_delay=False):
+        BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
+                                                     can_delay)
+        if not can_delay:
+            self.finish_backend_type(ffi, finishlist)
+        return BType
+
+    def finish_backend_type(self, ffi, finishlist):
+        if self.completed:
+            if self.completed != 2:
+                raise NotImplementedError("recursive structure declaration "
+                                          "for '%s'" % (self.name,))
+            return
+        BType = ffi._cached_btypes[self]
+        #
+        self.completed = 1
+        #
+        if self.fldtypes is None:
+            pass    # not completing it: it's an opaque struct
+            #
+        elif self.fixedlayout is None:
+            fldtypes = [tp.get_cached_btype(ffi, finishlist)
+                        for tp in self.fldtypes]
+            lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
+            sflags = 0
+            if self.packed:
+                sflags = 8    # SF_PACKED
+            ffi._backend.complete_struct_or_union(BType, lst, self,
+                                                  -1, -1, sflags)
+            #
+        else:
+            fldtypes = []
+            fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
+            for i in range(len(self.fldnames)):
+                fsize = fieldsize[i]
+                ftype = self.fldtypes[i]
+                #
+                if isinstance(ftype, ArrayType) and ftype.length == '...':
+                    # fix the length to match the total size
+                    BItemType = ftype.item.get_cached_btype(ffi, finishlist)
+                    nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
+                    if nrest != 0:
+                        self._verification_error(
+                            "field '%s.%s' has a bogus size?" % (
+                            self.name, self.fldnames[i] or '{}'))
+                    ftype = ftype.resolve_length(nlen)
+                    self.fldtypes = (self.fldtypes[:i] + (ftype,) +
+                                     self.fldtypes[i+1:])
+                #
+                BFieldType = ftype.get_cached_btype(ffi, finishlist)
+                if isinstance(ftype, ArrayType) and ftype.length is None:
+                    assert fsize == 0
+                else:
+                    bitemsize = ffi.sizeof(BFieldType)
+                    if bitemsize != fsize:
+                        self._verification_error(
+                            "field '%s.%s' is declared as %d bytes, but is "
+                            "really %d bytes" % (self.name,
+                                                 self.fldnames[i] or '{}',
+                                                 bitemsize, fsize))
+                fldtypes.append(BFieldType)
+            #
+            lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
+            ffi._backend.complete_struct_or_union(BType, lst, self,
+                                                  totalsize, totalalignment)
+        self.completed = 2
+
+    def _verification_error(self, msg):
+        raise VerificationError(msg)
+
+    def check_not_partial(self):
+        if self.partial and self.fixedlayout is None:
+            raise VerificationMissing(self._get_c_name())
+
+    def build_backend_type(self, ffi, finishlist):
+        self.check_not_partial()
+        finishlist.append(self)
+        #
+        return global_cache(self, ffi, 'new_%s_type' % self.kind,
+                            self.get_official_name(), key=self)
+
+
+class StructType(StructOrUnion):
+    kind = 'struct'
+
+
+class UnionType(StructOrUnion):
+    kind = 'union'
+
+
+class EnumType(StructOrUnionOrEnum):
+    kind = 'enum'
+    partial = False
+    partial_resolved = False
+
+    def __init__(self, name, enumerators, enumvalues, baseinttype=None):
+        self.name = name
+        self.enumerators = enumerators
+        self.enumvalues = enumvalues
+        self.baseinttype = baseinttype
+        self.build_c_name_with_marker()
+
+    def force_the_name(self, forcename):
+        StructOrUnionOrEnum.force_the_name(self, forcename)
+        if self.forcename is None:
+            name = self.get_official_name()
+            self.forcename = '$' + name.replace(' ', '_')
+
+    def check_not_partial(self):
+        if self.partial and not self.partial_resolved:
+            raise VerificationMissing(self._get_c_name())
+
+    def build_backend_type(self, ffi, finishlist):
+        self.check_not_partial()
+        base_btype = self.build_baseinttype(ffi, finishlist)
+        return global_cache(self, ffi, 'new_enum_type',
+                            self.get_official_name(),
+                            self.enumerators, self.enumvalues,
+                            base_btype, key=self)
+
+    def build_baseinttype(self, ffi, finishlist):
+        if self.baseinttype is not None:
+            return self.baseinttype.get_cached_btype(ffi, finishlist)
+        #
+        if self.enumvalues:
+            smallest_value = min(self.enumvalues)
+            largest_value = max(self.enumvalues)
+        else:
+            import warnings
+            try:
+                # XXX!  The goal is to ensure that the warnings.warn()
+                # will not suppress the warning.  We want to get it
+                # several times if we reach this point several times.
+                __warningregistry__.clear()
+            except NameError:
+                pass
+            warnings.warn("%r has no values explicitly defined; "
+                          "guessing that it is equivalent to 'unsigned int'"
+                          % self._get_c_name())
+            smallest_value = largest_value = 0
+        if smallest_value < 0:   # needs a signed type
+            sign = 1
+            candidate1 = PrimitiveType("int")
+            candidate2 = PrimitiveType("long")
+        else:
+            sign = 0
+            candidate1 = PrimitiveType("unsigned int")
+            candidate2 = PrimitiveType("unsigned long")
+        btype1 = candidate1.get_cached_btype(ffi, finishlist)
+        btype2 = candidate2.get_cached_btype(ffi, finishlist)
+        size1 = ffi.sizeof(btype1)
+        size2 = ffi.sizeof(btype2)
+        if (smallest_value >= ((-1) << (8*size1-1)) and
+            largest_value < (1 << (8*size1-sign))):
+            return btype1
+        if (smallest_value >= ((-1) << (8*size2-1)) and
+            largest_value < (1 << (8*size2-sign))):
+            return btype2
+        raise CDefError("%s values don't all fit into either 'long' "
+                        "or 'unsigned long'" % self._get_c_name())
+
+def unknown_type(name, structname=None):
+    if structname is None:
+        structname = '$%s' % name
+    tp = StructType(structname, None, None, None)
+    tp.force_the_name(name)
+    tp.origin = "unknown_type"
+    return tp
+
+def unknown_ptr_type(name, structname=None):
+    if structname is None:
+        structname = '$$%s' % name
+    tp = StructType(structname, None, None, None)
+    return NamedPointerType(tp, name)
+
+
+def global_cache(srctype, ffi, funcname, *args, **kwds):
+    key = kwds.pop('key', (funcname, args))
+    assert not kwds
+    try:
+        return ffi._backend.__typecache[key]
+    except KeyError:
+        pass
+    except AttributeError:
+        # initialize the __typecache attribute, either at the module level
+        # if ffi._backend is a module, or at the class level if ffi._backend
+        # is some instance.
+        if isinstance(ffi._backend, types.ModuleType):
+            ffi._backend.__typecache = weakref.WeakValueDictionary()
+        else:
+            type(ffi._backend).__typecache = weakref.WeakValueDictionary()
+    try:
+        res = getattr(ffi._backend, funcname)(*args)
+    except NotImplementedError as e:
+        raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
+    # note that setdefault() on WeakValueDictionary is not atomic
+    # and contains a rare bug (http://bugs.python.org/issue19542);
+    # we have to use a lock and do it ourselves
+    cache = ffi._backend.__typecache
+    with global_lock:
+        res1 = cache.get(key)
+        if res1 is None:
+            cache[key] = res
+            return res
+        else:
+            return res1
+
+def pointer_cache(ffi, BType):
+    return global_cache('?', ffi, 'new_pointer_type', BType)
+
+def attach_exception_info(e, name):
+    if e.args and type(e.args[0]) is str:
+        e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
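For readers new to this cffi-derived type model: the single '&' in
c_name_with_marker marks the spot where a declared name would go, which is
what lets get_c_name() render pointer, array and function-pointer declarators
correctly.  A small sketch, assuming a source tree that contains this new
file:

    from pypy.module.cpyext import cmodel as model

    int_t = model.PrimitiveType('int')
    print(int_t.get_c_name('x'))                       # -> int x
    arr = model.ArrayType(int_t, 10)
    print(arr.get_c_name('buf'))                       # -> int buf[10]
    print(model.PointerType(arr).get_c_name('p'))      # -> int(* p)[10]
    fnptr = model.FunctionPtrType([model.PointerType(int_t)], int_t,
                                  ellipsis=False)
    print(fnptr.get_c_name('cb'))                      # -> int(* cb)(int *)
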
diff --git a/pypy/module/cpyext/commontypes.py b/pypy/module/cpyext/commontypes.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/commontypes.py
@@ -0,0 +1,80 @@
+import sys
+from . import cmodel as model
+from .error import FFIError
+
+
+COMMON_TYPES = {}
+
+try:
+    # fetch "bool" and all simple Windows types
+    from _cffi_backend import _get_common_types
+    _get_common_types(COMMON_TYPES)
+except ImportError:
+    pass
+
+COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE')
+COMMON_TYPES['bool'] = '_Bool'    # in case we got ImportError above
+
+for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
+    if _type.endswith('_t'):
+        COMMON_TYPES[_type] = _type
+del _type
+
+_CACHE = {}
+
+def resolve_common_type(parser, commontype):
+    try:
+        return _CACHE[commontype]
+    except KeyError:
+        cdecl = COMMON_TYPES.get(commontype, commontype)
+        if not isinstance(cdecl, str):
+            result, quals = cdecl, 0    # cdecl is already a BaseType
+        elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
+            result, quals = model.PrimitiveType(cdecl), 0
+        elif cdecl == 'set-unicode-needed':
+            raise FFIError("The Windows type %r is only available after "
+                           "you call ffi.set_unicode()" % (commontype,))
+        else:
+            if commontype == cdecl:
+                raise FFIError(
+                    "Unsupported type: %r.  Please look at "
+        "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations "
+                    "and file an issue if you think this type should really "
+                    "be supported." % (commontype,))
+            result, quals = parser.parse_type_and_quals(cdecl)   # recursive
+
+        assert isinstance(result, model.BaseTypeByIdentity)
+        _CACHE[commontype] = result, quals
+        return result, quals
+
+
+# ____________________________________________________________
+# extra types for Windows (most of them are in commontypes.c)
+
+
+def win_common_types():
+    return {
+        "UNICODE_STRING": model.StructType(
+            "_UNICODE_STRING",
+            ["Length",
+             "MaximumLength",
+             "Buffer"],
+            [model.PrimitiveType("unsigned short"),
+             model.PrimitiveType("unsigned short"),
+             model.PointerType(model.PrimitiveType("wchar_t"))],
+            [-1, -1, -1]),
+        "PUNICODE_STRING": "UNICODE_STRING *",
+        "PCUNICODE_STRING": "const UNICODE_STRING *",
+
+        "TBYTE": "set-unicode-needed",
+        "TCHAR": "set-unicode-needed",
+        "LPCTSTR": "set-unicode-needed",
+        "PCTSTR": "set-unicode-needed",
+        "LPTSTR": "set-unicode-needed",
+        "PTSTR": "set-unicode-needed",
+        "PTBYTE": "set-unicode-needed",
+        "PTCHAR": "set-unicode-needed",
+        }
+
+if sys.platform == 'win32':
+    COMMON_TYPES.update(win_common_types())
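resolve_common_type() is a small cache in front of the table above; for the
fixed-width typedef names the parser argument is never consulted, so a quick
sketch can pass None for it:

    from pypy.module.cpyext import commontypes

    tp, quals = commontypes.resolve_common_type(None, 'uint32_t')
    print(tp)        # -> <uint32_t>
    print(quals)     # -> 0
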
diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py
--- a/pypy/module/cpyext/cparser.py
+++ b/pypy/module/cpyext/cparser.py
@@ -1,12 +1,12 @@
-import sys
 from collections import OrderedDict
-from cffi import api, model
-from cffi.commontypes import COMMON_TYPES, resolve_common_type
+from . import cmodel as model
+from .commontypes import COMMON_TYPES, resolve_common_type
+from .error import FFIError, CDefError
 try:
     from cffi import _pycparser as pycparser
 except ImportError:
     import pycparser
-import weakref, re
+import weakref, re, sys
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
 from rpython.rlib.rfile import FILEP
 from rpython.rtyper.lltypesystem import rffi, lltype
@@ -161,7 +161,7 @@
             msg = 'cannot parse "%s"\n%s' % (line.strip(), msg)
         else:
             msg = 'parse error\n%s' % (msg,)
-        raise api.CDefError(msg)
+        raise CDefError(msg)
 
     def parse(self, csource, override=False, packed=False, dllexport=False):
         prev_options = self._options
@@ -189,18 +189,12 @@
                 if isinstance(decl, pycparser.c_ast.Decl):
                     self._parse_decl(decl)
                 elif isinstance(decl, pycparser.c_ast.Typedef):
-                    if not decl.name:
-                        raise api.CDefError("typedef does not declare any name",
-                                            decl)
-                    quals = 0
-                    realtype, quals = self._get_type_and_quals(
-                        decl.type, name=decl.name, partial_length_ok=True)
-                    self._declare('typedef ' + decl.name, realtype, quals=quals)
+                    self._parse_typedef(decl)
                 elif decl.__class__.__name__ == 'Pragma':
                     pass    # skip pragma, only in pycparser 2.15
                 else:
-                    raise api.CDefError("unrecognized construct", decl)
-        except api.FFIError as e:
+                    raise CDefError("unrecognized construct", decl)
+        except FFIError as e:
             msg = self._convert_pycparser_error(e, csource)
             if msg:
                 e.args = (e.args[0] + "\n    *** Err: %s" % msg,)
@@ -210,7 +204,7 @@
         if key in self._int_constants:
             if self._int_constants[key] == val:
                 return     # ignore identical double declarations
-            raise api.FFIError(
+            raise FFIError(
                 "multiple declarations of constant: %s" % (key,))
         self._int_constants[key] = val
 
@@ -245,6 +239,14 @@
             tag = 'function '
         self._declare(tag + decl.name, tp)
 
+    def _parse_typedef(self, decl):
+        if not decl.name:
+            raise CDefError("typedef does not declare any name", decl)
+        realtype, quals = self._get_type_and_quals(
+            decl.type, name=decl.name, partial_length_ok=True)
+        tp = model.DefinedType(decl.name, realtype, quals)
+        self._declare('typedef ' + decl.name, tp)
+
     def _parse_decl(self, decl):
         node = decl.type
         if isinstance(node, pycparser.c_ast.FuncDecl):
@@ -259,8 +261,8 @@
             elif isinstance(node, pycparser.c_ast.Enum):
                 self._get_struct_union_enum_type('enum', node)
             elif not decl.name:
-                raise api.CDefError("construct does not declare any variable",
-                                    decl)
+                raise CDefError("construct does not declare any variable",
+                                decl)
             #
             if decl.name:
                 tp, quals = self._get_type_and_quals(node,
@@ -292,7 +294,7 @@
         ast, _, _ = self._parse('void __dummy(\n%s\n);' % cdecl)
         exprnode = ast.ext[-1].type.args.params[0]
         if isinstance(exprnode, pycparser.c_ast.ID):
-            raise api.CDefError("unknown identifier '%s'" % (exprnode.name,))
+            raise CDefError("unknown identifier '%s'" % (exprnode.name,))
         return self._get_type_and_quals(exprnode.type)
 
     def _declare(self, name, obj, included=False, quals=0):
@@ -326,15 +328,6 @@
         return model.PointerType(type, quals)
 
     def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False):
-        # first, dereference typedefs, if we have it already parsed, we're good
-        if (isinstance(typenode, pycparser.c_ast.TypeDecl) and
-            isinstance(typenode.type, pycparser.c_ast.IdentifierType) and
-            len(typenode.type.names) == 1 and
-            ('typedef ' + typenode.type.names[0]) in self._declarations):
-            tp, quals = self._declarations['typedef ' + typenode.type.names[0]]
-            quals |= self._extract_quals(typenode)
-            return tp, quals
-        #
         if isinstance(typenode, pycparser.c_ast.ArrayDecl):
             # array type
             if typenode.dim is None:
@@ -357,6 +350,11 @@
             quals = self._extract_quals(typenode)
             type = typenode.type
             if isinstance(type, pycparser.c_ast.IdentifierType):
+                # first, dereference typedefs: if we have it already parsed, we're good
+                if (len(type.names) == 1 and
+                    ('typedef ' + type.names[0]) in self._declarations):
+                    tp0, quals0 = self._declarations['typedef ' + type.names[0]]
+                    return tp0, (quals | quals0)
                 # assume a primitive type.  get it from .names, but reduce
                 # synonyms to a single chosen combination
                 names = list(type.names)
@@ -413,14 +411,14 @@
             return self._get_struct_union_enum_type('union', typenode, name,
                                                     nested=True), 0
         #
-        raise api.FFIError(":%d: bad or unsupported type declaration" %
+        raise FFIError(":%d: bad or unsupported type declaration" %
                 typenode.coord.line)
 
     def _parse_function_type(self, typenode, funcname=None):
         params = list(getattr(typenode.args, 'params', []))
         for i, arg in enumerate(params):
             if not hasattr(arg, 'type'):
-                raise api.CDefError("%s arg %d: unknown type '%s'"
+                raise CDefError("%s arg %d: unknown type '%s'"
                     " (if you meant to use the old C syntax of giving"
                     " untyped arguments, it is not supported)"
                     % (funcname or 'in expression', i + 1,
@@ -434,7 +432,7 @@
         if ellipsis:
             params.pop()
             if not params:
-                raise api.CDefError(
+                raise CDefError(
                     "%s: a function with only '(...)' as argument"
                     " is not correct C" % (funcname or 'in expression'))
         args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
@@ -533,7 +531,7 @@
             return tp
         #
         if tp.fldnames is not None:
-            raise api.CDefError("duplicate declaration of struct %s" % name)
+            raise CDefError("duplicate declaration of struct %s" % name)
         fldnames = []
         fldtypes = []
         fldbitsize = []
@@ -570,7 +568,7 @@
 
     def _make_partial(self, tp, nested):
         if not isinstance(tp, model.StructOrUnion):
-            raise api.CDefError("%s cannot be partial" % (tp,))
+            raise CDefError("%s cannot be partial" % (tp,))
         if not tp.has_c_name() and not nested:
             raise NotImplementedError("%s is partial but has no C name" %(tp,))
         tp.partial = True
@@ -590,7 +588,7 @@
                     len(s) == 3 or (len(s) == 4 and s[1] == "\\")):
                 return ord(s[-2])
             else:
-                raise api.CDefError("invalid constant %r" % (s,))
+                raise CDefError("invalid constant %r" % (s,))
         #
         if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and
                 exprnode.op == '+'):
@@ -609,12 +607,12 @@
             if partial_length_ok:
                 self._partial_length = True
                 return '...'
-            raise api.FFIError(":%d: unsupported '[...]' here, cannot derive "
-                               "the actual array length in this context"
-                               % exprnode.coord.line)
+            raise FFIError(":%d: unsupported '[...]' here, cannot derive "
+                           "the actual array length in this context"
+                           % exprnode.coord.line)
         #
-        raise api.FFIError(":%d: unsupported expression: expected a "
-                           "simple numeric constant" % exprnode.coord.line)
+        raise FFIError(":%d: unsupported expression: expected a "
+                       "simple numeric constant" % exprnode.coord.line)
 
     def _build_enum_type(self, explicit_name, decls):
         if decls is not None:
@@ -806,6 +804,8 @@
                 del self._TYPES[name]
 
     def convert_type(self, obj, quals=0):
+        if isinstance(obj, model.DefinedType):
+            return self.convert_type(obj.realtype, obj.quals)
         if isinstance(obj, model.PrimitiveType):
             return cname_to_lltype(obj.name)
         elif isinstance(obj, model.StructType):
@@ -856,8 +856,7 @@
         ast, _, _ = self.ctx._parse(cdecl)
         decl = ast.ext[-1]
         tp, quals = self.ctx._get_type_and_quals(decl.type, name=decl.name)
-        FUNCP = self.convert_type(tp.as_function_pointer())
-        return decl.name, FUNCP.TO
+        return FunctionDeclaration(decl.name, tp)
 
     def _freeze_(self):
         if self._frozen:
@@ -881,6 +880,16 @@
         self._frozen = True
         return True
 
+class FunctionDeclaration(object):
+    def __init__(self, name, tp):
+        self.name = name
+        self.tp = tp
+
+    def get_llargs(self, cts):
+        return [cts.convert_type(arg) for arg in self.tp.args]
+
+    def get_llresult(self, cts):
+        return cts.convert_type(self.tp.result)
 
 def parse_source(source, includes=None, headers=None, configure_now=True):
     cts = CTypeSpace(headers=headers, includes=includes)
diff --git a/pypy/module/cpyext/error.py b/pypy/module/cpyext/error.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/error.py
@@ -0,0 +1,20 @@
+
+class FFIError(Exception):
+    pass
+
+class CDefError(Exception):
+    def __str__(self):
+        try:
+            line = 'line %d: ' % (self.args[1].coord.line,)
+        except (AttributeError, TypeError, IndexError):
+            line = ''
+        return '%s%s' % (line, self.args[0])
+
+class VerificationError(Exception):
+    """ An error raised when verification fails
+    """
+
+class VerificationMissing(Exception):
+    """ An error raised when incomplete structures are passed into
+    cdef, but no verification has been done
+    """
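CDefError optionally carries a pycparser AST node as its second argument and
uses the node's .coord.line when printing; a minimal sketch with a stand-in
node (the names are made up):

    from pypy.module.cpyext.error import CDefError

    class FakeCoord(object):
        line = 42
    class FakeNode(object):        # stand-in for a pycparser AST node
        coord = FakeCoord()

    print(str(CDefError("unrecognized construct")))
    # -> unrecognized construct
    print(str(CDefError("unrecognized construct", FakeNode())))
    # -> line 42: unrecognized construct
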
diff --git a/pypy/module/cpyext/include/_numpypy/numpy/__multiarray_api.h b/pypy/module/cpyext/include/_numpypy/numpy/__multiarray_api.h
--- a/pypy/module/cpyext/include/_numpypy/numpy/__multiarray_api.h
+++ b/pypy/module/cpyext/include/_numpypy/numpy/__multiarray_api.h
@@ -11,6 +11,7 @@
 #define NUMPY_IMPORT_ARRAY_RETVAL
 #endif
 
-#define import_array() {return NUMPY_IMPORT_ARRAY_RETVAL;}
+/* on pypy import_array never fails, so it's just an empty macro */
+#define import_array()
 
 
diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
--- a/pypy/module/cpyext/include/object.h
+++ b/pypy/module/cpyext/include/object.h
@@ -7,14 +7,7 @@
 extern "C" {
 #endif
 
-/* Hack: MSVC doesn't support ssize_t */
-#ifdef _WIN32
-#define ssize_t long
-#endif
 #include <cpyext_object.h>
-#ifdef _WIN32
-#undef ssize_t
-#endif
 
 #define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t)-1)>>1))
 #define PY_SSIZE_T_MIN (-PY_SSIZE_T_MAX-1)
diff --git a/pypy/module/cpyext/parse/cpyext_object.h b/pypy/module/cpyext/parse/cpyext_object.h
--- a/pypy/module/cpyext/parse/cpyext_object.h
+++ b/pypy/module/cpyext/parse/cpyext_object.h
@@ -1,5 +1,5 @@
 
-typedef ssize_t Py_ssize_t;
+typedef long Py_ssize_t;
 
 #define PyObject_HEAD  \
     Py_ssize_t ob_refcnt;        \
diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py
--- a/pypy/module/cpyext/test/test_borrow.py
+++ b/pypy/module/cpyext/test/test_borrow.py
@@ -12,13 +12,13 @@
                 PyObject *t = PyTuple_New(1);
                 PyObject *f = PyFloat_FromDouble(42.0);
                 PyObject *g = NULL;
-                printf("Refcnt1: %zd\\n", f->ob_refcnt);
+                printf("Refcnt1: %ld\\n", f->ob_refcnt);
                 PyTuple_SetItem(t, 0, f); // steals reference
-                printf("Refcnt2: %zd\\n", f->ob_refcnt);
+                printf("Refcnt2: %ld\\n", f->ob_refcnt);
                 f = PyTuple_GetItem(t, 0); // borrows reference
-                printf("Refcnt3: %zd\\n", f->ob_refcnt);
+                printf("Refcnt3: %ld\\n", f->ob_refcnt);
                 g = PyTuple_GetItem(t, 0); // borrows reference again
-                printf("Refcnt4: %zd\\n", f->ob_refcnt);
+                printf("Refcnt4: %ld\\n", f->ob_refcnt);
                 printf("COMPARE: %i\\n", f == g);
                 fflush(stdout);
                 Py_DECREF(t);
diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py
--- a/pypy/module/cpyext/test/test_cparser.py
+++ b/pypy/module/cpyext/test/test_cparser.py
@@ -185,10 +185,27 @@
     typedef TestFloatObject* (*func_t)(int, int);
     """
     cts = parse_source(decl)
-    name, FUNC = cts.parse_func("func_t some_func(TestFloatObject*)")
-    assert name == 'some_func'
-    assert FUNC.RESULT == cts.gettype('func_t')
-    assert FUNC.ARGS == (cts.gettype('TestFloatObject *'),)
+    func_decl = cts.parse_func("func_t * some_func(TestFloatObject*)")
+    assert func_decl.name == 'some_func'
+    assert func_decl.get_llresult(cts) == cts.gettype('func_t*')
+    assert func_decl.get_llargs(cts) == [cts.gettype('TestFloatObject *')]
+
+def test_write_func():
+    from ..api import ApiFunction
+    from rpython.translator.c.database import LowLevelDatabase
+    db = LowLevelDatabase()
+    cdef = """
+    typedef ssize_t Py_ssize_t;
+    """
+    cts = parse_source(cdef)
+    cdecl = "Py_ssize_t * some_func(Py_ssize_t*)"
+    decl = cts.parse_func(cdecl)
+    api_function = ApiFunction(
+        decl.get_llargs(cts), decl.get_llresult(cts), lambda space, x: None,
+        cdecl=decl)
+    assert (api_function.get_api_decl('some_func', db) ==
+            "PyAPI_FUNC(Py_ssize_t *) some_func(Py_ssize_t * arg0);")
+
 
 def test_wchar_t():
     cdef = """
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -627,7 +627,7 @@
             refcnt_after = true_obj->ob_refcnt;
             Py_DECREF(true_obj);
             Py_DECREF(true_obj);
-            fprintf(stderr, "REFCNT %zd %zd\\n", refcnt, refcnt_after);
+            fprintf(stderr, "REFCNT %ld %ld\\n", refcnt, refcnt_after);
             return PyBool_FromLong(refcnt_after == refcnt + 2);
         }
         static PyObject* foo_bar(PyObject* self, PyObject *args)
@@ -643,7 +643,7 @@
                 return NULL;
             refcnt_after = true_obj->ob_refcnt;
             Py_DECREF(tup);
-            fprintf(stderr, "REFCNT2 %zd %zd %zd\\n", refcnt, refcnt_after,
+            fprintf(stderr, "REFCNT2 %ld %ld %ld\\n", refcnt, refcnt_after,
                     true_obj->ob_refcnt);
             return PyBool_FromLong(refcnt_after == refcnt + 1 &&
                                    refcnt == true_obj->ob_refcnt);
diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
--- a/rpython/jit/metainterp/history.py
+++ b/rpython/jit/metainterp/history.py
@@ -726,15 +726,7 @@
             op.setref_base(value)
 
     def _record_op(self, opnum, argboxes, descr=None):
-        from rpython.jit.metainterp.opencoder import FrontendTagOverflow
-
-        try:
-            return self.trace.record_op(opnum, argboxes, descr)
-        except FrontendTagOverflow:
-            # note that with the default settings this one should not
-            # happen - however if we hit that case, we don't get
-            # anything disabled
-            raise SwitchToBlackhole(Counters.ABORT_TOO_LONG)
+        return self.trace.record_op(opnum, argboxes, descr)
 
     @specialize.argtype(3)
     def record(self, opnum, argboxes, value, descr=None):
diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
--- a/rpython/jit/metainterp/opencoder.py
+++ b/rpython/jit/metainterp/opencoder.py
@@ -49,8 +49,12 @@
     way up to lltype.Signed for indexes everywhere
     """
 
-class FrontendTagOverflow(Exception):
-    pass
+def frontend_tag_overflow():
+    # Minor abstraction leak: raise directly the right exception
+    # expected by the rest of the machinery
+    from rpython.jit.metainterp import history
+    from rpython.rlib.jit import Counters
+    raise history.SwitchToBlackhole(Counters.ABORT_TOO_LONG)
 
 class BaseTrace(object):
     pass
@@ -296,7 +300,7 @@
             # grow by 2X
             self._ops = self._ops + [rffi.cast(model.STORAGE_TP, 0)] * len(self._ops)
         if not model.MIN_VALUE <= v <= model.MAX_VALUE:
-            raise FrontendTagOverflow
+            raise frontend_tag_overflow()
         self._ops[self._pos] = rffi.cast(model.STORAGE_TP, v)
         self._pos += 1
 
diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py
--- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py
+++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py
@@ -519,6 +519,8 @@
         self.jump_args.append(preamble_op.preamble_op)
 
     def use_box(self, box, preamble_op, optimizer=None):
+        if not self.build_inplace:
+            raise InvalidLoop("Forcing boxes would modify an existing short preamble")
         jump_op = self.short.pop()
         AbstractShortPreambleBuilder.use_box(self, box, preamble_op, optimizer)
         self.short.append(jump_op)
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2424,7 +2424,6 @@
         self.staticdata.profiler.start_tracing()
         assert jitdriver_sd is self.jitdriver_sd
         self.staticdata.try_to_free_some_loops()
-        self.create_empty_history()
         try:
             original_boxes = self.initialize_original_boxes(jitdriver_sd, *args)
             return self._compile_and_run_once(original_boxes)
@@ -2438,10 +2437,11 @@
         num_green_args = self.jitdriver_sd.num_green_args
         original_greenkey = original_boxes[:num_green_args]
         self.resumekey = compile.ResumeFromInterpDescr(original_greenkey)
-        self.history.set_inputargs(original_boxes[num_green_args:],
-                                   self.staticdata)
         self.seen_loop_header_for_jdindex = -1
         try:
+            self.create_empty_history()
+            self.history.set_inputargs(original_boxes[num_green_args:],
+                                       self.staticdata)
             self.interpret()
         except SwitchToBlackhole as stb:
             self.run_blackhole_interp_to_cancel_tracing(stb)
@@ -2461,9 +2461,11 @@
         if self.resumekey_original_loop_token is None:
             raise compile.giveup() # should be rare
         self.staticdata.try_to_free_some_loops()
-        inputargs = self.initialize_state_from_guard_failure(key, deadframe)
         try:
+            inputargs = self.initialize_state_from_guard_failure(key, deadframe)
             return self._handle_guard_failure(resumedescr, key, inputargs, deadframe)
+        except SwitchToBlackhole as stb:
+            self.run_blackhole_interp_to_cancel_tracing(stb)
         finally:
             self.resumekey_original_loop_token = None
             self.staticdata.profiler.end_tracing()
@@ -2475,13 +2477,10 @@
         self.seen_loop_header_for_jdindex = -1
         if isinstance(key, compile.ResumeAtPositionDescr):
             self.seen_loop_header_for_jdindex = self.jitdriver_sd.index
-        try:
-            self.prepare_resume_from_failure(deadframe, inputargs, resumedescr)
-            if self.resumekey_original_loop_token is None:   # very rare case
-                raise SwitchToBlackhole(Counters.ABORT_BRIDGE)
-            self.interpret()
-        except SwitchToBlackhole as stb:
-            self.run_blackhole_interp_to_cancel_tracing(stb)
+        self.prepare_resume_from_failure(deadframe, inputargs, resumedescr)
+        if self.resumekey_original_loop_token is None:   # very rare case
+            raise SwitchToBlackhole(Counters.ABORT_BRIDGE)
+        self.interpret()
         assert False, "should always raise"
 
     def run_blackhole_interp_to_cancel_tracing(self, stb):
diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py
--- a/rpython/jit/metainterp/test/test_opencoder.py
+++ b/rpython/jit/metainterp/test/test_opencoder.py
@@ -1,6 +1,5 @@
 import py
 from rpython.jit.metainterp.opencoder import Trace, untag, TAGINT, TAGBOX
-from rpython.jit.metainterp.opencoder import FrontendTagOverflow
 from rpython.jit.metainterp.resoperation import rop, AbstractResOp
 from rpython.jit.metainterp.history import ConstInt, IntFrontendOp
 from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer
@@ -8,6 +7,7 @@
 from rpython.jit.metainterp.test.strategies import lists_of_operations
 from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest
 from rpython.jit.metainterp.history import TreeLoop, AbstractDescr
+from rpython.jit.metainterp.history import SwitchToBlackhole
 from hypothesis import given, strategies
 
 class JitCode(object):
@@ -209,5 +209,5 @@
     def test_tag_overflow(self):
         t = Trace([], metainterp_sd)
         i0 = FakeOp(100000)
-        py.test.raises(FrontendTagOverflow, t.record_op, rop.FINISH, [i0])
+        py.test.raises(SwitchToBlackhole, t.record_op, rop.FINISH, [i0])
         assert t.unpack() == ([], [])
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_zvector.py
rename from rpython/jit/metainterp/test/test_vector.py
rename to rpython/jit/metainterp/test/test_zvector.py

