[pypy-commit] pypy fortran-order: merge default into branch

mattip noreply at buildbot.pypy.org
Tue Oct 6 21:31:33 CEST 2015


Author: mattip <matti.picus at gmail.com>
Branch: fortran-order
Changeset: r80004:37e0897bbbbe
Date: 2015-10-06 22:31 +0300
http://bitbucket.org/pypy/pypy/changeset/37e0897bbbbe/

Log:	merge default into branch

diff too long, truncating to 2000 out of 5515 lines

diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py
--- a/dotviewer/graphclient.py
+++ b/dotviewer/graphclient.py
@@ -127,16 +127,8 @@
         return spawn_graphserver_handler((host, port))
 
 def spawn_local_handler():
-    if hasattr(sys, 'pypy_objspaceclass'):
-        # if 'python' is actually PyPy, e.g. in a virtualenv, then
-        # try hard to find a real CPython
-        try:
-            python = subprocess.check_output(
-                'env -i $SHELL -l -c "which python"', shell=True).strip()
-        except subprocess.CalledProcessError:
-            # did not work, fall back to 'python'
-            python = 'python'
-    else:
+    python = os.getenv('PYPY_PYGAME_PYTHON')
+    if not python:
         python = sys.executable
     args = [python, '-u', GRAPHSERVER, '--stdio']
     p = subprocess.Popen(args,
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -158,7 +158,7 @@
     RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'),
     RegrTest('test_codeop.py', core=True),
     RegrTest('test_coding.py', core=True),
-    RegrTest('test_coercion.py', core=True),
+    RegrTest('test_coercion.py', core=True, usemodules='struct'),
     RegrTest('test_collections.py', usemodules='binascii struct'),
     RegrTest('test_colorsys.py'),
     RegrTest('test_commands.py'),
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -609,7 +609,7 @@
     def make_accessor_locked(name):
         key = 'function ' + name
         if key in ffi._parser._declarations:
-            tp = ffi._parser._declarations[key]
+            tp, _ = ffi._parser._declarations[key]
             BType = ffi._get_cached_btype(tp)
             try:
                 value = backendlib.load_function(BType, name)
@@ -620,7 +620,7 @@
         #
         key = 'variable ' + name
         if key in ffi._parser._declarations:
-            tp = ffi._parser._declarations[key]
+            tp, _ = ffi._parser._declarations[key]
             BType = ffi._get_cached_btype(tp)
             read_variable = backendlib.read_variable
             write_variable = backendlib.write_variable
@@ -631,12 +631,23 @@
         #
         if not copied_enums:
             from . import model
-            for key, tp in ffi._parser._declarations.items():
+            error = None
+            for key, (tp, _) in ffi._parser._declarations.items():
                 if not isinstance(tp, model.EnumType):
                     continue
+                try:
+                    tp.check_not_partial()
+                except Exception as e:
+                    error = e
+                    continue
                 for enumname, enumval in zip(tp.enumerators, tp.enumvalues):
                     if enumname not in library.__dict__:
                         library.__dict__[enumname] = enumval
+            if error is not None:
+                if name in library.__dict__:
+                    return     # ignore error, about a different enum
+                raise error
+
             for key, val in ffi._parser._int_constants.items():
                 if key not in library.__dict__:
                     library.__dict__[key] = val
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -192,6 +192,7 @@
                     if not decl.name:
                         raise api.CDefError("typedef does not declare any name",
                                             decl)
+                    quals = 0
                     if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType)
                             and decl.type.type.names[-1] == '__dotdotdot__'):
                         realtype = self._get_unknown_type(decl)
@@ -202,8 +203,9 @@
                           decl.type.type.type.names == ['__dotdotdot__']):
                         realtype = model.unknown_ptr_type(decl.name)
                     else:
-                        realtype = self._get_type(decl.type, name=decl.name)
-                    self._declare('typedef ' + decl.name, realtype)
+                        realtype, quals = self._get_type_and_quals(
+                            decl.type, name=decl.name)
+                    self._declare('typedef ' + decl.name, realtype, quals=quals)
                 else:
                     raise api.CDefError("unrecognized construct", decl)
         except api.FFIError as e:
@@ -255,9 +257,9 @@
     def _parse_decl(self, decl):
         node = decl.type
         if isinstance(node, pycparser.c_ast.FuncDecl):
-            tp = self._get_type(node, name=decl.name)
+            tp, quals = self._get_type_and_quals(node, name=decl.name)
             assert isinstance(tp, model.RawFunctionType)
-            tp = self._get_type_pointer(tp)
+            tp = self._get_type_pointer(tp, quals)
             self._declare('function ' + decl.name, tp)
         else:
             if isinstance(node, pycparser.c_ast.Struct):
@@ -271,9 +273,10 @@
                                     decl)
             #
             if decl.name:
-                tp = self._get_type(node, partial_length_ok=True)
+                tp, quals = self._get_type_and_quals(node,
+                                                     partial_length_ok=True)
                 if tp.is_raw_function:
-                    tp = self._get_type_pointer(tp)
+                    tp = self._get_type_pointer(tp, quals)
                     self._declare('function ' + decl.name, tp)
                 elif (tp.is_integer_type() and
                         hasattr(decl, 'init') and
@@ -287,10 +290,10 @@
                         _r_int_literal.match(decl.init.expr.value)):
                     self._add_integer_constant(decl.name,
                                                '-' + decl.init.expr.value)
-                elif self._is_constant_globalvar(node):
-                    self._declare('constant ' + decl.name, tp)
+                elif (quals & model.Q_CONST) and not tp.is_array_type:
+                    self._declare('constant ' + decl.name, tp, quals=quals)
                 else:
-                    self._declare('variable ' + decl.name, tp)
+                    self._declare('variable ' + decl.name, tp, quals=quals)
 
     def parse_type(self, cdecl):
         ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2]
@@ -298,40 +301,51 @@
         exprnode = ast.ext[-1].type.args.params[0]
         if isinstance(exprnode, pycparser.c_ast.ID):
             raise api.CDefError("unknown identifier '%s'" % (exprnode.name,))
-        return self._get_type(exprnode.type)
+        tp, quals = self._get_type_and_quals(exprnode.type)
+        return tp
 
-    def _declare(self, name, obj, included=False):
+    def _declare(self, name, obj, included=False, quals=0):
         if name in self._declarations:
-            if self._declarations[name] is obj:
+            prevobj, prevquals = self._declarations[name]
+            if prevobj is obj and prevquals == quals:
                 return
             if not self._override:
                 raise api.FFIError(
                     "multiple declarations of %s (for interactive usage, "
                     "try cdef(xx, override=True))" % (name,))
         assert '__dotdotdot__' not in name.split()
-        self._declarations[name] = obj
+        self._declarations[name] = (obj, quals)
         if included:
             self._included_declarations.add(obj)
 
-    def _get_type_pointer(self, type, const=False, declname=None):
+    def _extract_quals(self, type):
+        quals = 0
+        if isinstance(type, (pycparser.c_ast.TypeDecl,
+                             pycparser.c_ast.PtrDecl)):
+            if 'const' in type.quals:
+                quals |= model.Q_CONST
+            if 'restrict' in type.quals:
+                quals |= model.Q_RESTRICT
+        return quals
+
+    def _get_type_pointer(self, type, quals, declname=None):
         if isinstance(type, model.RawFunctionType):
             return type.as_function_pointer()
         if (isinstance(type, model.StructOrUnionOrEnum) and
                 type.name.startswith('$') and type.name[1:].isdigit() and
                 type.forcename is None and declname is not None):
-            return model.NamedPointerType(type, declname)
-        if const:
-            return model.ConstPointerType(type)
-        return model.PointerType(type)
+            return model.NamedPointerType(type, declname, quals)
+        return model.PointerType(type, quals)
 
-    def _get_type(self, typenode, name=None, partial_length_ok=False):
+    def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False):
         # first, dereference typedefs, if we have it already parsed, we're good
         if (isinstance(typenode, pycparser.c_ast.TypeDecl) and
             isinstance(typenode.type, pycparser.c_ast.IdentifierType) and
             len(typenode.type.names) == 1 and
             ('typedef ' + typenode.type.names[0]) in self._declarations):
-            type = self._declarations['typedef ' + typenode.type.names[0]]
-            return type
+            tp, quals = self._declarations['typedef ' + typenode.type.names[0]]
+            quals |= self._extract_quals(typenode)
+            return tp, quals
         #
         if isinstance(typenode, pycparser.c_ast.ArrayDecl):
             # array type
@@ -340,18 +354,19 @@
             else:
                 length = self._parse_constant(
                     typenode.dim, partial_length_ok=partial_length_ok)
-            tp = self._get_type(typenode.type,
+            tp, quals = self._get_type_and_quals(typenode.type,
                                 partial_length_ok=partial_length_ok)
-            return model.ArrayType(tp, length)
+            return model.ArrayType(tp, length), quals
         #
         if isinstance(typenode, pycparser.c_ast.PtrDecl):
             # pointer type
-            const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl)
-                     and 'const' in typenode.type.quals)
-            return self._get_type_pointer(self._get_type(typenode.type), const,
-                                          declname=name)
+            itemtype, itemquals = self._get_type_and_quals(typenode.type)
+            tp = self._get_type_pointer(itemtype, itemquals, declname=name)
+            quals = self._extract_quals(typenode)
+            return tp, quals
         #
         if isinstance(typenode, pycparser.c_ast.TypeDecl):
+            quals = self._extract_quals(typenode)
             type = typenode.type
             if isinstance(type, pycparser.c_ast.IdentifierType):
                 # assume a primitive type.  get it from .names, but reduce
@@ -379,35 +394,38 @@
                     names = newnames + names
                 ident = ' '.join(names)
                 if ident == 'void':
-                    return model.void_type
+                    return model.void_type, quals
                 if ident == '__dotdotdot__':
                     raise api.FFIError(':%d: bad usage of "..."' %
                             typenode.coord.line)
-                return resolve_common_type(ident)
+                return resolve_common_type(ident), quals
             #
             if isinstance(type, pycparser.c_ast.Struct):
                 # 'struct foobar'
-                return self._get_struct_union_enum_type('struct', type, name)
+                tp = self._get_struct_union_enum_type('struct', type, name)
+                return tp, quals
             #
             if isinstance(type, pycparser.c_ast.Union):
                 # 'union foobar'
-                return self._get_struct_union_enum_type('union', type, name)
+                tp = self._get_struct_union_enum_type('union', type, name)
+                return tp, quals
             #
             if isinstance(type, pycparser.c_ast.Enum):
                 # 'enum foobar'
-                return self._get_struct_union_enum_type('enum', type, name)
+                tp = self._get_struct_union_enum_type('enum', type, name)
+                return tp, quals
         #
         if isinstance(typenode, pycparser.c_ast.FuncDecl):
             # a function type
-            return self._parse_function_type(typenode, name)
+            return self._parse_function_type(typenode, name), 0
         #
         # nested anonymous structs or unions end up here
         if isinstance(typenode, pycparser.c_ast.Struct):
             return self._get_struct_union_enum_type('struct', typenode, name,
-                                                    nested=True)
+                                                    nested=True), 0
         if isinstance(typenode, pycparser.c_ast.Union):
             return self._get_struct_union_enum_type('union', typenode, name,
-                                                    nested=True)
+                                                    nested=True), 0
         #
         raise api.FFIError(":%d: bad or unsupported type declaration" %
                 typenode.coord.line)
@@ -426,28 +444,21 @@
                 raise api.CDefError(
                     "%s: a function with only '(...)' as argument"
                     " is not correct C" % (funcname or 'in expression'))
-        args = [self._as_func_arg(self._get_type(argdeclnode.type))
+        args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
                 for argdeclnode in params]
         if not ellipsis and args == [model.void_type]:
             args = []
-        result = self._get_type(typenode.type)
+        result, quals = self._get_type_and_quals(typenode.type)
         return model.RawFunctionType(tuple(args), result, ellipsis)
 
-    def _as_func_arg(self, type):
+    def _as_func_arg(self, type, quals):
         if isinstance(type, model.ArrayType):
-            return model.PointerType(type.item)
+            return model.PointerType(type.item, quals)
         elif isinstance(type, model.RawFunctionType):
             return type.as_function_pointer()
         else:
             return type
 
-    def _is_constant_globalvar(self, typenode):
-        if isinstance(typenode, pycparser.c_ast.PtrDecl):
-            return 'const' in typenode.quals
-        if isinstance(typenode, pycparser.c_ast.TypeDecl):
-            return 'const' in typenode.quals
-        return False
-
     def _get_struct_union_enum_type(self, kind, type, name=None, nested=False):
         # First, a level of caching on the exact 'type' node of the AST.
         # This is obscure, but needed because pycparser "unrolls" declarations
@@ -486,7 +497,7 @@
         else:
             explicit_name = name
             key = '%s %s' % (kind, name)
-            tp = self._declarations.get(key, None)
+            tp, _ = self._declarations.get(key, (None, None))
         #
         if tp is None:
             if kind == 'struct':
@@ -528,6 +539,7 @@
         fldnames = []
         fldtypes = []
         fldbitsize = []
+        fldquals = []
         for decl in type.decls:
             if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and
                     ''.join(decl.type.names) == '__dotdotdot__'):
@@ -541,7 +553,8 @@
             else:
                 bitsize = self._parse_constant(decl.bitsize)
             self._partial_length = False
-            type = self._get_type(decl.type, partial_length_ok=True)
+            type, fqual = self._get_type_and_quals(decl.type,
+                                                   partial_length_ok=True)
             if self._partial_length:
                 self._make_partial(tp, nested)
             if isinstance(type, model.StructType) and type.partial:
@@ -549,9 +562,11 @@
             fldnames.append(decl.name or '')
             fldtypes.append(type)
             fldbitsize.append(bitsize)
+            fldquals.append(fqual)
         tp.fldnames = tuple(fldnames)
         tp.fldtypes = tuple(fldtypes)
         tp.fldbitsize = tuple(fldbitsize)
+        tp.fldquals = tuple(fldquals)
         if fldbitsize != [-1] * len(fldbitsize):
             if isinstance(tp, model.StructType) and tp.partial:
                 raise NotImplementedError("%s: using both bitfields and '...;'"
@@ -632,14 +647,12 @@
         return tp
 
     def include(self, other):
-        for name, tp in other._declarations.items():
+        for name, (tp, quals) in other._declarations.items():
             if name.startswith('anonymous $enum_$'):
                 continue   # fix for test_anonymous_enum_include
             kind = name.split(' ', 1)[0]
-            if kind in ('struct', 'union', 'enum', 'anonymous'):
-                self._declare(name, tp, included=True)
-            elif kind == 'typedef':
-                self._declare(name, tp, included=True)
+            if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'):
+                self._declare(name, tp, included=True, quals=quals)
         for k, v in other._int_constants.items():
             self._add_constants(k, v)
 
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -4,11 +4,26 @@
 from .lock import allocate_lock
 
 
+# type qualifiers
+Q_CONST    = 0x01
+Q_RESTRICT = 0x02
+
+def qualify(quals, replace_with):
+    if quals & Q_CONST:
+        replace_with = ' const ' + replace_with.lstrip()
+    if quals & Q_RESTRICT:
+        # It seems that __restrict is supported by gcc and msvc.
+        # If you hit some different compiler, add a #define in
+        # _cffi_include.h for it (and in its copies, documented there)
+        replace_with = ' __restrict ' + replace_with.lstrip()
+    return replace_with
+
+
 class BaseTypeByIdentity(object):
     is_array_type = False
     is_raw_function = False
 
-    def get_c_name(self, replace_with='', context='a C file'):
+    def get_c_name(self, replace_with='', context='a C file', quals=0):
         result = self.c_name_with_marker
         assert result.count('&') == 1
         # some logic duplication with ffi.getctype()... :-(
@@ -18,6 +33,7 @@
                 replace_with = '(%s)' % replace_with
             elif not replace_with[0] in '[(':
                 replace_with = ' ' + replace_with
+        replace_with = qualify(quals, replace_with)
         result = result.replace('&', replace_with)
         if '$' in result:
             from .ffiplatform import VerificationError
@@ -225,16 +241,14 @@
 
 
 class PointerType(BaseType):
-    _attrs_ = ('totype',)
-    _base_pattern       = " *&"
-    _base_pattern_array = "(*&)"
+    _attrs_ = ('totype', 'quals')
 
-    def __init__(self, totype):
+    def __init__(self, totype, quals=0):
         self.totype = totype
+        self.quals = quals
+        extra = qualify(quals, " *&")
         if totype.is_array_type:
-            extra = self._base_pattern_array
-        else:
-            extra = self._base_pattern
+            extra = "(%s)" % (extra.lstrip(),)
         self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
 
     def build_backend_type(self, ffi, finishlist):
@@ -243,10 +257,8 @@
 
 voidp_type = PointerType(void_type)
 
-
-class ConstPointerType(PointerType):
-    _base_pattern       = " const *&"
-    _base_pattern_array = "(const *&)"
+def ConstPointerType(totype):
+    return PointerType(totype, Q_CONST)
 
 const_voidp_type = ConstPointerType(void_type)
 
@@ -254,8 +266,8 @@
 class NamedPointerType(PointerType):
     _attrs_ = ('totype', 'name')
 
-    def __init__(self, totype, name):
-        PointerType.__init__(self, totype)
+    def __init__(self, totype, name, quals=0):
+        PointerType.__init__(self, totype, quals)
         self.name = name
         self.c_name_with_marker = name + '&'
 
@@ -315,11 +327,12 @@
     partial = False
     packed = False
 
-    def __init__(self, name, fldnames, fldtypes, fldbitsize):
+    def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
         self.name = name
         self.fldnames = fldnames
         self.fldtypes = fldtypes
         self.fldbitsize = fldbitsize
+        self.fldquals = fldquals
         self.build_c_name_with_marker()
 
     def has_anonymous_struct_fields(self):
@@ -331,14 +344,17 @@
         return False
 
     def enumfields(self):
-        for name, type, bitsize in zip(self.fldnames, self.fldtypes,
-                                       self.fldbitsize):
+        fldquals = self.fldquals
+        if fldquals is None:
+            fldquals = (0,) * len(self.fldnames)
+        for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
+                                              self.fldbitsize, fldquals):
             if name == '' and isinstance(type, StructOrUnion):
                 # nested anonymous struct/union
                 for result in type.enumfields():
                     yield result
             else:
-                yield (name, type, bitsize)
+                yield (name, type, bitsize, quals)
 
     def force_flatten(self):
         # force the struct or union to have a declaration that lists
@@ -347,13 +363,16 @@
         names = []
         types = []
         bitsizes = []
-        for name, type, bitsize in self.enumfields():
+        fldquals = []
+        for name, type, bitsize, quals in self.enumfields():
             names.append(name)
             types.append(type)
             bitsizes.append(bitsize)
+            fldquals.append(quals)
         self.fldnames = tuple(names)
         self.fldtypes = tuple(types)
         self.fldbitsize = tuple(bitsizes)
+        self.fldquals = tuple(fldquals)
 
     def get_cached_btype(self, ffi, finishlist, can_delay=False):
         BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
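
The qualifiers stored on a type are reinserted when C source is generated: get_c_name() passes them to qualify(), which simply prefixes ' const ' and/or ' __restrict ' to the declarator text. A small self-contained illustration of the resulting spelling (the 'int&' marker string is an approximation of cffi's internal c_name_with_marker, used here only to show the string manipulation):

    Q_CONST, Q_RESTRICT = 0x01, 0x02        # as defined above

    def qualify(quals, replace_with):       # same helper as above
        if quals & Q_CONST:
            replace_with = ' const ' + replace_with.lstrip()
        if quals & Q_RESTRICT:
            replace_with = ' __restrict ' + replace_with.lstrip()
        return replace_with

    # '&' marks where the declarator goes; with Q_CONST set, a field check
    # for 'const int x;' comes out as 'int const *tmp'.
    print('int&'.replace('&', qualify(Q_CONST, ' *tmp')))   # int const *tmp
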
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -195,17 +195,15 @@
             elif isinstance(tp, model.StructOrUnion):
                 if tp.fldtypes is not None and (
                         tp not in self.ffi._parser._included_declarations):
-                    for name1, tp1, _ in tp.enumfields():
+                    for name1, tp1, _, _ in tp.enumfields():
                         self._do_collect_type(self._field_type(tp, name1, tp1))
             else:
                 for _, x in tp._get_items():
                     self._do_collect_type(x)
 
-    def _get_declarations(self):
-        return sorted(self.ffi._parser._declarations.items())
-
     def _generate(self, step_name):
-        for name, tp in self._get_declarations():
+        lst = self.ffi._parser._declarations.items()
+        for name, (tp, quals) in sorted(lst):
             kind, realname = name.split(' ', 1)
             try:
                 method = getattr(self, '_generate_cpy_%s_%s' % (kind,
@@ -214,6 +212,7 @@
                 raise ffiplatform.VerificationError(
                     "not implemented in recompile(): %r" % name)
             try:
+                self._current_quals = quals
                 method(tp, realname)
             except Exception as e:
                 model.attach_exception_info(e, name)
@@ -774,7 +773,7 @@
         prnt('{')
         prnt('  /* only to generate compile-time warnings or errors */')
         prnt('  (void)p;')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             try:
                 if ftype.is_integer_type() or fbitsize >= 0:
                     # accept all integers, but complain on float or double
@@ -789,7 +788,8 @@
                     ftype = ftype.item
                     fname = fname + '[0]'
                 prnt('  { %s = &p->%s; (void)tmp; }' % (
-                    ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+                    ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+                    fname))
             except ffiplatform.VerificationError as e:
                 prnt('  /* %s */' % str(e))   # cannot verify it, ignore
         prnt('}')
@@ -823,7 +823,7 @@
         c_fields = []
         if reason_for_not_expanding is None:
             enumfields = list(tp.enumfields())
-            for fldname, fldtype, fbitsize in enumfields:
+            for fldname, fldtype, fbitsize, fqual in enumfields:
                 fldtype = self._field_type(tp, fldname, fldtype)
                 # cname is None for _add_missing_struct_unions() only
                 op = OP_NOOP
@@ -879,7 +879,9 @@
         # because they don't have any known C name.  Check that they are
         # not partial (we can't complete or verify them!) and emit them
         # anonymously.
-        for tp in list(self._struct_unions):
+        lst = list(self._struct_unions.items())
+        lst.sort(key=lambda tp_order: tp_order[1])
+        for tp, order in lst:
             if tp not in self._seen_struct_unions:
                 if tp.partial:
                     raise NotImplementedError("internal inconsistency: %r is "
@@ -1004,6 +1006,8 @@
     def _enum_ctx(self, tp, cname):
         type_index = self._typesdict[tp]
         type_op = CffiOp(OP_ENUM, -1)
+        if self.target_is_python:
+            tp.check_not_partial()
         for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
             self._lsts["global"].append(
                 GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op,
@@ -1081,7 +1085,8 @@
         # if 'tp' were a function type, but that is not possible here.
         # (If 'tp' is a function _pointer_ type, then casts from "fn_t
         # **" to "void *" are again no-ops, as far as I can tell.)
-        prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,)))
+        decl = '*_cffi_var_%s(void)' % (name,)
+        prnt('static ' + tp.get_c_name(decl, quals=self._current_quals))
         prnt('{')
         prnt('  return %s(%s);' % (ampersand, name))
         prnt('}')
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -197,7 +197,10 @@
         return library
 
     def _get_declarations(self):
-        return sorted(self.ffi._parser._declarations.items())
+        lst = [(key, tp) for (key, (tp, qual)) in
+                                self.ffi._parser._declarations.items()]
+        lst.sort()
+        return lst
 
     def _generate(self, step_name):
         for name, tp in self._get_declarations():
@@ -468,7 +471,7 @@
         prnt('{')
         prnt('  /* only to generate compile-time warnings or errors */')
         prnt('  (void)p;')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if (isinstance(ftype, model.PrimitiveType)
                 and ftype.is_integer_type()) or fbitsize >= 0:
                 # accept all integers, but complain on float or double
@@ -477,7 +480,8 @@
                 # only accept exactly the type declared.
                 try:
                     prnt('  { %s = &p->%s; (void)tmp; }' % (
-                        ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+                        ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+                        fname))
                 except ffiplatform.VerificationError as e:
                     prnt('  /* %s */' % str(e))   # cannot verify it, ignore
         prnt('}')
@@ -488,7 +492,7 @@
         prnt('  static Py_ssize_t nums[] = {')
         prnt('    sizeof(%s),' % cname)
         prnt('    offsetof(struct _cffi_aligncheck, y),')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if fbitsize >= 0:
                 continue      # xxx ignore fbitsize for now
             prnt('    offsetof(%s, %s),' % (cname, fname))
@@ -552,7 +556,7 @@
             check(layout[0], ffi.sizeof(BStruct), "wrong total size")
             check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
             i = 2
-            for fname, ftype, fbitsize in tp.enumfields():
+            for fname, ftype, fbitsize, fqual in tp.enumfields():
                 if fbitsize >= 0:
                     continue        # xxx ignore fbitsize for now
                 check(layout[i], ffi.offsetof(BStruct, fname),
diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py
--- a/lib_pypy/cffi/vengine_gen.py
+++ b/lib_pypy/cffi/vengine_gen.py
@@ -87,7 +87,10 @@
         return library
 
     def _get_declarations(self):
-        return sorted(self.ffi._parser._declarations.items())
+        lst = [(key, tp) for (key, (tp, qual)) in
+                                self.ffi._parser._declarations.items()]
+        lst.sort()
+        return lst
 
     def _generate(self, step_name):
         for name, tp in self._get_declarations():
@@ -260,7 +263,7 @@
         prnt('{')
         prnt('  /* only to generate compile-time warnings or errors */')
         prnt('  (void)p;')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if (isinstance(ftype, model.PrimitiveType)
                 and ftype.is_integer_type()) or fbitsize >= 0:
                 # accept all integers, but complain on float or double
@@ -269,7 +272,8 @@
                 # only accept exactly the type declared.
                 try:
                     prnt('  { %s = &p->%s; (void)tmp; }' % (
-                        ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+                        ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+                        fname))
                 except ffiplatform.VerificationError as e:
                     prnt('  /* %s */' % str(e))   # cannot verify it, ignore
         prnt('}')
@@ -280,7 +284,7 @@
         prnt('  static intptr_t nums[] = {')
         prnt('    sizeof(%s),' % cname)
         prnt('    offsetof(struct _cffi_aligncheck, y),')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if fbitsize >= 0:
                 continue      # xxx ignore fbitsize for now
             prnt('    offsetof(%s, %s),' % (cname, fname))
@@ -342,7 +346,7 @@
             check(layout[0], ffi.sizeof(BStruct), "wrong total size")
             check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
             i = 2
-            for fname, ftype, fbitsize in tp.enumfields():
+            for fname, ftype, fbitsize, fqual in tp.enumfields():
                 if fbitsize >= 0:
                     continue        # xxx ignore fbitsize for now
                 check(layout[i], ffi.offsetof(BStruct, fname),
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -39,8 +39,9 @@
     "_csv", "cppyy", "_pypyjson"
 ])
 
-if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64'
-        and sys.maxint > 2**32):    # it's not enough that we get x86_64
+if ((sys.platform.startswith('linux') or sys.platform == 'darwin')
+    and os.uname()[4] == 'x86_64' and sys.maxint > 2**32):
+    # it's not enough that we get x86_64
     working_modules.add('_vmprof')
 
 translation_modules = default_modules.copy()
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -43,3 +43,13 @@
 .. branch: numpy-ctypes
 
 Add support for ndarray.ctypes property.
+
+.. branch: share-guard-info
+
+Share guard resume data between consecutive guards that have only
+pure operations and guards in between.
+
+.. branch: issue-2148
+
+Fix performance regression on operations mixing numpy scalars and Python 
+floats, cf. issue #2148.
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -253,26 +253,27 @@
 
     def binaryop(self, space, w_other):
         w_a, w_b = _coerce_helper(space, self, w_other)
-        if w_a is None:
-            w_a = self
-            w_b = w_other
-        if w_a is self:
-            w_meth = self.getattr(space, specialname, False)
+        if isinstance(w_a, W_InstanceObject):
+            w_meth = w_a.getattr(space, specialname, False)
             if w_meth is None:
                 return space.w_NotImplemented
             return space.call_function(w_meth, w_b)
         else:
+            # fall back to space.xxx() if coerce returns a non-W_Instance
+            # object as first argument
             return getattr(space, objspacename)(w_a, w_b)
     binaryop.func_name = name
 
     def rbinaryop(self, space, w_other):
         w_a, w_b = _coerce_helper(space, self, w_other)
-        if w_a is None or w_a is self:
-            w_meth = self.getattr(space, rspecialname, False)
+        if isinstance(w_a, W_InstanceObject):
+            w_meth = w_a.getattr(space, rspecialname, False)
             if w_meth is None:
                 return space.w_NotImplemented
-            return space.call_function(w_meth, w_other)
+            return space.call_function(w_meth, w_b)
         else:
+            # fall back to space.xxx() if coerce returns a non-W_Instance
+            # object as first argument
             return getattr(space, objspacename)(w_b, w_a)
     rbinaryop.func_name = "r" + name
     return binaryop, rbinaryop
@@ -283,7 +284,7 @@
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
-        return [None, None]
+        return [w_self, w_other]
     return space.fixedview(w_tup, 2)
 
 def descr_instance_new(space, w_type, w_class, w_dict=None):
@@ -523,13 +524,9 @@
 
     def descr_cmp(self, space, w_other): # do all the work here like CPython
         w_a, w_b = _coerce_helper(space, self, w_other)
-        if w_a is None:
-            w_a = self
-            w_b = w_other
-        else:
-            if (not isinstance(w_a, W_InstanceObject) and
-                not isinstance(w_b, W_InstanceObject)):
-                return space.cmp(w_a, w_b)
+        if (not isinstance(w_a, W_InstanceObject) and
+            not isinstance(w_b, W_InstanceObject)):
+            return space.cmp(w_a, w_b)
         if isinstance(w_a, W_InstanceObject):
             w_func = w_a.getattr(space, '__cmp__', False)
             if w_func is not None:
@@ -636,42 +633,36 @@
     def descr_pow(self, space, w_other, w_modulo=None):
         if space.is_none(w_modulo):
             w_a, w_b = _coerce_helper(space, self, w_other)
-            if w_a is None:
-                w_a = self
-                w_b = w_other
-            if w_a is self:
-                w_func = self.getattr(space, '__pow__', False)
-                if w_func is not None:
-                    return space.call_function(w_func, w_other)
-                return space.w_NotImplemented
+            if isinstance(w_a, W_InstanceObject):
+                w_func = w_a.getattr(space, '__pow__', False)
+                if w_func is None:
+                    return space.w_NotImplemented
+                return space.call_function(w_func, w_other)
             else:
                 return space.pow(w_a, w_b, space.w_None)
         else:
             # CPython also doesn't try coercion in this case
             w_func = self.getattr(space, '__pow__', False)
-            if w_func is not None:
-                return space.call_function(w_func, w_other, w_modulo)
-            return space.w_NotImplemented
+            if w_func is None:
+                return space.w_NotImplemented
+            return space.call_function(w_func, w_other, w_modulo)
 
     def descr_rpow(self, space, w_other, w_modulo=None):
         if space.is_none(w_modulo):
             w_a, w_b = _coerce_helper(space, self, w_other)
-            if w_a is None:
-                w_a = self
-                w_b = w_other
-            if w_a is self:
-                w_func = self.getattr(space, '__rpow__', False)
-                if w_func is not None:
-                    return space.call_function(w_func, w_other)
-                return space.w_NotImplemented
+            if isinstance(w_a, W_InstanceObject):
+                w_func = w_a.getattr(space, '__rpow__', False)
+                if w_func is None:
+                    return space.w_NotImplemented
+                return space.call_function(w_func, w_other)
             else:
                 return space.pow(w_b, w_a, space.w_None)
         else:
             # CPython also doesn't try coercion in this case
             w_func = self.getattr(space, '__rpow__', False)
-            if w_func is not None:
-                return space.call_function(w_func, w_other, w_modulo)
-            return space.w_NotImplemented
+            if w_func is None:
+                return space.w_NotImplemented
+            return space.call_function(w_func, w_other, w_modulo)
 
     def descr_next(self, space):
         w_func = self.getattr(space, 'next', False)
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -417,6 +417,22 @@
             pass
         raises(TypeError, coerce, B(), [])
 
+    def test_coerce_inf(self):
+        class B:
+            def __coerce__(self, other):
+                return B(), B()
+            def __add__(self, other):
+                return 42
+        assert B() + B() == 42
+
+    def test_coerce_reverse(self):
+        class CoerceNumber:
+            def __coerce__(self, other):
+                assert isinstance(other, int)
+                return (6, other)
+        assert 5 + CoerceNumber() == 11
+        assert 2 ** CoerceNumber() == 64
+
     def test_binaryop(self):
         class A:
             def __add__(self, other):
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -178,7 +178,8 @@
 
 
 @jit.dont_look_inside
-def _handle_applevel_exception(space, callback, e, ll_res, extra_line):
+def _handle_applevel_exception(callback, e, ll_res, extra_line):
+    space = callback.space
     callback.write_error_return_value(ll_res)
     if callback.w_onerror is None:
         callback.print_error(e, extra_line)
@@ -199,13 +200,21 @@
                                 extra_line="\nDuring the call to 'onerror', "
                                            "another exception occurred:\n\n")
 
+ at jit.jit_callback("CFFI")
+def py_invoke_callback(callback, ll_res, ll_args):
+    extra_line = ''
+    try:
+        w_res = callback.invoke(ll_args)
+        extra_line = "Trying to convert the result back to C:\n"
+        callback.convert_result(ll_res, w_res)
+    except OperationError, e:
+        _handle_applevel_exception(callback, e, ll_res, extra_line)
 
- at jit.jit_callback("CFFI")
 def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata):
     """ Callback specification.
     ffi_cif - something ffi specific, don't care
     ll_args - rffi.VOIDPP - pointer to array of pointers to args
-    ll_restype - rffi.VOIDP - pointer to result
+    ll_res - rffi.VOIDP - pointer to result
     ll_userdata - a special structure which holds necessary information
                   (what the real callback is for example), casted to VOIDP
     """
@@ -228,13 +237,7 @@
     space = callback.space
     try:
         must_leave = space.threadlocals.try_enter_thread(space)
-        extra_line = ''
-        try:
-            w_res = callback.invoke(ll_args)
-            extra_line = "Trying to convert the result back to C:\n"
-            callback.convert_result(ll_res, w_res)
-        except OperationError, e:
-            _handle_applevel_exception(space, callback, e, ll_res, extra_line)
+        py_invoke_callback(callback, ll_res, ll_args)
         #
     except Exception, e:
         # oups! last-level attempt to recover.
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -11,7 +11,8 @@
 
 
 class W_CType(W_Root):
-    _attrs_ = ['space', 'size',  'name', 'name_position', '_lifeline_']
+    _attrs_ = ['space', 'size',  'name', 'name_position', '_lifeline_',
+               '_pointer_type']
     _immutable_fields_ = ['size?', 'name', 'name_position']
     # note that 'size' is not strictly immutable, because it can change
     # from -1 to the real value in the W_CTypeStruct subclass.
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -168,7 +168,7 @@
 
 
 class W_CTypePointer(W_CTypePtrBase):
-    _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr']
+    _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types']
     _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr']
     kind = "pointer"
     cache_array_type = None
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -4,7 +4,7 @@
 
 from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash
 from rpython.rlib.rarithmetic import ovfcheck, intmask
-from rpython.rlib import jit
+from rpython.rlib import jit, rweakref
 from rpython.rtyper.lltypesystem import lltype, rffi
 from rpython.rtyper.tool import rffi_platform
 
@@ -23,27 +23,12 @@
 
 class UniqueCache:
     def __init__(self, space):
-        self.ctvoid = None      # There can be only one
-        self.ctvoidp = None     # Cache for self.pointers[self.ctvoid]
-        self.ctchara = None     # Cache for self.arrays[charp, -1]
-        self.primitives = {}    # Keys: name
-        self.pointers = {}      # Keys: base_ctype
-        self.arrays = {}        # Keys: (ptr_ctype, length_or_-1)
-        self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis)
-            _func_key_eq, _func_key_hash)
-
-def _func_key_eq((fargs1, w_fresult1, ellipsis1),
-                 (fargs2, w_fresult2, ellipsis2)):
-    return (fargs1 == fargs2 and      # list equality here
-            w_fresult1 is w_fresult2 and
-            ellipsis1 == ellipsis2)
-
-def _func_key_hash((fargs, w_fresult, ellipsis)):
-    x = compute_identity_hash(w_fresult) ^ ellipsis
-    for w_arg in fargs:
-        y = compute_identity_hash(w_arg)
-        x = intmask((1000003 * x) ^ y)
-    return x
+        self.ctvoid = None      # Cache for the 'void' type
+        self.ctvoidp = None     # Cache for the 'void *' type
+        self.ctchara = None     # Cache for the 'char[]' type
+        self.primitives = {}    # Cache for {name: primitive_type}
+        self.functions = []     # see _new_function_type()
+        self.for_testing = False
 
 def _clean_cache(space):
     "NOT_RPYTHON"
@@ -165,20 +150,24 @@
 
 # ____________________________________________________________
 
+ at specialize.memo()
+def _setup_wref(has_weakref_support):
+    assert has_weakref_support, "_cffi_backend requires weakrefs"
+    ctypeobj.W_CType._pointer_type = rweakref.dead_ref
+    ctypeptr.W_CTypePointer._array_types = None
+
 @unwrap_spec(w_ctype=ctypeobj.W_CType)
 def new_pointer_type(space, w_ctype):
     return _new_pointer_type(space, w_ctype)
 
 @jit.elidable
 def _new_pointer_type(space, w_ctype):
-    unique_cache = space.fromcache(UniqueCache)
-    try:
-        return unique_cache.pointers[w_ctype]
-    except KeyError:
-        pass
-    ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype)
-    unique_cache.pointers[w_ctype] = ctypepointer
-    return ctypepointer
+    _setup_wref(rweakref.has_weakref_support())
+    ctptr = w_ctype._pointer_type()
+    if ctptr is None:
+        ctptr = ctypeptr.W_CTypePointer(space, w_ctype)
+        w_ctype._pointer_type = rweakref.ref(ctptr)
+    return ctptr
 
 # ____________________________________________________________
 
@@ -195,16 +184,19 @@
 
 @jit.elidable
 def _new_array_type(space, w_ctptr, length):
-    unique_cache = space.fromcache(UniqueCache)
-    unique_key = (w_ctptr, length)
-    try:
-        return unique_cache.arrays[unique_key]
-    except KeyError:
-        pass
-    #
+    _setup_wref(rweakref.has_weakref_support())
     if not isinstance(w_ctptr, ctypeptr.W_CTypePointer):
         raise OperationError(space.w_TypeError,
                              space.wrap("first arg must be a pointer ctype"))
+    arrays = w_ctptr._array_types
+    if arrays is None:
+        arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray)
+        w_ctptr._array_types = arrays
+    else:
+        ctype = arrays.get(length)
+        if ctype is not None:
+            return ctype
+    #
     ctitem = w_ctptr.ctitem
     if ctitem.size < 0:
         raise oefmt(space.w_ValueError, "array item of unknown size: '%s'",
@@ -222,7 +214,7 @@
         extra = '[%d]' % length
     #
     ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra)
-    unique_cache.arrays[unique_key] = ctype
+    arrays.set(length, ctype)
     return ctype
 
 # ____________________________________________________________
@@ -612,29 +604,69 @@
         fargs.append(w_farg)
     return _new_function_type(space, fargs, w_fresult, bool(ellipsis))
 
+def _func_key_hash(unique_cache, fargs, fresult, ellipsis):
+    x = compute_identity_hash(fresult)
+    for w_arg in fargs:
+        y = compute_identity_hash(w_arg)
+        x = intmask((1000003 * x) ^ y)
+    x ^= ellipsis
+    if unique_cache.for_testing:    # constant-folded to False in translation;
+        x &= 3                      # but for test, keep only 2 bits of hash
+    return x
+
 # can't use @jit.elidable here, because it might call back to random
 # space functions via force_lazy_struct()
-def _new_function_type(space, fargs, w_fresult, ellipsis=False):
+def _new_function_type(space, fargs, fresult, ellipsis=False):
+    try:
+        return _get_function_type(space, fargs, fresult, ellipsis)
+    except KeyError:
+        return _build_function_type(space, fargs, fresult, ellipsis)
+
+ at jit.elidable
+def _get_function_type(space, fargs, fresult, ellipsis):
+    # This function is elidable because if called again with exactly the
+    # same arguments (and if it didn't raise KeyError), it would give
+    # the same result, at least as long as this result is still live.
+    #
+    # 'unique_cache.functions' is a list of weak dicts, each mapping
+    # the func_hash number to a W_CTypeFunc.  There is normally only
+    # one such dict, but in case of hash collision, there might be
+    # more.
+    unique_cache = space.fromcache(UniqueCache)
+    func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis)
+    for weakdict in unique_cache.functions:
+        ctype = weakdict.get(func_hash)
+        if (ctype is not None and
+            ctype.ctitem is fresult and
+            ctype.fargs == fargs and
+            ctype.ellipsis == ellipsis):
+            return ctype
+    raise KeyError
+
+ at jit.dont_look_inside
+def _build_function_type(space, fargs, fresult, ellipsis):
     from pypy.module._cffi_backend import ctypefunc
     #
-    unique_cache = space.fromcache(UniqueCache)
-    unique_key = (fargs, w_fresult, ellipsis)
-    try:
-        return unique_cache.functions[unique_key]
-    except KeyError:
-        pass
-    #
-    if ((w_fresult.size < 0 and
-         not isinstance(w_fresult, ctypevoid.W_CTypeVoid))
-        or isinstance(w_fresult, ctypearray.W_CTypeArray)):
-        if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and
-                w_fresult.size < 0):
+    if ((fresult.size < 0 and
+         not isinstance(fresult, ctypevoid.W_CTypeVoid))
+        or isinstance(fresult, ctypearray.W_CTypeArray)):
+        if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and
+                fresult.size < 0):
             raise oefmt(space.w_TypeError,
-                        "result type '%s' is opaque", w_fresult.name)
+                        "result type '%s' is opaque", fresult.name)
         else:
             raise oefmt(space.w_TypeError,
-                        "invalid result type: '%s'", w_fresult.name)
+                        "invalid result type: '%s'", fresult.name)
     #
-    fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis)
-    unique_cache.functions[unique_key] = fct
+    fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis)
+    unique_cache = space.fromcache(UniqueCache)
+    func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis)
+    for weakdict in unique_cache.functions:
+        if weakdict.get(func_hash) is None:
+            weakdict.set(func_hash, fct)
+            break
+    else:
+        weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc)
+        unique_cache.functions.append(weakdict)
+        weakdict.set(func_hash, fct)
     return fct
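
The new function-type cache stores W_CTypeFunc objects in a list of weak-value dictionaries keyed by a hash of (args, result, ellipsis): a colliding entry goes into the next dictionary (appending a fresh one if needed), and every lookup re-checks the actual fields before returning a hit, exactly because different signatures can share a hash. A plain-Python sketch of that scheme, using weakref.WeakValueDictionary in place of RPython's rweakref.RWeakValueDictionary and borrowing the ctitem/fargs/ellipsis attribute names from the code above:

    import weakref

    class FuncTypeCache(object):
        def __init__(self):
            self.functions = []              # list of WeakValueDictionary

        def get(self, func_hash, fargs, fresult, ellipsis):
            for weakdict in self.functions:
                ctype = weakdict.get(func_hash)
                if (ctype is not None and ctype.ctitem is fresult and
                        ctype.fargs == fargs and ctype.ellipsis == ellipsis):
                    return ctype             # verified hit, still alive
            raise KeyError(func_hash)

        def add(self, func_hash, ctype):
            for weakdict in self.functions:
                if weakdict.get(func_hash) is None:
                    weakdict[func_hash] = ctype   # free slot at this hash
                    return
            weakdict = weakref.WeakValueDictionary()
            self.functions.append(weakdict)  # every dict collided: add one
            weakdict[func_hash] = ctype
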
diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py
--- a/pypy/module/_cffi_backend/test/test_c.py
+++ b/pypy/module/_cffi_backend/test/test_c.py
@@ -22,7 +22,7 @@
 from rpython.tool.udir import udir
 from pypy.interpreter import gateway
 from pypy.module._cffi_backend import Module
-from pypy.module._cffi_backend.newtype import _clean_cache
+from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache
 from rpython.translator import cdir
 from rpython.translator.platform import host
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
@@ -86,8 +86,10 @@
             _all_test_c.find_and_load_library = func
             _all_test_c._testfunc = testfunc
         """)
+        UniqueCache.for_testing = True
 
     def teardown_method(self, method):
+        UniqueCache.for_testing = False
         _clean_cache(self.space)
 
 
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -3,11 +3,13 @@
 from rpython.rlib.buffer import SubBuffer
 from rpython.rlib.rstring import strip_spaces
 from rpython.rtyper.lltypesystem import lltype, rffi
+
 from pypy.module.micronumpy import descriptor, loop, support
 from pypy.module.micronumpy.base import (wrap_impl,
     W_NDimArray, convert_to_array, W_NumpyObject)
 from pypy.module.micronumpy.converters import shape_converter, order_converter
 import pypy.module.micronumpy.constants as NPY
+from .casting import scalar2dtype
 
 
 def build_scalar(space, w_dtype, w_state):
@@ -83,7 +85,6 @@
     return w_res
 
 def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False):
-    from pypy.module.micronumpy import strides
 
     # for anything that isn't already an array, try __array__ method first
     if not isinstance(w_object, W_NDimArray):
@@ -139,16 +140,11 @@
                     w_base=w_base, start=imp.start)
     else:
         # not an array
-        shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype)
+        shape, elems_w = find_shape_and_elems(space, w_object, dtype)
     if dtype is None and space.isinstance_w(w_object, space.w_buffer):
         dtype = descriptor.get_dtype_cache(space).w_uint8dtype
     if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1):
         dtype = find_dtype_for_seq(space, elems_w, dtype)
-        if dtype is None:
-            dtype = descriptor.get_dtype_cache(space).w_float64dtype
-        elif dtype.is_str_or_unicode() and dtype.elsize < 1:
-            # promote S0 -> S1, U0 -> U1
-            dtype = descriptor.variable_dtype(space, dtype.char + '1')
 
     w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order)
     if support.product(shape) == 1: # safe from overflow since from_shape checks
@@ -161,7 +157,6 @@
 def numpify(space, w_object):
     """Convert the object to a W_NumpyObject"""
     # XXX: code duplication with _array()
-    from pypy.module.micronumpy import strides
     if isinstance(w_object, W_NumpyObject):
         return w_object
     # for anything that isn't already an array, try __array__ method first
@@ -169,20 +164,82 @@
     if w_array is not None:
         return w_array
 
-    shape, elems_w = strides.find_shape_and_elems(space, w_object, None)
+    if is_scalar_like(space, w_object, dtype=None):
+        dtype = scalar2dtype(space, w_object)
+        if dtype.is_str_or_unicode() and dtype.elsize < 1:
+            # promote S0 -> S1, U0 -> U1
+            dtype = descriptor.variable_dtype(space, dtype.char + '1')
+        return dtype.coerce(space, w_object)
+
+    shape, elems_w = _find_shape_and_elems(space, w_object)
     dtype = find_dtype_for_seq(space, elems_w, None)
-    if dtype is None:
-        dtype = descriptor.get_dtype_cache(space).w_float64dtype
-    elif dtype.is_str_or_unicode() and dtype.elsize < 1:
-        # promote S0 -> S1, U0 -> U1
-        dtype = descriptor.variable_dtype(space, dtype.char + '1')
+    w_arr = W_NDimArray.from_shape(space, shape, dtype)
+    loop.assign(space, w_arr, elems_w)
+    return w_arr
 
-    if len(elems_w) == 1:
-        return dtype.coerce(space, elems_w[0])
+
+def find_shape_and_elems(space, w_iterable, dtype):
+    if is_scalar_like(space, w_iterable, dtype):
+        return [], [w_iterable]
+    is_rec_type = dtype is not None and dtype.is_record()
+    return _find_shape_and_elems(space, w_iterable, is_rec_type)
+
+def is_scalar_like(space, w_obj, dtype):
+    isstr = space.isinstance_w(w_obj, space.w_str)
+    if not support.issequence_w(space, w_obj) or isstr:
+        if dtype is None or dtype.char != NPY.CHARLTR:
+            return True
+    is_rec_type = dtype is not None and dtype.is_record()
+    if is_rec_type and is_single_elem(space, w_obj, is_rec_type):
+        return True
+    if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar():
+        return True
+    return False
+
+def _find_shape_and_elems(space, w_iterable, is_rec_type=False):
+    from pypy.objspace.std.bufferobject import W_Buffer
+    shape = [space.len_w(w_iterable)]
+    if space.isinstance_w(w_iterable, space.w_buffer):
+        batch = [space.wrap(0)] * shape[0]
+        for i in range(shape[0]):
+            batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i)))
     else:
-        w_arr = W_NDimArray.from_shape(space, shape, dtype)
-        loop.assign(space, w_arr, elems_w)
-        return w_arr
+        batch = space.listview(w_iterable)
+    while True:
+        if not batch:
+            return shape[:], []
+        if is_single_elem(space, batch[0], is_rec_type):
+            for w_elem in batch:
+                if not is_single_elem(space, w_elem, is_rec_type):
+                    raise OperationError(space.w_ValueError, space.wrap(
+                        "setting an array element with a sequence"))
+            return shape[:], batch
+        new_batch = []
+        size = space.len_w(batch[0])
+        for w_elem in batch:
+            if (is_single_elem(space, w_elem, is_rec_type) or
+                    space.len_w(w_elem) != size):
+                raise OperationError(space.w_ValueError, space.wrap(
+                    "setting an array element with a sequence"))
+            w_array = space.lookup(w_elem, '__array__')
+            if w_array is not None:
+                # Make sure we call the array implementation of listview,
+                # since for some ndarray subclasses (matrix, for instance)
+                # listview does not reduce but rather returns the same class
+                w_elem = space.get_and_call_function(w_array, w_elem, space.w_None)
+            new_batch += space.listview(w_elem)
+        shape.append(size)
+        batch = new_batch
+
+def is_single_elem(space, w_elem, is_rec_type):
+    if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)):
+        return True
+    if (space.isinstance_w(w_elem, space.w_tuple) or
+            space.isinstance_w(w_elem, space.w_list)):
+        return False
+    if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar():
+        return False
+    return True
 
 def _dtype_guess(space, dtype, w_elem):
     from .casting import scalar2dtype, find_binop_result_dtype
@@ -197,6 +254,11 @@
         return _dtype_guess(space, dtype, w_elem)
     for w_elem in elems_w:
         dtype = _dtype_guess(space, dtype, w_elem)
+    if dtype is None:
+        dtype = descriptor.get_dtype_cache(space).w_float64dtype
+    elif dtype.is_str_or_unicode() and dtype.elsize < 1:
+        # promote S0 -> S1, U0 -> U1
+        dtype = descriptor.variable_dtype(space, dtype.char + '1')
     return dtype
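
For reference, the float64 fallback and the S0/U0 promotion above mirror NumPy's own defaults; a minimal check against stock NumPy (assuming current NumPy behaviour, not part of this changeset):

    import numpy as np

    # No elements to guess a dtype from: the default is float64.
    print(np.array([]).dtype)        # float64

    # Zero-length strings still get one character of storage,
    # i.e. the S0 -> S1 / U0 -> U1 promotion.
    print(np.array('').dtype)        # S1 on Python 2 (U1 on Python 3)
    print(np.array(u'').dtype)       # U1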
 
 
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -855,6 +855,8 @@
         v = convert_to_array(space, w_v)
         ret = W_NDimArray.from_shape(
             space, v.get_shape(), get_dtype_cache(space).w_longdtype)
+        if ret.get_size() < 1:
+            return ret
         if side == NPY.SEARCHLEFT:
             binsearch = loop.binsearch_left
         else:
@@ -1301,6 +1303,9 @@
             [space.wrap(0)]), space.wrap("b")])
 
         builder = StringBuilder()
+        if self.get_dtype().is_object():
+            raise oefmt(space.w_NotImplementedError,
+                    "reduce for 'object' dtype not supported yet")
         if isinstance(self.implementation, SliceArray):
             iter, state = self.implementation.create_iter()
             while not iter.done(state):
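
The first hunk lets searchsorted() return immediately when the query array is empty, skipping the binary-search loop; the second makes reducing (pickling) an 'object'-dtype array raise NotImplementedError, matching the new test in test_object_arrays.py below. A short sketch of the expected searchsorted behaviour, checked against stock NumPy (illustration only):

    import numpy as np

    a = np.array([1, 2, 3, 4])
    idx = a.searchsorted([])     # empty query -> empty result, no search runs
    assert idx.shape == (0,)
    print(idx.dtype)             # an integer dtype (intp/long)
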
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -189,67 +189,6 @@
     return rstrides, rbackstrides
 
 
-def is_single_elem(space, w_elem, is_rec_type):
-    if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)):
-        return True
-    if (space.isinstance_w(w_elem, space.w_tuple) or
-            space.isinstance_w(w_elem, space.w_list)):
-        return False
-    if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar():
-        return False
-    return True
-
-
-def find_shape_and_elems(space, w_iterable, dtype):
-    isstr = space.isinstance_w(w_iterable, space.w_str)
-    if not support.issequence_w(space, w_iterable) or isstr:
-        if dtype is None or dtype.char != NPY.CHARLTR:
-            return [], [w_iterable]
-    is_rec_type = dtype is not None and dtype.is_record()
-    if is_rec_type and is_single_elem(space, w_iterable, is_rec_type):
-        return [], [w_iterable]
-    if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar():
-        return [], [w_iterable]
-    return _find_shape_and_elems(space, w_iterable, is_rec_type)
-
-
-def _find_shape_and_elems(space, w_iterable, is_rec_type):
-    from pypy.objspace.std.bufferobject import W_Buffer
-    shape = [space.len_w(w_iterable)]
-    if space.isinstance_w(w_iterable, space.w_buffer):
-        batch = [space.wrap(0)] * shape[0]
-        for i in range(shape[0]):
-            batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i)))
-    else:
-        batch = space.listview(w_iterable)
-    while True:
-        if not batch:
-            return shape[:], []
-        if is_single_elem(space, batch[0], is_rec_type):
-            for w_elem in batch:
-                if not is_single_elem(space, w_elem, is_rec_type):
-                    raise OperationError(space.w_ValueError, space.wrap(
-                        "setting an array element with a sequence"))
-            return shape[:], batch
-        new_batch = []
-        size = space.len_w(batch[0])
-        for w_elem in batch:
-            if (is_single_elem(space, w_elem, is_rec_type) or
-                    space.len_w(w_elem) != size):
-                raise OperationError(space.w_ValueError, space.wrap(
-                    "setting an array element with a sequence"))
-            w_array = space.lookup(w_elem, '__array__')
-            if w_array is not None:
-                # Make sure we call the array implementation of listview,
-                # since for some ndarray subclasses (matrix, for instance)
-                # listview does not reduce but rather returns the same class
-                w_elem = space.get_and_call_function(w_array, w_elem, space.w_None)
-            new_batch += space.listview(w_elem)
-        shape.append(size)
-        batch = new_batch
-
-
-
 @jit.unroll_safe
 def shape_agreement(space, shape1, w_arr2, broadcast_down=True):
     if w_arr2 is None:
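
The removed block now lives in ctors.py (added earlier in this diff). As a rough guide, here is a minimal pure-Python sketch of the same dimension-peeling idea (hypothetical helper, not PyPy code): each pass checks that every element at the current depth is a sequence of the same length, records that length as the next dimension, and flattens one level.

    def infer_shape(seq):
        shape = [len(seq)]
        batch = list(seq)
        while batch:
            if not isinstance(batch[0], (list, tuple)):
                # scalars at this depth: everything else must be scalar too
                if any(isinstance(e, (list, tuple)) for e in batch):
                    raise ValueError("setting an array element with a sequence")
                return shape
            size = len(batch[0])
            if any(not isinstance(e, (list, tuple)) or len(e) != size
                   for e in batch):
                raise ValueError("setting an array element with a sequence")
            shape.append(size)
            batch = [x for elem in batch for x in elem]
        return shape

    print(infer_shape([[1, 2, 3], [4, 5, 6]]))   # [2, 3]
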
diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py
--- a/pypy/module/micronumpy/support.py
+++ b/pypy/module/micronumpy/support.py
@@ -40,7 +40,10 @@
 def product_check(s):
     i = 1
     for x in s:
-        i = ovfcheck(i * x)
+        try:
+            i = ovfcheck(i * x)
+        except OverflowError:
+            raise
     return i
 
 def check_and_adjust_index(space, index, size, axis):
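
The try/except that only re-raises is not dead code: as far as the RPython translation rules go, ovfcheck() needs to sit directly inside a try/except OverflowError in the same function so the translator can lower it to an overflow-checked machine operation, and re-raising keeps the caller-visible behaviour unchanged. A hedged sketch of the usual pattern, with a hypothetical sentinel instead of re-raising:

    from rpython.rlib.rarithmetic import ovfcheck

    def product_or_minus_one(sizes):
        result = 1
        for x in sizes:
            try:
                result = ovfcheck(result * x)   # overflow-checked multiply
            except OverflowError:
                return -1                       # hypothetical sentinel value
        return result
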
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -170,7 +170,7 @@
                                     [1, 1, 1, 105, 105]
 
     def test_find_shape(self):
-        from pypy.module.micronumpy.strides import find_shape_and_elems
+        from pypy.module.micronumpy.ctors import find_shape_and_elems
 
         space = self.space
         shape, elems = find_shape_and_elems(space,
@@ -2478,6 +2478,18 @@
         a.fill(12)
         assert (a == u'1').all()
 
+    def test_unicode_record_array(self) :
+        from numpy import dtype, array
+        t = dtype([('a', 'S3'), ('b', 'U2')])
+        x = array([('a', u'b')], dtype=t)
+        assert str(x) ==  "[('a', u'b')]"
+
+        t = dtype([('a', 'U3'), ('b', 'S2')])
+        x = array([(u'a', 'b')], dtype=t)
+        x['a'] = u'1'
+        assert str(x) ==  "[(u'1', 'b')]"
+
+
     def test_boolean_indexing(self):
         import numpy as np
         a = np.zeros((1, 3))
@@ -2700,7 +2712,7 @@
                 "input array from shape (3,1) into shape (3)"
         a[:, 1] = b[:,0] > 0.5
         assert (a == [[0, 1], [0, 1], [0, 1]]).all()
-        
+
 
     def test_ufunc(self):
         from numpy import array
@@ -3856,7 +3868,7 @@
 
         assert a[0]['y'] == 2
         assert a[1]['y'] == 1
-        
+
         a = array([(1, [])], dtype=[('a', int32), ('b', int32, 0)])
         assert a['b'].shape == (1, 0)
         b = loads(dumps(a))
diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py
--- a/pypy/module/micronumpy/test/test_object_arrays.py
+++ b/pypy/module/micronumpy/test/test_object_arrays.py
@@ -3,6 +3,8 @@
 
 
 class AppTestObjectDtypes(BaseNumpyAppTest):
+    spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"])
+
     def setup_class(cls):
         BaseNumpyAppTest.setup_class.im_func(cls)
         cls.w_runappdirect = cls.space.wrap(option.runappdirect)
@@ -187,3 +189,21 @@
         assert b.shape == (1,)
         assert b.dtype == np.float_
         assert (b == 1.0).all()
+
+
+    def test__reduce__(self):
+        from numpy import arange, dtype
+        from cPickle import loads, dumps
+        import sys
+        
+        a = arange(15).astype(object)
+        if '__pypy__' in sys.builtin_module_names:
+            raises(NotImplementedError, dumps, a)
+            skip('not implemented yet')
+        b = loads(dumps(a))
+        assert (a == b).all()
+
+        a = arange(15).astype(object).reshape((3, 5))
+        b = loads(dumps(a))
+        assert (a == b).all()
+        
diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py
--- a/pypy/module/micronumpy/test/test_scalar.py
+++ b/pypy/module/micronumpy/test/test_scalar.py
@@ -480,3 +480,9 @@
         u = unicode_(u'Aÿ')
         # raises(UnicodeEncodeError, "str(u)")  # XXX
         assert repr(u) == repr(u'Aÿ')
+
+    def test_binop_with_sequence(self):
+        import numpy as np
+        c = np.float64(1.) + [1.]
+        assert isinstance(c, np.ndarray)
+        assert (c == [2.]).all()
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -2231,9 +2231,9 @@
             index = i + offset + 4*k
             data = rffi.cast(Int32.T, ord(box._value[k]))
             raw_storage_setitem_unaligned(storage, index, data)
-        for k in range(size, width // 4):
-            index = i + offset + 4*k
-            data = rffi.cast(Int32.T, 0)
+        # zero out the remaining memory
+        for index in range(size * 4 + i + offset, width):
+            data = rffi.cast(Int8.T, 0)
             raw_storage_setitem_unaligned(storage, index, data)
 
     def read(self, arr, i, offset, dtype):
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -479,6 +479,7 @@
         dt_in, dt_out = self._calc_dtype(space, dtype, out, casting)
         return dt_in, dt_out, self.func
 
+    @jit.unroll_safe
     def _calc_dtype(self, space, arg_dtype, out=None, casting='unsafe'):
         if arg_dtype.is_object():
             return arg_dtype, arg_dtype
@@ -672,6 +673,7 @@
             "requested type has type code '%s'", self.name, dtype.char)
 
 
+    @jit.unroll_safe
     def _calc_dtype(self, space, l_dtype, r_dtype, out, casting,
                     w_arg1, w_arg2):
         if l_dtype.is_object() or r_dtype.is_object():
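
@jit.unroll_safe tells the tracing JIT that loops inside the decorated function may be unrolled into the trace; the dtype-selection loops here run over a small, fixed set of candidates, so unrolling removes the loop overhead. A minimal illustration with a hypothetical function (only the decorator is the real one, from rpython.rlib.jit):

    from rpython.rlib import jit

    @jit.unroll_safe
    def first_index(items, target):
        # small, bounded loop: safe to unroll completely inside a trace
        for i in range(len(items)):
            if items[i] == target:
                return i
        return -1
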
diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py
--- a/pypy/module/pypyjit/__init__.py
+++ b/pypy/module/pypyjit/__init__.py
@@ -15,8 +15,12 @@
         'set_compile_hook': 'interp_resop.set_compile_hook',
         'set_abort_hook': 'interp_resop.set_abort_hook',
         'get_stats_snapshot': 'interp_resop.get_stats_snapshot',
-        'enable_debug': 'interp_resop.enable_debug',
-        'disable_debug': 'interp_resop.disable_debug',
+        # those things are disabled because they have bugs, but if
+        # they're found to be useful, fix test_ztranslation_jit_stats
+        # in the backend first. get_stats_snapshot still produces
+        # correct loop_runs if PYPYLOG is correct
+        #'enable_debug': 'interp_resop.enable_debug',
+        #'disable_debug': 'interp_resop.disable_debug',
         'ResOperation': 'interp_resop.WrappedOp',
         'DebugMergePoint': 'interp_resop.DebugMergePoint',
         'JitLoopInfo': 'interp_resop.W_JitLoopInfo',
diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py
--- a/pypy/module/pypyjit/interp_resop.py
+++ b/pypy/module/pypyjit/interp_resop.py
@@ -315,11 +315,12 @@
     """
     ll_times = jit_hooks.stats_get_loop_run_times(None)
     w_times = space.newdict()
-    for i in range(len(ll_times)):
-        w_key = space.newtuple([space.wrap(ll_times[i].type),
-                                space.wrap(ll_times[i].number)])
-        space.setitem(w_times, w_key,
-                      space.wrap(ll_times[i].counter))
+    if ll_times:
+        for i in range(len(ll_times)):
+            w_key = space.newtuple([space.wrap(ll_times[i].type),
+                                    space.wrap(ll_times[i].number)])
+            space.setitem(w_times, w_key,
+                          space.wrap(ll_times[i].counter))
     w_counters = space.newdict()
     for i, counter_name in enumerate(Counters.counter_names):
         v = jit_hooks.stats_get_counter_value(None, i)
diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py
--- a/pypy/module/pypyjit/test/test_jit_hook.py
+++ b/pypy/module/pypyjit/test/test_jit_hook.py
@@ -213,22 +213,6 @@
         self.on_abort()
         assert l == [('pypyjit', 'ABORT_TOO_LONG', [])]
 
-    def test_on_optimize(self):
-        import pypyjit
-        l = []
-
-        def hook(info):
-            l.append(info.jitdriver_name)
-
-        def optimize_hook(info):
-            return []
-
-        pypyjit.set_compile_hook(hook)
-        pypyjit.set_optimize_hook(optimize_hook)
-        self.on_optimize()
-        self.on_compile()
-        assert l == ['pypyjit']
-
     def test_creation(self):
         from pypyjit import ResOperation
 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py
--- a/pypy/module/pypyjit/test_pypy_c/test_alloc.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py
@@ -7,10 +7,11 @@
                           [2 ** n - 1 for n in range(26)])
 
     def test_newstr_constant_size(self):
-        for size in TestAlloc.SIZES:
+        for size in sorted(TestAlloc.SIZES):
             yield self.newstr_constant_size, size
 
     def newstr_constant_size(self, size):
+        print 'size =', size
         src = """if 1:
                     N = %(size)d
                     part_a = 'a' * N
diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py
--- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py
@@ -28,7 +28,7 @@
 
     def test_struct_unpack(self):
         def main(n):
-            import struct
+            import _struct as struct
             import array
             a = array.array('c', struct.pack('i', 42))
             i = 0
diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py
--- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py
@@ -76,6 +76,6 @@
         assert len(mod_bridges) in (1, 2, 3)
 
         # check that counts are reasonable (precise # may change in the future)
-        assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000
+        assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500
 
 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
--- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
@@ -248,3 +248,42 @@
             guard_false(i157, descr=...)
             jump(..., descr=...)
         """)
+
+    def test_mixed_div(self):
+        N = 1500
+        def main():
+            N = 1500
+            import _numpypy.multiarray as np
+            arr = np.zeros(N)
+            l = [arr[i]/2. for i in range(N)]
+            return l
+        log = self.run(main, [])
+        assert log.result == [0.] * N
+        loop, = log.loops_by_filename(self.filepath)
+        assert loop.match("""
+            i92 = int_ge(i91, i37)
+            guard_false(i92, descr=...)
+            i93 = int_add(i91, 1)
+            setfield_gc(p23, i93, descr=<FieldS pypy.objspace.std.iterobject.W_AbstractSeqIterObject.inst_index 8>)
+            i94 = int_ge(i91, i56)
+            guard_false(i94, descr=...)
+            i96 = int_mul(i91, i58)
+            i97 = int_add(i51, i96)
+            f98 = raw_load_f(i63, i97, descr=<ArrayF 8>)
+            guard_not_invalidated(descr=...)
+            f100 = float_mul(f98, 0.500000)
+            i101 = int_add(i79, 1)
+            i102 = arraylen_gc(p85, descr=<ArrayP 8>)
+            i103 = int_lt(i102, i101)
+            cond_call(i103, ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p76, i101, 1, descr=<Callv 0 rii EF=5>)
+            guard_no_exception(descr=...)
+            p104 = getfield_gc_r(p76, descr=<FieldP list.items 16>)
+            p105 = new_with_vtable(descr=<SizeDescr 24>)
+            setfield_gc(p105, f100, descr=<FieldF pypy.module.micronumpy.boxes.W_Float64Box.inst_value 16>)
+            setarrayitem_gc(p104, i79, p105, descr=<ArrayP 8>)
+            i106 = getfield_raw_i(#, descr=<FieldS pypysig_long_struct.c_value 0>)
+            setfield_gc(p76, i101, descr=<FieldS list.length 8>)
+            i107 = int_lt(i106, 0)
+            guard_false(i107, descr=...)
+            jump(..., descr=...)
+        """)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
@@ -2,7 +2,7 @@
 import py
 import platform
 import sys, ctypes
-from cffi import FFI, CDefError, FFIError
+from cffi import FFI, CDefError, FFIError, VerificationMissing
 from pypy.module.test_lib_pypy.cffi_tests.support import *
 
 SIZE_OF_INT   = ctypes.sizeof(ctypes.c_int)
@@ -757,8 +757,8 @@
         p = ffi.cast("long long", ffi.cast("wchar_t", -1))
         if SIZE_OF_WCHAR == 2:      # 2 bytes, unsigned
             assert int(p) == 0xffff
-        elif platform.machine() == 'aarch64': # 4 bytes, unsigned
-            assert int(p) == 0xffffffff
+        elif platform.machine().startswith(('arm', 'aarch64')):
+            assert int(p) == 0xffffffff      # 4 bytes, unsigned
         else:                       # 4 bytes, signed
             assert int(p) == -1
         p = ffi.cast("int", u+'\u1234')
@@ -927,6 +927,14 @@
         assert ffi.string(ffi.cast("enum foo", -16)) == "E"
         assert ffi.string(ffi.cast("enum foo", -8)) == "F"
 
+    def test_enum_partial(self):
+        ffi = FFI(backend=self.Backend())
+        ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };")
+        lib = ffi.dlopen(None)
+        assert lib.B == 0
+        py.test.raises(VerificationMissing, getattr, lib, "A")
+        assert lib.C == 1
+
     def test_array_of_struct(self):
         ffi = FFI(backend=self.Backend())
         ffi.cdef("struct foo { int a, b; };")
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py
@@ -58,6 +58,11 @@
     assert ptr_type.get_c_name("") == "int(const *)[5]"
     assert ptr_type.get_c_name("*x") == "int(const * *x)[5]"
 
+def test_qual_pointer_type():
+    ptr_type = PointerType(PrimitiveType("long long"), Q_RESTRICT)
+    assert ptr_type.get_c_name("") == "long long __restrict *"
+    assert const_voidp_type.get_c_name("") == "void const *"
+
 def test_unknown_pointer_type():
     ptr_type = unknown_ptr_type("foo_p")
     assert ptr_type.get_c_name("") == "foo_p"
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py
@@ -308,7 +308,6 @@
     ffi.cdef("void f(WPARAM);")
 
 def test__is_constant_globalvar():
-    from cffi.cparser import Parser, _get_parser
     for input, expected_output in [
         ("int a;",          False),
         ("const int a;",    True),
@@ -325,11 +324,36 @@
         ("int a[5][6];",       False),
         ("const int a[5][6];", False),
         ]:
-        p = Parser()
-        ast = _get_parser().parse(input)
-        decl = ast.children()[0][1]
-        node = decl.type
-        assert p._is_constant_globalvar(node) == expected_output
+        ffi = FFI()
+        ffi.cdef(input)
+        declarations = ffi._parser._declarations
+        assert ('constant a' in declarations) == expected_output
+        assert ('variable a' in declarations) == (not expected_output)
+
+def test_restrict():
+    from cffi import model
+    for input, expected_output in [
+        ("int a;",             False),
+        ("restrict int a;",    True),
+        ("int *a;",            False),
+        ]:
+        ffi = FFI()
+        ffi.cdef(input)
+        tp, quals = ffi._parser._declarations['variable a']
+        assert bool(quals & model.Q_RESTRICT) == expected_output
+
+def test_different_const_funcptr_types():
+    lst = []
+    for input in [
+        "int(*)(int *a)",
+        "int(*)(int const *a)",
+        "int(*)(int * const a)",
+        "int(*)(int const a[])"]:
+        ffi = FFI(backend=FakeBackend())
+        lst.append(ffi._parser.parse_type(input))
+    assert lst[0] != lst[1]
+    assert lst[0] == lst[2]
+    assert lst[1] == lst[3]
 
 def test_enum():
     ffi = FFI()
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
@@ -209,6 +209,9 @@
         # Check the particular results on Intel
         import platform
         if (platform.machine().startswith('i386') or
+            platform.machine().startswith('i486') or
+            platform.machine().startswith('i586') or
+            platform.machine().startswith('i686') or
             platform.machine().startswith('x86')):
             assert abs(more_precise - 0.656769) < 0.001
             assert abs(less_precise - 3.99091) < 0.001
@@ -1636,11 +1639,11 @@
 
 def test_FILE_stored_explicitly():
     ffi = FFI()
-    ffi.cdef("int myprintf(const char *, int); FILE *myfile;")
+    ffi.cdef("int myprintf11(const char *, int); FILE *myfile;")
     lib = ffi.verify("""
         #include <stdio.h>
         FILE *myfile;
-        int myprintf(const char *out, int value) {
+        int myprintf11(const char *out, int value) {
             return fprintf(myfile, out, value);
         }
     """)
@@ -1650,7 +1653,7 @@
     lib.myfile = ffi.cast("FILE *", fw1)
     #
     fw1.write(b"X")
-    r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42))
+    r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42))
     fw1.close()
     assert r == len("hello, 42!\n")
     #
@@ -2248,3 +2251,13 @@
     e = py.test.raises(VerificationError, ffi.verify, "")
     assert str(e.value) == ("feature not supported with ffi.verify(), but only "
                          "with ffi.set_source(): 'typedef unsigned long... t1'")
+
+def test_const_fields():
+    ffi = FFI()
+    ffi.cdef("""struct foo_s { const int a; void *const b; };""")
+    ffi.verify("""struct foo_s { const int a; void *const b; };""")
+    foo_s = ffi.typeof("struct foo_s")
+    assert foo_s.fields[0][0] == 'a'
+    assert foo_s.fields[0][1].type is ffi.typeof("int")
+    assert foo_s.fields[1][0] == 'b'
+    assert foo_s.fields[1][1].type is ffi.typeof("void *")
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
@@ -30,6 +30,32 @@
     assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]")
     assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()")
 
+def test_ffi_type_not_immortal():
+    import weakref, gc
+    ffi = _cffi1_backend.FFI()
+    t1 = ffi.typeof("int **")
+    t2 = ffi.typeof("int *")
+    w1 = weakref.ref(t1)
+    w2 = weakref.ref(t2)
+    del t1, ffi
+    gc.collect()
+    assert w1() is None
+    assert w2() is t2
+    ffi = _cffi1_backend.FFI()
+    assert ffi.typeof(ffi.new("int **")[0]) is t2
+    #
+    ffi = _cffi1_backend.FFI()
+    t1 = ffi.typeof("int ***")
+    t2 = ffi.typeof("int **")
+    w1 = weakref.ref(t1)
+    w2 = weakref.ref(t2)
+    del t2, ffi
+    gc.collect()
+    assert w1() is t1
+    assert w2() is not None   # kept alive by t1
+    ffi = _cffi1_backend.FFI()
+    assert ffi.typeof("int * *") is t1.item
+
 def test_ffi_cache_type_globally():
     ffi1 = _cffi1_backend.FFI()
     ffi2 = _cffi1_backend.FFI()
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py
@@ -782,8 +782,8 @@
         p = ffi.cast("long long", ffi.cast("wchar_t", -1))
         if SIZE_OF_WCHAR == 2:      # 2 bytes, unsigned
             assert int(p) == 0xffff
-        elif platform.machine() == 'aarch64': # 4 bytes, unsigned
-            assert int(p) == 0xffffffff
+        elif platform.machine().startswith(('arm', 'aarch64')):
+            assert int(p) == 0xffffffff      # 4 bytes, unsigned
         else:                       # 4 bytes, signed
             assert int(p) == -1
         p = ffi.cast("int", u+'\u1234')
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py
@@ -2,7 +2,7 @@
 import sys
 import py
 from cffi import FFI
-from cffi import recompiler, ffiplatform
+from cffi import recompiler, ffiplatform, VerificationMissing
 from pypy.module.test_lib_pypy.cffi_tests.udir import udir
 
 
@@ -204,3 +204,10 @@
                        "foobar", _version=0x2594)
     assert str(e.value).startswith(
         "cffi out-of-line Python module 'foobar' has unknown version")
+
+def test_partial_enum():
+    ffi = FFI()
+    ffi.cdef("enum foo { A, B, ... };")
+    ffi.set_source('test_partial_enum', None)
+    py.test.raises(VerificationMissing, ffi.emit_python_code,
+                   str(tmpdir.join('test_partial_enum.py')))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -1192,3 +1192,92 @@
     py.test.raises(ffi.error, getattr, lib, 'my_value')
     e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50)
     assert str(e.value) == "global variable 'my_value' is at address NULL"
+
+def test_const_fields():
+    ffi = FFI()
+    ffi.cdef("""struct foo_s { const int a; void *const b; };""")
+    lib = verify(ffi, 'test_const_fields', """
+        struct foo_s { const int a; void *const b; };""")
+    foo_s = ffi.typeof("struct foo_s")
+    assert foo_s.fields[0][0] == 'a'
+    assert foo_s.fields[0][1].type is ffi.typeof("int")
+    assert foo_s.fields[1][0] == 'b'
+    assert foo_s.fields[1][1].type is ffi.typeof("void *")
+
+def test_restrict_fields():
+    if sys.platform == 'win32':
+        py.test.skip("'__restrict__' probably not recognized")
+    ffi = FFI()
+    ffi.cdef("""struct foo_s { void * restrict b; };""")
+    lib = verify(ffi, 'test_restrict_fields', """
+        struct foo_s { void * __restrict__ b; };""")
+    foo_s = ffi.typeof("struct foo_s")
+    assert foo_s.fields[0][0] == 'b'
+    assert foo_s.fields[0][1].type is ffi.typeof("void *")
+
+def test_const_array_fields():
+    ffi = FFI()
+    ffi.cdef("""struct foo_s { const int a[4]; };""")
+    lib = verify(ffi, 'test_const_array_fields', """
+        struct foo_s { const int a[4]; };""")
+    foo_s = ffi.typeof("struct foo_s")
+    assert foo_s.fields[0][0] == 'a'
+    assert foo_s.fields[0][1].type is ffi.typeof("int[4]")
+
+def test_const_array_fields_varlength():

