[pypy-commit] pypy vecopt-merge: merged default
plan_rich
noreply at buildbot.pypy.org
Thu Oct 8 11:06:35 CEST 2015
Author: Richard Plangger <planrichi at gmail.com>
Branch: vecopt-merge
Changeset: r80036:598c56268e90
Date: 2015-10-08 08:59 +0200
http://bitbucket.org/pypy/pypy/changeset/598c56268e90/
Log: merged default
diff too long, truncating to 2000 out of 6822 lines
diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py
--- a/dotviewer/graphclient.py
+++ b/dotviewer/graphclient.py
@@ -127,16 +127,8 @@
return spawn_graphserver_handler((host, port))
def spawn_local_handler():
- if hasattr(sys, 'pypy_objspaceclass'):
- # if 'python' is actually PyPy, e.g. in a virtualenv, then
- # try hard to find a real CPython
- try:
- python = subprocess.check_output(
- 'env -i $SHELL -l -c "which python"', shell=True).strip()
- except subprocess.CalledProcessError:
- # did not work, fall back to 'python'
- python = 'python'
- else:
+ python = os.getenv('PYPY_PYGAME_PYTHON')
+ if not python:
python = sys.executable
args = [python, '-u', GRAPHSERVER, '--stdio']
p = subprocess.Popen(args,
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -158,7 +158,7 @@
RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'),
RegrTest('test_codeop.py', core=True),
RegrTest('test_coding.py', core=True),
- RegrTest('test_coercion.py', core=True),
+ RegrTest('test_coercion.py', core=True, usemodules='struct'),
RegrTest('test_collections.py', usemodules='binascii struct'),
RegrTest('test_colorsys.py'),
RegrTest('test_commands.py'),
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -609,7 +609,7 @@
def make_accessor_locked(name):
key = 'function ' + name
if key in ffi._parser._declarations:
- tp = ffi._parser._declarations[key]
+ tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
try:
value = backendlib.load_function(BType, name)
@@ -620,7 +620,7 @@
#
key = 'variable ' + name
if key in ffi._parser._declarations:
- tp = ffi._parser._declarations[key]
+ tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
read_variable = backendlib.read_variable
write_variable = backendlib.write_variable
@@ -631,12 +631,23 @@
#
if not copied_enums:
from . import model
- for key, tp in ffi._parser._declarations.items():
+ error = None
+ for key, (tp, _) in ffi._parser._declarations.items():
if not isinstance(tp, model.EnumType):
continue
+ try:
+ tp.check_not_partial()
+ except Exception as e:
+ error = e
+ continue
for enumname, enumval in zip(tp.enumerators, tp.enumvalues):
if enumname not in library.__dict__:
library.__dict__[enumname] = enumval
+ if error is not None:
+ if name in library.__dict__:
+ return # ignore error, about a different enum
+ raise error
+
for key, val in ffi._parser._int_constants.items():
if key not in library.__dict__:
library.__dict__[key] = val
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -26,6 +26,9 @@
_r_words = re.compile(r"\w+|\S")
_parser_cache = None
_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE)
+_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
+_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
+_r_cdecl = re.compile(r"\b__cdecl\b")
def _get_parser():
global _parser_cache
@@ -44,6 +47,14 @@
macrovalue = macrovalue.replace('\\\n', '').strip()
macros[macroname] = macrovalue
csource = _r_define.sub('', csource)
+ # BIG HACK: replace WINAPI or __stdcall with "volatile const".
+ # It doesn't make sense for the return type of a function to be
+ # "volatile volatile const", so we abuse it to detect __stdcall...
+ # Hack number 2 is that "int(volatile *fptr)();" is not valid C
+ # syntax, so we place the "volatile" before the opening parenthesis.
+ csource = _r_stdcall2.sub(' volatile volatile const(', csource)
+ csource = _r_stdcall1.sub(' volatile volatile const ', csource)
+ csource = _r_cdecl.sub(' ', csource)
# Replace "[...]" with "[__dotdotdotarray__]"
csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
# Replace "...}" with "__dotdotdotNUM__}". This construction should
@@ -192,6 +203,7 @@
if not decl.name:
raise api.CDefError("typedef does not declare any name",
decl)
+ quals = 0
if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType)
and decl.type.type.names[-1] == '__dotdotdot__'):
realtype = self._get_unknown_type(decl)
@@ -202,8 +214,9 @@
decl.type.type.type.names == ['__dotdotdot__']):
realtype = model.unknown_ptr_type(decl.name)
else:
- realtype = self._get_type(decl.type, name=decl.name)
- self._declare('typedef ' + decl.name, realtype)
+ realtype, quals = self._get_type_and_quals(
+ decl.type, name=decl.name)
+ self._declare('typedef ' + decl.name, realtype, quals=quals)
else:
raise api.CDefError("unrecognized construct", decl)
except api.FFIError as e:
@@ -255,9 +268,9 @@
def _parse_decl(self, decl):
node = decl.type
if isinstance(node, pycparser.c_ast.FuncDecl):
- tp = self._get_type(node, name=decl.name)
+ tp, quals = self._get_type_and_quals(node, name=decl.name)
assert isinstance(tp, model.RawFunctionType)
- tp = self._get_type_pointer(tp)
+ tp = self._get_type_pointer(tp, quals)
self._declare('function ' + decl.name, tp)
else:
if isinstance(node, pycparser.c_ast.Struct):
@@ -271,9 +284,10 @@
decl)
#
if decl.name:
- tp = self._get_type(node, partial_length_ok=True)
+ tp, quals = self._get_type_and_quals(node,
+ partial_length_ok=True)
if tp.is_raw_function:
- tp = self._get_type_pointer(tp)
+ tp = self._get_type_pointer(tp, quals)
self._declare('function ' + decl.name, tp)
elif (tp.is_integer_type() and
hasattr(decl, 'init') and
@@ -287,10 +301,10 @@
_r_int_literal.match(decl.init.expr.value)):
self._add_integer_constant(decl.name,
'-' + decl.init.expr.value)
- elif self._is_constant_globalvar(node):
- self._declare('constant ' + decl.name, tp)
+ elif (quals & model.Q_CONST) and not tp.is_array_type:
+ self._declare('constant ' + decl.name, tp, quals=quals)
else:
- self._declare('variable ' + decl.name, tp)
+ self._declare('variable ' + decl.name, tp, quals=quals)
def parse_type(self, cdecl):
ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2]
@@ -298,40 +312,51 @@
exprnode = ast.ext[-1].type.args.params[0]
if isinstance(exprnode, pycparser.c_ast.ID):
raise api.CDefError("unknown identifier '%s'" % (exprnode.name,))
- return self._get_type(exprnode.type)
+ tp, quals = self._get_type_and_quals(exprnode.type)
+ return tp
- def _declare(self, name, obj, included=False):
+ def _declare(self, name, obj, included=False, quals=0):
if name in self._declarations:
- if self._declarations[name] is obj:
+ prevobj, prevquals = self._declarations[name]
+ if prevobj is obj and prevquals == quals:
return
if not self._override:
raise api.FFIError(
"multiple declarations of %s (for interactive usage, "
"try cdef(xx, override=True))" % (name,))
assert '__dotdotdot__' not in name.split()
- self._declarations[name] = obj
+ self._declarations[name] = (obj, quals)
if included:
self._included_declarations.add(obj)
- def _get_type_pointer(self, type, const=False, declname=None):
+ def _extract_quals(self, type):
+ quals = 0
+ if isinstance(type, (pycparser.c_ast.TypeDecl,
+ pycparser.c_ast.PtrDecl)):
+ if 'const' in type.quals:
+ quals |= model.Q_CONST
+ if 'restrict' in type.quals:
+ quals |= model.Q_RESTRICT
+ return quals
+
+ def _get_type_pointer(self, type, quals, declname=None):
if isinstance(type, model.RawFunctionType):
return type.as_function_pointer()
if (isinstance(type, model.StructOrUnionOrEnum) and
type.name.startswith('$') and type.name[1:].isdigit() and
type.forcename is None and declname is not None):
- return model.NamedPointerType(type, declname)
- if const:
- return model.ConstPointerType(type)
- return model.PointerType(type)
+ return model.NamedPointerType(type, declname, quals)
+ return model.PointerType(type, quals)
- def _get_type(self, typenode, name=None, partial_length_ok=False):
+ def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False):
# first, dereference typedefs, if we have it already parsed, we're good
if (isinstance(typenode, pycparser.c_ast.TypeDecl) and
isinstance(typenode.type, pycparser.c_ast.IdentifierType) and
len(typenode.type.names) == 1 and
('typedef ' + typenode.type.names[0]) in self._declarations):
- type = self._declarations['typedef ' + typenode.type.names[0]]
- return type
+ tp, quals = self._declarations['typedef ' + typenode.type.names[0]]
+ quals |= self._extract_quals(typenode)
+ return tp, quals
#
if isinstance(typenode, pycparser.c_ast.ArrayDecl):
# array type
@@ -340,18 +365,19 @@
else:
length = self._parse_constant(
typenode.dim, partial_length_ok=partial_length_ok)
- tp = self._get_type(typenode.type,
+ tp, quals = self._get_type_and_quals(typenode.type,
partial_length_ok=partial_length_ok)
- return model.ArrayType(tp, length)
+ return model.ArrayType(tp, length), quals
#
if isinstance(typenode, pycparser.c_ast.PtrDecl):
# pointer type
- const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl)
- and 'const' in typenode.type.quals)
- return self._get_type_pointer(self._get_type(typenode.type), const,
- declname=name)
+ itemtype, itemquals = self._get_type_and_quals(typenode.type)
+ tp = self._get_type_pointer(itemtype, itemquals, declname=name)
+ quals = self._extract_quals(typenode)
+ return tp, quals
#
if isinstance(typenode, pycparser.c_ast.TypeDecl):
+ quals = self._extract_quals(typenode)
type = typenode.type
if isinstance(type, pycparser.c_ast.IdentifierType):
# assume a primitive type. get it from .names, but reduce
@@ -379,35 +405,38 @@
names = newnames + names
ident = ' '.join(names)
if ident == 'void':
- return model.void_type
+ return model.void_type, quals
if ident == '__dotdotdot__':
raise api.FFIError(':%d: bad usage of "..."' %
typenode.coord.line)
- return resolve_common_type(ident)
+ return resolve_common_type(ident), quals
#
if isinstance(type, pycparser.c_ast.Struct):
# 'struct foobar'
- return self._get_struct_union_enum_type('struct', type, name)
+ tp = self._get_struct_union_enum_type('struct', type, name)
+ return tp, quals
#
if isinstance(type, pycparser.c_ast.Union):
# 'union foobar'
- return self._get_struct_union_enum_type('union', type, name)
+ tp = self._get_struct_union_enum_type('union', type, name)
+ return tp, quals
#
if isinstance(type, pycparser.c_ast.Enum):
# 'enum foobar'
- return self._get_struct_union_enum_type('enum', type, name)
+ tp = self._get_struct_union_enum_type('enum', type, name)
+ return tp, quals
#
if isinstance(typenode, pycparser.c_ast.FuncDecl):
# a function type
- return self._parse_function_type(typenode, name)
+ return self._parse_function_type(typenode, name), 0
#
# nested anonymous structs or unions end up here
if isinstance(typenode, pycparser.c_ast.Struct):
return self._get_struct_union_enum_type('struct', typenode, name,
- nested=True)
+ nested=True), 0
if isinstance(typenode, pycparser.c_ast.Union):
return self._get_struct_union_enum_type('union', typenode, name,
- nested=True)
+ nested=True), 0
#
raise api.FFIError(":%d: bad or unsupported type declaration" %
typenode.coord.line)
@@ -426,28 +455,28 @@
raise api.CDefError(
"%s: a function with only '(...)' as argument"
" is not correct C" % (funcname or 'in expression'))
- args = [self._as_func_arg(self._get_type(argdeclnode.type))
+ args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
for argdeclnode in params]
if not ellipsis and args == [model.void_type]:
args = []
- result = self._get_type(typenode.type)
- return model.RawFunctionType(tuple(args), result, ellipsis)
+ result, quals = self._get_type_and_quals(typenode.type)
+    # the 'quals' on the result type are ignored. HACK: we abuse them
+ # to detect __stdcall functions: we textually replace "__stdcall"
+ # with "volatile volatile const" above.
+ abi = None
+ if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway
+ if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']:
+ abi = '__stdcall'
+ return model.RawFunctionType(tuple(args), result, ellipsis, abi)
- def _as_func_arg(self, type):
+ def _as_func_arg(self, type, quals):
if isinstance(type, model.ArrayType):
- return model.PointerType(type.item)
+ return model.PointerType(type.item, quals)
elif isinstance(type, model.RawFunctionType):
return type.as_function_pointer()
else:
return type
- def _is_constant_globalvar(self, typenode):
- if isinstance(typenode, pycparser.c_ast.PtrDecl):
- return 'const' in typenode.quals
- if isinstance(typenode, pycparser.c_ast.TypeDecl):
- return 'const' in typenode.quals
- return False
-
def _get_struct_union_enum_type(self, kind, type, name=None, nested=False):
# First, a level of caching on the exact 'type' node of the AST.
# This is obscure, but needed because pycparser "unrolls" declarations
@@ -486,7 +515,7 @@
else:
explicit_name = name
key = '%s %s' % (kind, name)
- tp = self._declarations.get(key, None)
+ tp, _ = self._declarations.get(key, (None, None))
#
if tp is None:
if kind == 'struct':
@@ -528,6 +557,7 @@
fldnames = []
fldtypes = []
fldbitsize = []
+ fldquals = []
for decl in type.decls:
if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and
''.join(decl.type.names) == '__dotdotdot__'):
@@ -541,7 +571,8 @@
else:
bitsize = self._parse_constant(decl.bitsize)
self._partial_length = False
- type = self._get_type(decl.type, partial_length_ok=True)
+ type, fqual = self._get_type_and_quals(decl.type,
+ partial_length_ok=True)
if self._partial_length:
self._make_partial(tp, nested)
if isinstance(type, model.StructType) and type.partial:
@@ -549,9 +580,11 @@
fldnames.append(decl.name or '')
fldtypes.append(type)
fldbitsize.append(bitsize)
+ fldquals.append(fqual)
tp.fldnames = tuple(fldnames)
tp.fldtypes = tuple(fldtypes)
tp.fldbitsize = tuple(fldbitsize)
+ tp.fldquals = tuple(fldquals)
if fldbitsize != [-1] * len(fldbitsize):
if isinstance(tp, model.StructType) and tp.partial:
raise NotImplementedError("%s: using both bitfields and '...;'"
@@ -632,14 +665,12 @@
return tp
def include(self, other):
- for name, tp in other._declarations.items():
+ for name, (tp, quals) in other._declarations.items():
if name.startswith('anonymous $enum_$'):
continue # fix for test_anonymous_enum_include
kind = name.split(' ', 1)[0]
- if kind in ('struct', 'union', 'enum', 'anonymous'):
- self._declare(name, tp, included=True)
- elif kind == 'typedef':
- self._declare(name, tp, included=True)
+ if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'):
+ self._declare(name, tp, included=True, quals=quals)
for k, v in other._int_constants.items():
self._add_constants(k, v)
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -1,14 +1,29 @@
-import types
+import types, sys
import weakref
from .lock import allocate_lock
+# type qualifiers
+Q_CONST = 0x01
+Q_RESTRICT = 0x02
+
+def qualify(quals, replace_with):
+ if quals & Q_CONST:
+ replace_with = ' const ' + replace_with.lstrip()
+ if quals & Q_RESTRICT:
+ # It seems that __restrict is supported by gcc and msvc.
+ # If you hit some different compiler, add a #define in
+ # _cffi_include.h for it (and in its copies, documented there)
+ replace_with = ' __restrict ' + replace_with.lstrip()
+ return replace_with
+
+
class BaseTypeByIdentity(object):
is_array_type = False
is_raw_function = False
- def get_c_name(self, replace_with='', context='a C file'):
+ def get_c_name(self, replace_with='', context='a C file', quals=0):
result = self.c_name_with_marker
assert result.count('&') == 1
# some logic duplication with ffi.getctype()... :-(
@@ -18,6 +33,7 @@
replace_with = '(%s)' % replace_with
elif not replace_with[0] in '[(':
replace_with = ' ' + replace_with
+ replace_with = qualify(quals, replace_with)
result = result.replace('&', replace_with)
if '$' in result:
from .ffiplatform import VerificationError
@@ -177,18 +193,21 @@
class BaseFunctionType(BaseType):
- _attrs_ = ('args', 'result', 'ellipsis')
+ _attrs_ = ('args', 'result', 'ellipsis', 'abi')
- def __init__(self, args, result, ellipsis):
+ def __init__(self, args, result, ellipsis, abi=None):
self.args = args
self.result = result
self.ellipsis = ellipsis
+ self.abi = abi
#
reprargs = [arg._get_c_name() for arg in self.args]
if self.ellipsis:
reprargs.append('...')
reprargs = reprargs or ['void']
replace_with = self._base_pattern % (', '.join(reprargs),)
+ if abi is not None:
+ replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
self.c_name_with_marker = (
self.result.c_name_with_marker.replace('&', replace_with))
@@ -206,7 +225,7 @@
"type, not a pointer-to-function type" % (self,))
def as_function_pointer(self):
- return FunctionPtrType(self.args, self.result, self.ellipsis)
+ return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)
class FunctionPtrType(BaseFunctionType):
@@ -217,24 +236,29 @@
args = []
for tp in self.args:
args.append(tp.get_cached_btype(ffi, finishlist))
+ abi_args = ()
+ if self.abi == "__stdcall":
+ if not self.ellipsis: # __stdcall ignored for variadic funcs
+ try:
+ abi_args = (ffi._backend.FFI_STDCALL,)
+ except AttributeError:
+ pass
return global_cache(self, ffi, 'new_function_type',
- tuple(args), result, self.ellipsis)
+ tuple(args), result, self.ellipsis, *abi_args)
def as_raw_function(self):
- return RawFunctionType(self.args, self.result, self.ellipsis)
+ return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
class PointerType(BaseType):
- _attrs_ = ('totype',)
- _base_pattern = " *&"
- _base_pattern_array = "(*&)"
+ _attrs_ = ('totype', 'quals')
- def __init__(self, totype):
+ def __init__(self, totype, quals=0):
self.totype = totype
+ self.quals = quals
+ extra = qualify(quals, " *&")
if totype.is_array_type:
- extra = self._base_pattern_array
- else:
- extra = self._base_pattern
+ extra = "(%s)" % (extra.lstrip(),)
self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
def build_backend_type(self, ffi, finishlist):
@@ -243,10 +267,8 @@
voidp_type = PointerType(void_type)
-
-class ConstPointerType(PointerType):
- _base_pattern = " const *&"
- _base_pattern_array = "(const *&)"
+def ConstPointerType(totype):
+ return PointerType(totype, Q_CONST)
const_voidp_type = ConstPointerType(void_type)
@@ -254,8 +276,8 @@
class NamedPointerType(PointerType):
_attrs_ = ('totype', 'name')
- def __init__(self, totype, name):
- PointerType.__init__(self, totype)
+ def __init__(self, totype, name, quals=0):
+ PointerType.__init__(self, totype, quals)
self.name = name
self.c_name_with_marker = name + '&'
@@ -315,11 +337,12 @@
partial = False
packed = False
- def __init__(self, name, fldnames, fldtypes, fldbitsize):
+ def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
self.name = name
self.fldnames = fldnames
self.fldtypes = fldtypes
self.fldbitsize = fldbitsize
+ self.fldquals = fldquals
self.build_c_name_with_marker()
def has_anonymous_struct_fields(self):
@@ -331,14 +354,17 @@
return False
def enumfields(self):
- for name, type, bitsize in zip(self.fldnames, self.fldtypes,
- self.fldbitsize):
+ fldquals = self.fldquals
+ if fldquals is None:
+ fldquals = (0,) * len(self.fldnames)
+ for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
+ self.fldbitsize, fldquals):
if name == '' and isinstance(type, StructOrUnion):
# nested anonymous struct/union
for result in type.enumfields():
yield result
else:
- yield (name, type, bitsize)
+ yield (name, type, bitsize, quals)
def force_flatten(self):
# force the struct or union to have a declaration that lists
@@ -347,13 +373,16 @@
names = []
types = []
bitsizes = []
- for name, type, bitsize in self.enumfields():
+ fldquals = []
+ for name, type, bitsize, quals in self.enumfields():
names.append(name)
types.append(type)
bitsizes.append(bitsize)
+ fldquals.append(quals)
self.fldnames = tuple(names)
self.fldtypes = tuple(types)
self.fldbitsize = tuple(bitsizes)
+ self.fldquals = tuple(fldquals)
def get_cached_btype(self, ffi, finishlist, can_delay=False):
BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h
--- a/lib_pypy/cffi/parse_c_type.h
+++ b/lib_pypy/cffi/parse_c_type.h
@@ -5,7 +5,7 @@
#define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8))
#define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode)
-#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8)
+#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8)
#define _CFFI_OP_PRIMITIVE 1
#define _CFFI_OP_POINTER 3
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -195,17 +195,15 @@
elif isinstance(tp, model.StructOrUnion):
if tp.fldtypes is not None and (
tp not in self.ffi._parser._included_declarations):
- for name1, tp1, _ in tp.enumfields():
+ for name1, tp1, _, _ in tp.enumfields():
self._do_collect_type(self._field_type(tp, name1, tp1))
else:
for _, x in tp._get_items():
self._do_collect_type(x)
- def _get_declarations(self):
- return sorted(self.ffi._parser._declarations.items())
-
def _generate(self, step_name):
- for name, tp in self._get_declarations():
+ lst = self.ffi._parser._declarations.items()
+ for name, (tp, quals) in sorted(lst):
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
@@ -214,6 +212,7 @@
raise ffiplatform.VerificationError(
"not implemented in recompile(): %r" % name)
try:
+ self._current_quals = quals
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
@@ -608,7 +607,11 @@
call_arguments.append('x%d' % i)
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
- name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments)
+ if tp.abi:
+ abi = tp.abi + ' '
+ else:
+ abi = ''
+ name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments)
prnt('static %s' % (tp.result.get_c_name(name_and_arguments),))
prnt('{')
call_arguments = ', '.join(call_arguments)
@@ -711,7 +714,8 @@
if difference:
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
- name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments)
+ name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name,
+ repr_arguments)
prnt('static %s' % (tp_result.get_c_name(name_and_arguments),))
prnt('{')
if result_decl:
@@ -774,7 +778,7 @@
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
- for fname, ftype, fbitsize in tp.enumfields():
+ for fname, ftype, fbitsize, fqual in tp.enumfields():
try:
if ftype.is_integer_type() or fbitsize >= 0:
# accept all integers, but complain on float or double
@@ -789,7 +793,8 @@
ftype = ftype.item
fname = fname + '[0]'
prnt(' { %s = &p->%s; (void)tmp; }' % (
- ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+ ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+ fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
@@ -823,7 +828,7 @@
c_fields = []
if reason_for_not_expanding is None:
enumfields = list(tp.enumfields())
- for fldname, fldtype, fbitsize in enumfields:
+ for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
@@ -879,7 +884,9 @@
# because they don't have any known C name. Check that they are
# not partial (we can't complete or verify them!) and emit them
# anonymously.
- for tp in list(self._struct_unions):
+ lst = list(self._struct_unions.items())
+ lst.sort(key=lambda tp_order: tp_order[1])
+ for tp, order in lst:
if tp not in self._seen_struct_unions:
if tp.partial:
raise NotImplementedError("internal inconsistency: %r is "
@@ -1004,6 +1011,8 @@
def _enum_ctx(self, tp, cname):
type_index = self._typesdict[tp]
type_op = CffiOp(OP_ENUM, -1)
+ if self.target_is_python:
+ tp.check_not_partial()
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._lsts["global"].append(
GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op,
@@ -1081,7 +1090,8 @@
# if 'tp' were a function type, but that is not possible here.
# (If 'tp' is a function _pointer_ type, then casts from "fn_t
# **" to "void *" are again no-ops, as far as I can tell.)
- prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,)))
+ decl = '*_cffi_var_%s(void)' % (name,)
+ prnt('static ' + tp.get_c_name(decl, quals=self._current_quals))
prnt('{')
prnt(' return %s(%s);' % (ampersand, name))
prnt('}')
@@ -1130,7 +1140,13 @@
else:
self.cffi_types[index] = CffiOp(OP_NOOP, realindex)
index += 1
- self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis))
+ flags = int(tp.ellipsis)
+ if tp.abi is not None:
+ if tp.abi == '__stdcall':
+ flags |= 2
+ else:
+ raise NotImplementedError("abi=%r" % (tp.abi,))
+ self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags)
def _emit_bytecode_PointerType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype])
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -197,7 +197,10 @@
return library
def _get_declarations(self):
- return sorted(self.ffi._parser._declarations.items())
+ lst = [(key, tp) for (key, (tp, qual)) in
+ self.ffi._parser._declarations.items()]
+ lst.sort()
+ return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
@@ -468,7 +471,7 @@
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
- for fname, ftype, fbitsize in tp.enumfields():
+ for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
@@ -477,7 +480,8 @@
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
- ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+ ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+ fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
@@ -488,7 +492,7 @@
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
- for fname, ftype, fbitsize in tp.enumfields():
+ for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
@@ -552,7 +556,7 @@
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
- for fname, ftype, fbitsize in tp.enumfields():
+ for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py
--- a/lib_pypy/cffi/vengine_gen.py
+++ b/lib_pypy/cffi/vengine_gen.py
@@ -87,7 +87,10 @@
return library
def _get_declarations(self):
- return sorted(self.ffi._parser._declarations.items())
+ lst = [(key, tp) for (key, (tp, qual)) in
+ self.ffi._parser._declarations.items()]
+ lst.sort()
+ return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
@@ -156,7 +159,11 @@
arglist = ', '.join(arglist) or 'void'
wrappername = '_cffi_f_%s' % name
self.export_symbols.append(wrappername)
- funcdecl = ' %s(%s)' % (wrappername, arglist)
+ if tp.abi:
+ abi = tp.abi + ' '
+ else:
+ abi = ''
+ funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist)
context = 'result of %s' % name
prnt(tpresult.get_c_name(funcdecl, context))
prnt('{')
@@ -260,7 +267,7 @@
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
- for fname, ftype, fbitsize in tp.enumfields():
+ for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
@@ -269,7 +276,8 @@
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
- ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+ ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+ fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
@@ -280,7 +288,7 @@
prnt(' static intptr_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
- for fname, ftype, fbitsize in tp.enumfields():
+ for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
@@ -342,7 +350,7 @@
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
- for fname, ftype, fbitsize in tp.enumfields():
+ for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -39,8 +39,9 @@
"_csv", "cppyy", "_pypyjson"
])
-if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64'
- and sys.maxint > 2**32): # it's not enough that we get x86_64
+if ((sys.platform.startswith('linux') or sys.platform == 'darwin')
+ and os.uname()[4] == 'x86_64' and sys.maxint > 2**32):
+ # it's not enough that we get x86_64
working_modules.add('_vmprof')
translation_modules = default_modules.copy()
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -83,7 +83,7 @@
RPython Mixed Modules
-=====================
+---------------------
This is the internal way to write built-in extension modules in PyPy.
It cannot be used by any 3rd-party module: the extension modules are
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -44,6 +44,19 @@
Add support for ndarray.ctypes property.
+.. branch: share-guard-info
+
+Share guard resume data between consecutive guards that have only
+pure operations and guards in between.
+
+.. branch: issue-2148
+
+Fix performance regression on operations mixing numpy scalars and Python
+floats, cf. issue #2148.
+
+.. branch: cffi-stdcall
+Win32: support '__stdcall' in CFFI.
+
.. branch: vecopt
.. branch: vecopt-merge
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -253,26 +253,27 @@
def binaryop(self, space, w_other):
w_a, w_b = _coerce_helper(space, self, w_other)
- if w_a is None:
- w_a = self
- w_b = w_other
- if w_a is self:
- w_meth = self.getattr(space, specialname, False)
+ if isinstance(w_a, W_InstanceObject):
+ w_meth = w_a.getattr(space, specialname, False)
if w_meth is None:
return space.w_NotImplemented
return space.call_function(w_meth, w_b)
else:
+ # fall back to space.xxx() if coerce returns a non-W_Instance
+ # object as first argument
return getattr(space, objspacename)(w_a, w_b)
binaryop.func_name = name
def rbinaryop(self, space, w_other):
w_a, w_b = _coerce_helper(space, self, w_other)
- if w_a is None or w_a is self:
- w_meth = self.getattr(space, rspecialname, False)
+ if isinstance(w_a, W_InstanceObject):
+ w_meth = w_a.getattr(space, rspecialname, False)
if w_meth is None:
return space.w_NotImplemented
- return space.call_function(w_meth, w_other)
+ return space.call_function(w_meth, w_b)
else:
+ # fall back to space.xxx() if coerce returns a non-W_Instance
+ # object as first argument
return getattr(space, objspacename)(w_b, w_a)
rbinaryop.func_name = "r" + name
return binaryop, rbinaryop
@@ -283,7 +284,7 @@
except OperationError, e:
if not e.match(space, space.w_TypeError):
raise
- return [None, None]
+ return [w_self, w_other]
return space.fixedview(w_tup, 2)
def descr_instance_new(space, w_type, w_class, w_dict=None):
@@ -523,13 +524,9 @@
def descr_cmp(self, space, w_other): # do all the work here like CPython
w_a, w_b = _coerce_helper(space, self, w_other)
- if w_a is None:
- w_a = self
- w_b = w_other
- else:
- if (not isinstance(w_a, W_InstanceObject) and
- not isinstance(w_b, W_InstanceObject)):
- return space.cmp(w_a, w_b)
+ if (not isinstance(w_a, W_InstanceObject) and
+ not isinstance(w_b, W_InstanceObject)):
+ return space.cmp(w_a, w_b)
if isinstance(w_a, W_InstanceObject):
w_func = w_a.getattr(space, '__cmp__', False)
if w_func is not None:
@@ -636,42 +633,36 @@
def descr_pow(self, space, w_other, w_modulo=None):
if space.is_none(w_modulo):
w_a, w_b = _coerce_helper(space, self, w_other)
- if w_a is None:
- w_a = self
- w_b = w_other
- if w_a is self:
- w_func = self.getattr(space, '__pow__', False)
- if w_func is not None:
- return space.call_function(w_func, w_other)
- return space.w_NotImplemented
+ if isinstance(w_a, W_InstanceObject):
+ w_func = w_a.getattr(space, '__pow__', False)
+ if w_func is None:
+ return space.w_NotImplemented
+ return space.call_function(w_func, w_other)
else:
return space.pow(w_a, w_b, space.w_None)
else:
# CPython also doesn't try coercion in this case
w_func = self.getattr(space, '__pow__', False)
- if w_func is not None:
- return space.call_function(w_func, w_other, w_modulo)
- return space.w_NotImplemented
+ if w_func is None:
+ return space.w_NotImplemented
+ return space.call_function(w_func, w_other, w_modulo)
def descr_rpow(self, space, w_other, w_modulo=None):
if space.is_none(w_modulo):
w_a, w_b = _coerce_helper(space, self, w_other)
- if w_a is None:
- w_a = self
- w_b = w_other
- if w_a is self:
- w_func = self.getattr(space, '__rpow__', False)
- if w_func is not None:
- return space.call_function(w_func, w_other)
- return space.w_NotImplemented
+ if isinstance(w_a, W_InstanceObject):
+ w_func = w_a.getattr(space, '__rpow__', False)
+ if w_func is None:
+ return space.w_NotImplemented
+ return space.call_function(w_func, w_other)
else:
return space.pow(w_b, w_a, space.w_None)
else:
# CPython also doesn't try coercion in this case
w_func = self.getattr(space, '__rpow__', False)
- if w_func is not None:
- return space.call_function(w_func, w_other, w_modulo)
- return space.w_NotImplemented
+ if w_func is None:
+ return space.w_NotImplemented
+ return space.call_function(w_func, w_other, w_modulo)
def descr_next(self, space):
w_func = self.getattr(space, 'next', False)
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -417,6 +417,22 @@
pass
raises(TypeError, coerce, B(), [])
+ def test_coerce_inf(self):
+ class B:
+ def __coerce__(self, other):
+ return B(), B()
+ def __add__(self, other):
+ return 42
+ assert B() + B() == 42
+
+ def test_coerce_reverse(self):
+ class CoerceNumber:
+ def __coerce__(self, other):
+ assert isinstance(other, int)
+ return (6, other)
+ assert 5 + CoerceNumber() == 11
+ assert 2 ** CoerceNumber() == 64
+
def test_binaryop(self):
class A:
def __add__(self, other):
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -1,9 +1,16 @@
import sys
from pypy.interpreter.mixedmodule import MixedModule
-from rpython.rlib import rdynload
+from rpython.rlib import rdynload, clibffi
VERSION = "1.3.0"
+FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
+try:
+ FFI_STDCALL = clibffi.FFI_STDCALL
+ has_stdcall = True
+except AttributeError:
+ has_stdcall = False
+
class Module(MixedModule):
@@ -44,8 +51,8 @@
'get_errno': 'cerrno.get_errno',
'set_errno': 'cerrno.set_errno',
- 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")',
- 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name
+ 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI,
+ 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name
# CFFI 1.0
'FFI': 'ffi_obj.W_FFIObject',
@@ -53,6 +60,9 @@
if sys.platform == 'win32':
interpleveldefs['getwinerror'] = 'cerrno.getwinerror'
+ if has_stdcall:
+ interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL
+
def get_dict_rtld_constants():
found = {}
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -178,7 +178,8 @@
@jit.dont_look_inside
-def _handle_applevel_exception(space, callback, e, ll_res, extra_line):
+def _handle_applevel_exception(callback, e, ll_res, extra_line):
+ space = callback.space
callback.write_error_return_value(ll_res)
if callback.w_onerror is None:
callback.print_error(e, extra_line)
@@ -199,13 +200,21 @@
extra_line="\nDuring the call to 'onerror', "
"another exception occurred:\n\n")
+ at jit.jit_callback("CFFI")
+def py_invoke_callback(callback, ll_res, ll_args):
+ extra_line = ''
+ try:
+ w_res = callback.invoke(ll_args)
+ extra_line = "Trying to convert the result back to C:\n"
+ callback.convert_result(ll_res, w_res)
+ except OperationError, e:
+ _handle_applevel_exception(callback, e, ll_res, extra_line)
- at jit.jit_callback("CFFI")
def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata):
""" Callback specification.
ffi_cif - something ffi specific, don't care
ll_args - rffi.VOIDPP - pointer to array of pointers to args
- ll_restype - rffi.VOIDP - pointer to result
+ ll_res - rffi.VOIDP - pointer to result
ll_userdata - a special structure which holds necessary information
(what the real callback is for example), casted to VOIDP
"""
@@ -228,13 +237,7 @@
space = callback.space
try:
must_leave = space.threadlocals.try_enter_thread(space)
- extra_line = ''
- try:
- w_res = callback.invoke(ll_args)
- extra_line = "Trying to convert the result back to C:\n"
- callback.convert_result(ll_res, w_res)
- except OperationError, e:
- _handle_applevel_exception(space, callback, e, ll_res, extra_line)
+ py_invoke_callback(callback, ll_res, ll_args)
#
except Exception, e:
# oups! last-level attempt to recover.
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -12,6 +12,7 @@
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from pypy.interpreter.error import OperationError, oefmt
+from pypy.module import _cffi_backend
from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno
from pypy.module._cffi_backend.ctypeobj import W_CType
from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer
@@ -23,20 +24,22 @@
class W_CTypeFunc(W_CTypePtrBase):
- _attrs_ = ['fargs', 'ellipsis', 'cif_descr']
- _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr']
+ _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr']
+ _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr']
kind = "function"
cif_descr = lltype.nullptr(CIF_DESCRIPTION)
- def __init__(self, space, fargs, fresult, ellipsis):
+ def __init__(self, space, fargs, fresult, ellipsis,
+ abi=_cffi_backend.FFI_DEFAULT_ABI):
assert isinstance(ellipsis, bool)
- extra = self._compute_extra_text(fargs, fresult, ellipsis)
+ extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi)
size = rffi.sizeof(rffi.VOIDP)
- W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult,
+ W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult,
could_cast_anything=False)
self.fargs = fargs
self.ellipsis = ellipsis
+ self.abi = abi
# fresult is stored in self.ctitem
if not ellipsis:
@@ -44,7 +47,7 @@
# at all. The cif is computed on every call from the actual
# types passed in. For all other functions, the cif_descr
# is computed here.
- builder = CifDescrBuilder(fargs, fresult)
+ builder = CifDescrBuilder(fargs, fresult, abi)
try:
builder.rawallocate(self)
except OperationError, e:
@@ -76,7 +79,7 @@
ctypefunc.fargs = fvarargs
ctypefunc.ctitem = self.ctitem
#ctypefunc.cif_descr = NULL --- already provided as the default
- CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc)
+ CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc)
return ctypefunc
@rgc.must_be_light_finalizer
@@ -84,8 +87,13 @@
if self.cif_descr:
lltype.free(self.cif_descr, flavor='raw')
- def _compute_extra_text(self, fargs, fresult, ellipsis):
+ def _compute_extra_text(self, fargs, fresult, ellipsis, abi):
+ from pypy.module._cffi_backend import newtype
argnames = ['(*)(']
+ xpos = 2
+ if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL:
+ argnames[0] = '(__stdcall *)('
+ xpos += len('__stdcall ')
for i, farg in enumerate(fargs):
if i > 0:
argnames.append(', ')
@@ -95,7 +103,7 @@
argnames.append(', ')
argnames.append('...')
argnames.append(')')
- return ''.join(argnames)
+ return ''.join(argnames), xpos
def _fget(self, attrchar):
if attrchar == 'a': # args
@@ -106,7 +114,7 @@
if attrchar == 'E': # ellipsis
return self.space.wrap(self.ellipsis)
if attrchar == 'A': # abi
- return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX
+ return self.space.wrap(self.abi)
return W_CTypePtrBase._fget(self, attrchar)
def call(self, funcaddr, args_w):
@@ -181,11 +189,6 @@
def set_mustfree_flag(data, flag):
rffi.ptradd(data, -1)[0] = chr(flag)
-def _get_abi(space, name):
- abi = getattr(clibffi, name)
- assert isinstance(abi, int)
- return space.wrap(abi)
-
# ____________________________________________________________
@@ -260,9 +263,10 @@
class CifDescrBuilder(object):
rawmem = lltype.nullptr(rffi.CCHARP.TO)
- def __init__(self, fargs, fresult):
+ def __init__(self, fargs, fresult, fabi):
self.fargs = fargs
self.fresult = fresult
+ self.fabi = fabi
def fb_alloc(self, size):
size = llmemory.raw_malloc_usage(size)
@@ -421,7 +425,7 @@
cif_descr.exchange_size = exchange_offset
def fb_extra_fields(self, cif_descr):
- cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX
+ cif_descr.abi = self.fabi
cif_descr.nargs = len(self.fargs)
cif_descr.rtype = self.rtype
cif_descr.atypes = self.atypes
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -11,7 +11,8 @@
class W_CType(W_Root):
- _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_']
+ _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_',
+ '_pointer_type']
_immutable_fields_ = ['size?', 'name', 'name_position']
# note that 'size' is not strictly immutable, because it can change
# from -1 to the real value in the W_CTypeStruct subclass.
@@ -142,7 +143,7 @@
# obscure hack when untranslated, maybe, approximate, don't use
if isinstance(align, llmemory.FieldOffset):
align = rffi.sizeof(align.TYPE.y)
- if (1 << (8*align-2)) > sys.maxint:
+ if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint:
align /= 2
else:
# a different hack when translated, to avoid seeing constants
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -168,7 +168,7 @@
class W_CTypePointer(W_CTypePtrBase):
- _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr']
+ _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types']
_immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr']
kind = "pointer"
cache_array_type = None
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -4,10 +4,11 @@
from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash
from rpython.rlib.rarithmetic import ovfcheck, intmask
-from rpython.rlib import jit
+from rpython.rlib import jit, rweakref, clibffi
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.tool import rffi_platform
+from pypy.module import _cffi_backend
from pypy.module._cffi_backend import (ctypeobj, ctypeprim, ctypeptr,
ctypearray, ctypestruct, ctypevoid, ctypeenum)
@@ -23,27 +24,12 @@
class UniqueCache:
def __init__(self, space):
- self.ctvoid = None # There can be only one
- self.ctvoidp = None # Cache for self.pointers[self.ctvoid]
- self.ctchara = None # Cache for self.arrays[charp, -1]
- self.primitives = {} # Keys: name
- self.pointers = {} # Keys: base_ctype
- self.arrays = {} # Keys: (ptr_ctype, length_or_-1)
- self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis)
- _func_key_eq, _func_key_hash)
-
-def _func_key_eq((fargs1, w_fresult1, ellipsis1),
- (fargs2, w_fresult2, ellipsis2)):
- return (fargs1 == fargs2 and # list equality here
- w_fresult1 is w_fresult2 and
- ellipsis1 == ellipsis2)
-
-def _func_key_hash((fargs, w_fresult, ellipsis)):
- x = compute_identity_hash(w_fresult) ^ ellipsis
- for w_arg in fargs:
- y = compute_identity_hash(w_arg)
- x = intmask((1000003 * x) ^ y)
- return x
+ self.ctvoid = None # Cache for the 'void' type
+ self.ctvoidp = None # Cache for the 'void *' type
+ self.ctchara = None # Cache for the 'char[]' type
+ self.primitives = {} # Cache for {name: primitive_type}
+ self.functions = [] # see _new_function_type()
+ self.for_testing = False
def _clean_cache(space):
"NOT_RPYTHON"
@@ -165,20 +151,24 @@
# ____________________________________________________________
+ at specialize.memo()
+def _setup_wref(has_weakref_support):
+ assert has_weakref_support, "_cffi_backend requires weakrefs"
+ ctypeobj.W_CType._pointer_type = rweakref.dead_ref
+ ctypeptr.W_CTypePointer._array_types = None
+
@unwrap_spec(w_ctype=ctypeobj.W_CType)
def new_pointer_type(space, w_ctype):
return _new_pointer_type(space, w_ctype)
@jit.elidable
def _new_pointer_type(space, w_ctype):
- unique_cache = space.fromcache(UniqueCache)
- try:
- return unique_cache.pointers[w_ctype]
- except KeyError:
- pass
- ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype)
- unique_cache.pointers[w_ctype] = ctypepointer
- return ctypepointer
+ _setup_wref(rweakref.has_weakref_support())
+ ctptr = w_ctype._pointer_type()
+ if ctptr is None:
+ ctptr = ctypeptr.W_CTypePointer(space, w_ctype)
+ w_ctype._pointer_type = rweakref.ref(ctptr)
+ return ctptr
# ____________________________________________________________
@@ -195,16 +185,19 @@
@jit.elidable
def _new_array_type(space, w_ctptr, length):
- unique_cache = space.fromcache(UniqueCache)
- unique_key = (w_ctptr, length)
- try:
- return unique_cache.arrays[unique_key]
- except KeyError:
- pass
- #
+ _setup_wref(rweakref.has_weakref_support())
if not isinstance(w_ctptr, ctypeptr.W_CTypePointer):
raise OperationError(space.w_TypeError,
space.wrap("first arg must be a pointer ctype"))
+ arrays = w_ctptr._array_types
+ if arrays is None:
+ arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray)
+ w_ctptr._array_types = arrays
+ else:
+ ctype = arrays.get(length)
+ if ctype is not None:
+ return ctype
+ #
ctitem = w_ctptr.ctitem
if ctitem.size < 0:
raise oefmt(space.w_ValueError, "array item of unknown size: '%s'",
@@ -222,7 +215,7 @@
extra = '[%d]' % length
#
ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra)
- unique_cache.arrays[unique_key] = ctype
+ arrays.set(length, ctype)
return ctype
# ____________________________________________________________
@@ -600,8 +593,9 @@
# ____________________________________________________________
- at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int)
-def new_function_type(space, w_fargs, w_fresult, ellipsis=0):
+ at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int, abi=int)
+def new_function_type(space, w_fargs, w_fresult, ellipsis=0,
+ abi=_cffi_backend.FFI_DEFAULT_ABI):
fargs = []
for w_farg in space.fixedview(w_fargs):
if not isinstance(w_farg, ctypeobj.W_CType):
@@ -610,31 +604,72 @@
if isinstance(w_farg, ctypearray.W_CTypeArray):
w_farg = w_farg.ctptr
fargs.append(w_farg)
- return _new_function_type(space, fargs, w_fresult, bool(ellipsis))
+ return _new_function_type(space, fargs, w_fresult, bool(ellipsis), abi)
+
+def _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi):
+ x = compute_identity_hash(fresult)
+ for w_arg in fargs:
+ y = compute_identity_hash(w_arg)
+ x = intmask((1000003 * x) ^ y)
+ x ^= (ellipsis - abi)
+ if unique_cache.for_testing: # constant-folded to False in translation;
+ x &= 3 # but for test, keep only 2 bits of hash
+ return x
# can't use @jit.elidable here, because it might call back to random
# space functions via force_lazy_struct()
-def _new_function_type(space, fargs, w_fresult, ellipsis=False):
+def _new_function_type(space, fargs, fresult, ellipsis, abi):
+ try:
+ return _get_function_type(space, fargs, fresult, ellipsis, abi)
+ except KeyError:
+ return _build_function_type(space, fargs, fresult, ellipsis, abi)
+
+ at jit.elidable
+def _get_function_type(space, fargs, fresult, ellipsis, abi):
+ # This function is elidable because if called again with exactly the
+ # same arguments (and if it didn't raise KeyError), it would give
+ # the same result, at least as long as this result is still live.
+ #
+ # 'unique_cache.functions' is a list of weak dicts, each mapping
+ # the func_hash number to a W_CTypeFunc. There is normally only
+ # one such dict, but in case of hash collision, there might be
+ # more.
+ unique_cache = space.fromcache(UniqueCache)
+ func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi)
+ for weakdict in unique_cache.functions:
+ ctype = weakdict.get(func_hash)
+ if (ctype is not None and
+ ctype.ctitem is fresult and
+ ctype.fargs == fargs and
+ ctype.ellipsis == ellipsis and
+ ctype.abi == abi):
+ return ctype
+ raise KeyError
+
+ at jit.dont_look_inside
+def _build_function_type(space, fargs, fresult, ellipsis, abi):
from pypy.module._cffi_backend import ctypefunc
#
- unique_cache = space.fromcache(UniqueCache)
- unique_key = (fargs, w_fresult, ellipsis)
- try:
- return unique_cache.functions[unique_key]
- except KeyError:
- pass
- #
- if ((w_fresult.size < 0 and
- not isinstance(w_fresult, ctypevoid.W_CTypeVoid))
- or isinstance(w_fresult, ctypearray.W_CTypeArray)):
- if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and
- w_fresult.size < 0):
+ if ((fresult.size < 0 and
+ not isinstance(fresult, ctypevoid.W_CTypeVoid))
+ or isinstance(fresult, ctypearray.W_CTypeArray)):
+ if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and
+ fresult.size < 0):
raise oefmt(space.w_TypeError,
- "result type '%s' is opaque", w_fresult.name)
+ "result type '%s' is opaque", fresult.name)
else:
raise oefmt(space.w_TypeError,
- "invalid result type: '%s'", w_fresult.name)
+ "invalid result type: '%s'", fresult.name)
#
- fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis)
- unique_cache.functions[unique_key] = fct
+ fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis, abi)
+ unique_cache = space.fromcache(UniqueCache)
+ func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi)
+ for weakdict in unique_cache.functions:
+ if weakdict.get(func_hash) is None:
+ weakdict.set(func_hash, fct)
+ break
+ else:
+ weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc)
+ unique_cache.functions.append(weakdict)
+ weakdict.set(func_hash, fct)
return fct
diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py
--- a/pypy/module/_cffi_backend/realize_c_type.py
+++ b/pypy/module/_cffi_backend/realize_c_type.py
@@ -5,6 +5,7 @@
from rpython.rtyper.lltypesystem import lltype, rffi
from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
+from pypy.module import _cffi_backend
from pypy.module._cffi_backend.ctypeobj import W_CType
from pypy.module._cffi_backend import cffi_opcode, newtype, ctypestruct
from pypy.module._cffi_backend import parse_c_type
@@ -164,16 +165,28 @@
OP_FUNCTION_END = cffi_opcode.OP_FUNCTION_END
while getop(opcodes[base_index + num_args]) != OP_FUNCTION_END:
num_args += 1
- ellipsis = (getarg(opcodes[base_index + num_args]) & 1) != 0
+ #
+ ellipsis = (getarg(opcodes[base_index + num_args]) & 0x01) != 0
+ abi = (getarg(opcodes[base_index + num_args]) & 0xFE)
+ if abi == 0:
+ abi = _cffi_backend.FFI_DEFAULT_ABI
+ elif abi == 2:
+ if _cffi_backend.has_stdcall:
+ abi = _cffi_backend.FFI_STDCALL
+ else:
+ abi = _cffi_backend.FFI_DEFAULT_ABI
+ else:
+ raise oefmt(ffi.w_FFIError, "abi number %d not supported", abi)
+ #
fargs = [realize_c_type(ffi, opcodes, base_index + i)
for i in range(num_args)]
- return fargs, fret, ellipsis
+ return fargs, fret, ellipsis, abi
def unwrap_as_fnptr(self, ffi):
if self._ctfuncptr is None:
- fargs, fret, ellipsis = self._unpack(ffi)
+ fargs, fret, ellipsis, abi = self._unpack(ffi)
self._ctfuncptr = newtype._new_function_type(
- ffi.space, fargs, fret, ellipsis)
+ ffi.space, fargs, fret, ellipsis, abi)
return self._ctfuncptr
def unwrap_as_fnptr_in_elidable(self):
@@ -190,7 +203,7 @@
# type ptr-to-struct. This is how recompiler.py produces
# trampoline functions for PyPy.
if self.nostruct_ctype is None:
- fargs, fret, ellipsis = self._unpack(ffi)
+ fargs, fret, ellipsis, abi = self._unpack(ffi)
# 'locs' will be a string of the same length as the final fargs,
# containing 'A' where a struct argument was detected, and 'R'
# in first position if a struct return value was detected
@@ -207,7 +220,7 @@
locs = ['R'] + locs
fret = newtype.new_void_type(ffi.space)
ctfuncptr = newtype._new_function_type(
- ffi.space, fargs, fret, ellipsis)
+ ffi.space, fargs, fret, ellipsis, abi)
if locs == ['\x00'] * len(locs):
locs = None
else:
@@ -218,7 +231,7 @@
locs[0] == 'R')
def unexpected_fn_type(self, ffi):
- fargs, fret, ellipsis = self._unpack(ffi)
+ fargs, fret, ellipsis, abi = self._unpack(ffi)
argnames = [farg.name for farg in fargs]
if ellipsis:
argnames.append('...')
diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c
--- a/pypy/module/_cffi_backend/src/parse_c_type.c
+++ b/pypy/module/_cffi_backend/src/parse_c_type.c
@@ -51,6 +51,9 @@
TOK_UNSIGNED,
TOK_VOID,
TOK_VOLATILE,
+
+ TOK_CDECL,
+ TOK_STDCALL,
};
typedef struct {
@@ -165,6 +168,8 @@
switch (*p) {
case '_':
if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL;
+ if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL;
+ if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL;
break;
case 'c':
if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR;
@@ -236,7 +241,7 @@
type). The 'outer' argument is the index of the opcode outside
this "sequel".
*/
- int check_for_grouping;
+ int check_for_grouping, abi=0;
_cffi_opcode_t result, *p_current;
header:
@@ -253,6 +258,12 @@
/* ignored for now */
next_token(tok);
goto header;
+ case TOK_CDECL:
+ case TOK_STDCALL:
+ /* must be in a function; checked below */
+ abi = tok->kind;
+ next_token(tok);
+ goto header;
default:
break;
}
@@ -269,6 +280,11 @@
while (tok->kind == TOK_OPEN_PAREN) {
next_token(tok);
+ if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) {
+ abi = tok->kind;
+ next_token(tok);
+ }
+
if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR ||
tok->kind == TOK_CONST ||
tok->kind == TOK_VOLATILE ||
@@ -286,7 +302,14 @@
}
else {
/* function type */
- int arg_total, base_index, arg_next, has_ellipsis=0;
+ int arg_total, base_index, arg_next, flags=0;
+
+ if (abi == TOK_STDCALL) {
+ flags = 2;
+ /* note that an ellipsis below will overwrite this 'flags' value,
+ which is the goal: variadic functions are always cdecl */
+ }
+ abi = 0;
if (tok->kind == TOK_VOID && get_following_char(tok) == ')') {
next_token(tok);
@@ -315,7 +338,7 @@
_cffi_opcode_t oarg;
if (tok->kind == TOK_DOTDOTDOT) {
- has_ellipsis = 1;
+ flags = 1; /* ellipsis */
next_token(tok);
break;
}
@@ -339,8 +362,7 @@
next_token(tok);
}
}
- tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END,
- has_ellipsis);
+ tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags);
}
if (tok->kind != TOK_CLOSE_PAREN)
@@ -348,6 +370,9 @@
next_token(tok);
}
+ if (abi != 0)
+ return parse_error(tok, "expected '('");
+
while (tok->kind == TOK_OPEN_BRACKET) {
*p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index);
p_current = tok->output + tok->output_index;
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -2316,9 +2316,6 @@
f(); f()
assert get_errno() == 77
-def test_abi():
- assert isinstance(FFI_DEFAULT_ABI, int)
-
def test_cast_to_array():
# not valid in C! extension to get a non-owning <cdata 'int[3]'>
BInt = new_primitive_type("int")
@@ -3427,3 +3424,16 @@
"be 'foo *', but the types are different (check "
"that you are not e.g. mixing up different ffi "
"instances)")
+
+def test_stdcall_function_type():
+ assert FFI_CDECL == FFI_DEFAULT_ABI
+ try:
+ stdcall = FFI_STDCALL
+ except NameError:
+ stdcall = FFI_DEFAULT_ABI
+ BInt = new_primitive_type("int")
+ BFunc = new_function_type((BInt, BInt), BInt, False, stdcall)
+ if stdcall != FFI_DEFAULT_ABI:
+ assert repr(BFunc) == "<ctype 'int(__stdcall *)(int, int)'>"
+ else:
+ assert repr(BFunc) == "<ctype 'int(*)(int, int)'>"
diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py
--- a/pypy/module/_cffi_backend/test/test_c.py
+++ b/pypy/module/_cffi_backend/test/test_c.py
@@ -22,7 +22,7 @@
from rpython.tool.udir import udir
from pypy.interpreter import gateway
from pypy.module._cffi_backend import Module
-from pypy.module._cffi_backend.newtype import _clean_cache
+from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache
from rpython.translator import cdir
from rpython.translator.platform import host
from rpython.translator.tool.cbuild import ExternalCompilationInfo
@@ -86,8 +86,10 @@
_all_test_c.find_and_load_library = func
_all_test_c._testfunc = testfunc
""")
+ UniqueCache.for_testing = True
def teardown_method(self, method):
+ UniqueCache.for_testing = False
_clean_cache(self.space)
diff --git a/pypy/module/_cffi_backend/test/test_parse_c_type.py b/pypy/module/_cffi_backend/test/test_parse_c_type.py
--- a/pypy/module/_cffi_backend/test/test_parse_c_type.py
+++ b/pypy/module/_cffi_backend/test/test_parse_c_type.py
@@ -338,3 +338,17 @@
# not supported (really obscure):
# "char[+5]"
# "char['A']"
+
+def test_stdcall_cdecl():
+ assert parse("int __stdcall(int)") == [Prim(cffi_opcode.PRIM_INT),
+ '->', Func(0), NoOp(4), FuncEnd(2),
+ Prim(cffi_opcode.PRIM_INT)]
+ assert parse("int __stdcall func(int)") == parse("int __stdcall(int)")
+ assert parse("int (__stdcall *)()") == [Prim(cffi_opcode.PRIM_INT),
+ NoOp(3), '->', Pointer(1),
+ Func(0), FuncEnd(2), 0]
+ assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()")
+ parse_error("__stdcall int", "identifier expected", 0)
+ parse_error("__cdecl int", "identifier expected", 0)
+ parse_error("int __stdcall", "expected '('", 13)
+ parse_error("int __cdecl", "expected '('", 11)
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -3,10 +3,13 @@
from rpython.rlib.buffer import SubBuffer
from rpython.rlib.rstring import strip_spaces
from rpython.rtyper.lltypesystem import lltype, rffi
+
from pypy.module.micronumpy import descriptor, loop, support
from pypy.module.micronumpy.base import (
W_NDimArray, convert_to_array, W_NumpyObject)
from pypy.module.micronumpy.converters import shape_converter
+from . import constants as NPY
+from .casting import scalar2dtype
def build_scalar(space, w_dtype, w_state):
@@ -82,7 +85,6 @@
return w_res
def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False):
- from pypy.module.micronumpy import strides
# for anything that isn't already an array, try __array__ method first
if not isinstance(w_object, W_NDimArray):
@@ -143,16 +145,11 @@
w_base=w_base, start=imp.start)
else:
# not an array
- shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype)
+ shape, elems_w = find_shape_and_elems(space, w_object, dtype)
if dtype is None and space.isinstance_w(w_object, space.w_buffer):
dtype = descriptor.get_dtype_cache(space).w_uint8dtype
if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1):
dtype = find_dtype_for_seq(space, elems_w, dtype)
- if dtype is None:
- dtype = descriptor.get_dtype_cache(space).w_float64dtype
- elif dtype.is_str_or_unicode() and dtype.elsize < 1:
- # promote S0 -> S1, U0 -> U1
- dtype = descriptor.variable_dtype(space, dtype.char + '1')
w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order)
if support.product(shape) == 1: # safe from overflow since from_shape checks
@@ -165,7 +162,6 @@
def numpify(space, w_object):
"""Convert the object to a W_NumpyObject"""
# XXX: code duplication with _array()
- from pypy.module.micronumpy import strides
if isinstance(w_object, W_NumpyObject):
return w_object
# for anything that isn't already an array, try __array__ method first
@@ -173,20 +169,82 @@
if w_array is not None:
return w_array
- shape, elems_w = strides.find_shape_and_elems(space, w_object, None)
+ if is_scalar_like(space, w_object, dtype=None):
+ dtype = scalar2dtype(space, w_object)
+ if dtype.is_str_or_unicode() and dtype.elsize < 1:
+ # promote S0 -> S1, U0 -> U1
+ dtype = descriptor.variable_dtype(space, dtype.char + '1')
+ return dtype.coerce(space, w_object)
+
+ shape, elems_w = _find_shape_and_elems(space, w_object)
dtype = find_dtype_for_seq(space, elems_w, None)
- if dtype is None:
- dtype = descriptor.get_dtype_cache(space).w_float64dtype
- elif dtype.is_str_or_unicode() and dtype.elsize < 1:
- # promote S0 -> S1, U0 -> U1
- dtype = descriptor.variable_dtype(space, dtype.char + '1')
+ w_arr = W_NDimArray.from_shape(space, shape, dtype)
+ loop.assign(space, w_arr, elems_w)
+ return w_arr
- if len(elems_w) == 1:
- return dtype.coerce(space, elems_w[0])
+
+def find_shape_and_elems(space, w_iterable, dtype):
+ if is_scalar_like(space, w_iterable, dtype):
+ return [], [w_iterable]
+ is_rec_type = dtype is not None and dtype.is_record()
+ return _find_shape_and_elems(space, w_iterable, is_rec_type)
+
+def is_scalar_like(space, w_obj, dtype):
+ isstr = space.isinstance_w(w_obj, space.w_str)
+ if not support.issequence_w(space, w_obj) or isstr:
+ if dtype is None or dtype.char != NPY.CHARLTR:
+ return True
+ is_rec_type = dtype is not None and dtype.is_record()
+ if is_rec_type and is_single_elem(space, w_obj, is_rec_type):
+ return True
+ if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar():
+ return True
+ return False
+
+def _find_shape_and_elems(space, w_iterable, is_rec_type=False):
+ from pypy.objspace.std.bufferobject import W_Buffer
+ shape = [space.len_w(w_iterable)]
+ if space.isinstance_w(w_iterable, space.w_buffer):
+ batch = [space.wrap(0)] * shape[0]
+ for i in range(shape[0]):
+ batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i)))
else:
- w_arr = W_NDimArray.from_shape(space, shape, dtype)
- loop.assign(space, w_arr, elems_w)
- return w_arr
+ batch = space.listview(w_iterable)
+ while True:
+ if not batch:
+ return shape[:], []
+ if is_single_elem(space, batch[0], is_rec_type):
+ for w_elem in batch:
+ if not is_single_elem(space, w_elem, is_rec_type):
+ raise OperationError(space.w_ValueError, space.wrap(
+ "setting an array element with a sequence"))
+ return shape[:], batch
+ new_batch = []
+ size = space.len_w(batch[0])
+ for w_elem in batch:
+ if (is_single_elem(space, w_elem, is_rec_type) or
+ space.len_w(w_elem) != size):
+ raise OperationError(space.w_ValueError, space.wrap(
+ "setting an array element with a sequence"))
+ w_array = space.lookup(w_elem, '__array__')
+ if w_array is not None:
+ # Make sure we call the array implementation of listview,
+ # since for some ndarray subclasses (matrix, for instance)
+ # listview does not reduce but rather returns the same class
+ w_elem = space.get_and_call_function(w_array, w_elem, space.w_None)
+ new_batch += space.listview(w_elem)
+ shape.append(size)
+ batch = new_batch
+
+def is_single_elem(space, w_elem, is_rec_type):
+ if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)):
+ return True
+ if (space.isinstance_w(w_elem, space.w_tuple) or
+ space.isinstance_w(w_elem, space.w_list)):
+ return False
+ if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar():
+ return False
+ return True
def _dtype_guess(space, dtype, w_elem):
from .casting import scalar2dtype, find_binop_result_dtype
@@ -201,6 +259,11 @@
return _dtype_guess(space, dtype, w_elem)
for w_elem in elems_w:
dtype = _dtype_guess(space, dtype, w_elem)
+ if dtype is None:
+ dtype = descriptor.get_dtype_cache(space).w_float64dtype
+ elif dtype.is_str_or_unicode() and dtype.elsize < 1:
+ # promote S0 -> S1, U0 -> U1
+ dtype = descriptor.variable_dtype(space, dtype.char + '1')
return dtype
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -862,6 +862,8 @@
v = convert_to_array(space, w_v)
ret = W_NDimArray.from_shape(
space, v.get_shape(), get_dtype_cache(space).w_longdtype)
+ if ret.get_size() < 1:
+ return ret
if side == NPY.SEARCHLEFT:
binsearch = loop.binsearch_left
else:
@@ -1308,6 +1310,9 @@
[space.wrap(0)]), space.wrap("b")])
builder = StringBuilder()
+ if self.get_dtype().is_object():
+ raise oefmt(space.w_NotImplementedError,
+ "reduce for 'object' dtype not supported yet")
if isinstance(self.implementation, SliceArray):
iter, state = self.implementation.create_iter()
while not iter.done(state):
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -190,67 +190,6 @@
return rstrides, rbackstrides
-def is_single_elem(space, w_elem, is_rec_type):
- if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)):
- return True
- if (space.isinstance_w(w_elem, space.w_tuple) or
- space.isinstance_w(w_elem, space.w_list)):
- return False
- if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar():
- return False
- return True
-
-
-def find_shape_and_elems(space, w_iterable, dtype):
- isstr = space.isinstance_w(w_iterable, space.w_str)
- if not support.issequence_w(space, w_iterable) or isstr:
- if dtype is None or dtype.char != NPY.CHARLTR:
- return [], [w_iterable]
- is_rec_type = dtype is not None and dtype.is_record()
- if is_rec_type and is_single_elem(space, w_iterable, is_rec_type):
- return [], [w_iterable]
- if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar():
- return [], [w_iterable]
- return _find_shape_and_elems(space, w_iterable, is_rec_type)
-
-
-def _find_shape_and_elems(space, w_iterable, is_rec_type):
- from pypy.objspace.std.bufferobject import W_Buffer
- shape = [space.len_w(w_iterable)]
- if space.isinstance_w(w_iterable, space.w_buffer):
- batch = [space.wrap(0)] * shape[0]
- for i in range(shape[0]):
- batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i)))
- else:
- batch = space.listview(w_iterable)
- while True:
- if not batch:
- return shape[:], []
- if is_single_elem(space, batch[0], is_rec_type):
- for w_elem in batch:
- if not is_single_elem(space, w_elem, is_rec_type):
- raise OperationError(space.w_ValueError, space.wrap(
- "setting an array element with a sequence"))
- return shape[:], batch
- new_batch = []
- size = space.len_w(batch[0])
More information about the pypy-commit
mailing list