[pypy-commit] pypy emit-call-arm: merge default
bivab
noreply at buildbot.pypy.org
Fri May 24 10:45:56 CEST 2013
Author: David Schneider <david.schneider at picle.org>
Branch: emit-call-arm
Changeset: r64534:0bb98defe2bc
Date: 2013-05-24 03:43 -0500
http://bitbucket.org/pypy/pypy/changeset/0bb98defe2bc/
Log: merge default
diff too long, truncating to 2000 out of 5143 lines
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,5 +4,5 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "0.6"
-__version_info__ = (0, 6)
+__version__ = "0.7"
+__version_info__ = (0, 7)
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -73,15 +73,15 @@
if name.startswith('RTLD_'):
setattr(self, name, getattr(backend, name))
#
- BVoidP = self._get_cached_btype(model.voidp_type)
+ self.BVoidP = self._get_cached_btype(model.voidp_type)
if isinstance(backend, types.ModuleType):
# _cffi_backend: attach these constants to the class
if not hasattr(FFI, 'NULL'):
- FFI.NULL = self.cast(BVoidP, 0)
+ FFI.NULL = self.cast(self.BVoidP, 0)
FFI.CData, FFI.CType = backend._get_types()
else:
# ctypes backend: attach these constants to the instance
- self.NULL = self.cast(BVoidP, 0)
+ self.NULL = self.cast(self.BVoidP, 0)
self.CData, self.CType = backend._get_types()
def cdef(self, csource, override=False):
@@ -346,6 +346,12 @@
self._cdefsources.extend(ffi_to_include._cdefsources)
self._cdefsources.append(']')
+ def new_handle(self, x):
+ return self._backend.newp_handle(self.BVoidP, x)
+
+ def from_handle(self, x):
+ return self._backend.from_handle(x)
+
def _make_ffi_library(ffi, libname, flags):
import os
@@ -372,8 +378,8 @@
BType = ffi._get_cached_btype(tp)
try:
value = backendlib.load_function(BType, name)
- except KeyError:
- raise AttributeError(name)
+ except KeyError as e:
+ raise AttributeError('%s: %s' % (name, e))
library.__dict__[name] = value
return
#
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -16,6 +16,7 @@
class CTypesData(object):
__metaclass__ = CTypesType
__slots__ = ['__weakref__']
+ __name__ = '<cdata>'
def __init__(self, *args):
raise TypeError("cannot instantiate %r" % (self.__class__,))
@@ -491,6 +492,8 @@
elif BItem in (getbtype(model.PrimitiveType('signed char')),
getbtype(model.PrimitiveType('unsigned char'))):
kind = 'bytep'
+ elif BItem is getbtype(model.void_type):
+ kind = 'voidp'
else:
kind = 'generic'
#
@@ -546,13 +549,13 @@
def __setitem__(self, index, value):
self._as_ctype_ptr[index] = BItem._to_ctypes(value)
- if kind == 'charp':
+ if kind == 'charp' or kind == 'voidp':
@classmethod
- def _arg_to_ctypes(cls, value):
- if isinstance(value, bytes):
- return ctypes.c_char_p(value)
+ def _arg_to_ctypes(cls, *value):
+ if value and isinstance(value[0], bytes):
+ return ctypes.c_char_p(value[0])
else:
- return super(CTypesPtr, cls)._arg_to_ctypes(value)
+ return super(CTypesPtr, cls)._arg_to_ctypes(*value)
if kind == 'charp' or kind == 'bytep':
def _to_string(self, maxlen):
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -15,6 +15,20 @@
def patch_extension_kwds(self, kwds):
pass
+ def find_module(self, module_name, path, so_suffix):
+ try:
+ f, filename, descr = imp.find_module(module_name, path)
+ except ImportError:
+ return None
+ if f is not None:
+ f.close()
+ # Note that after a setuptools installation, there are both .py
+ # and .so files with the same basename. The code here relies on
+ # imp.find_module() locating the .so in priority.
+ if descr[0] != so_suffix:
+ return None
+ return filename
+
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
@@ -427,9 +441,9 @@
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
- for fname, ftype, _ in tp.enumfields():
+ for fname, ftype, fbitsize in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
- and ftype.is_integer_type()):
+ and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py
--- a/lib_pypy/cffi/vengine_gen.py
+++ b/lib_pypy/cffi/vengine_gen.py
@@ -1,4 +1,4 @@
-import sys
+import sys, os
import types
from . import model, ffiplatform
@@ -20,6 +20,16 @@
# up in kwds['export_symbols'].
kwds.setdefault('export_symbols', self.export_symbols)
+ def find_module(self, module_name, path, so_suffix):
+ basename = module_name + so_suffix
+ if path is None:
+ path = sys.path
+ for dirname in path:
+ filename = os.path.join(dirname, basename)
+ if os.path.isfile(filename):
+ return filename
+ return None
+
def collect_types(self):
pass # not needed in the generic engine
@@ -216,9 +226,9 @@
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
- for fname, ftype, _ in tp.enumfields():
+ for fname, ftype, fbitsize in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
- and ftype.is_integer_type()):
+ and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py
--- a/lib_pypy/cffi/verifier.py
+++ b/lib_pypy/cffi/verifier.py
@@ -102,21 +102,10 @@
path = pkg.__path__
else:
path = None
- try:
- f, filename, descr = imp.find_module(self.get_module_name(),
- path)
- except ImportError:
+ filename = self._vengine.find_module(self.get_module_name(), path,
+ _get_so_suffix())
+ if filename is None:
return
- if f is not None:
- f.close()
- if filename.lower().endswith('.py'):
- # on PyPy, if there are both .py and .pypy-19.so files in
- # the same directory, the .py file is returned. That's the
- # case after a setuptools installation. We never want to
- # load the .py file here...
- filename = filename[:-3] + _get_so_suffix()
- if not os.path.isfile(filename):
- return
self.modulefilename = filename
self._vengine.collect_types()
self._has_module = True
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -65,7 +65,8 @@
del working_modules["termios"]
del working_modules["_minimal_curses"]
- del working_modules["cppyy"] # not tested on win32
+ if "cppyy" in working_modules:
+ del working_modules["cppyy"] # not tested on win32
# The _locale module is needed by site.py on Windows
default_modules["_locale"] = None
@@ -78,7 +79,8 @@
del working_modules["_minimal_curses"]
del working_modules["termios"]
del working_modules["_multiprocessing"] # depends on rctime
- del working_modules["cppyy"] # depends on ctypes
+ if "cppyy" in working_modules:
+ del working_modules["cppyy"] # depends on ctypes
module_dependencies = {
@@ -215,10 +217,6 @@
"(the empty string and potentially single-char strings)",
default=False),
- BoolOption("withsmalltuple",
- "use small tuples",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -28,3 +28,9 @@
.. branch: arm-stacklet
Stacklet support for ARM, enables _continuation support
+
+.. branch: remove-tuple-smm
+Remove multi-methods on tuple
+
+.. branch: remove-iter-smm
+Remove multi-methods on iterators
diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py
--- a/pypy/interpreter/pyparser/future.py
+++ b/pypy/interpreter/pyparser/future.py
@@ -1,307 +1,3 @@
-"""
-This automaton is designed to be invoked on a Python source string
-before the real parser starts working, in order to find all legal
-'from __future__ import blah'. As soon as something is encountered that
-would prevent more future imports, the analysis is aborted.
-The resulting legal futures are avaliable in self.flags after the
-pass has ended.
-
-Invocation is through get_futures(src), which returns a field of flags, one per
-found correct future import.
-
-The flags can then be used to set up the parser.
-All error detection is left to the parser.
-
-The reason we are not using the regular lexer/parser toolchain is that
-we do not want the overhead of generating tokens for entire files just
-to find information that resides in the first few lines of the file.
-Neither do we require sane error messages, as this job is handled by
-the parser.
-
-To make the parsing fast, especially when the module is translated to C,
-the code has been written in a very serial fashion, using an almost
-assembler like style. A further speedup could be achieved by replacing
-the "in" comparisons with explicit numeric comparisons.
-"""
-
-from pypy.interpreter.astcompiler.consts import CO_GENERATOR_ALLOWED, \
- CO_FUTURE_DIVISION, CO_FUTURE_WITH_STATEMENT, CO_FUTURE_ABSOLUTE_IMPORT
-
-def get_futures(future_flags, source):
- futures = FutureAutomaton(future_flags, source)
- try:
- futures.start()
- except DoneException, e:
- pass
- return futures.flags, (futures.lineno, futures.col_offset)
-
-class DoneException(Exception):
- pass
-
-whitespace = ' \t\f'
-whitespace_or_newline = whitespace + '\n\r'
-letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxyz_'
-alphanumerics = letters + '1234567890'
-
-class FutureAutomaton(object):
- """
- A future statement must appear near the top of the module.
- The only lines that can appear before a future statement are:
-
- * the module docstring (if any),
- * comments,
- * blank lines, and
- * other future statements.
-
- The features recognized by Python 2.5 are "generators",
- "division", "nested_scopes" and "with_statement", "absolute_import".
- "generators", "division" and "nested_scopes" are redundant
- in 2.5 because they are always enabled.
-
- This module parses the input until it encounters something that is
- not recognized as a valid future statement or something that may
- precede a future statement.
- """
-
- def __init__(self, future_flags, string):
- self.future_flags = future_flags
- self.s = string
- self.pos = 0
- self.current_lineno = 1
- self.lineno = -1
- self.line_start_pos = 0
- self.col_offset = 0
- self.docstring_consumed = False
- self.flags = 0
- self.got_features = 0
-
- def getc(self, offset=0):
- try:
- return self.s[self.pos + offset]
- except IndexError:
- raise DoneException
-
- def start(self):
- c = self.getc()
- if c in ("'", '"', "r", "u") and not self.docstring_consumed:
- self.consume_docstring()
- elif c == '\\' or c in whitespace_or_newline:
- self.consume_empty_line()
- elif c == '#':
- self.consume_comment()
- elif c == 'f':
- self.consume_from()
- else:
- return
-
- def atbol(self):
- self.current_lineno += 1
- self.line_start_pos = self.pos
-
- def consume_docstring(self):
- self.docstring_consumed = True
- if self.getc() == "r":
- self.pos += 1
- if self.getc() == "u":
- self.pos += 1
- endchar = self.getc()
- if (self.getc() == self.getc(+1) and
- self.getc() == self.getc(+2)):
- self.pos += 3
- while 1: # Deal with a triple quoted docstring
- c = self.getc()
- if c == '\\':
- self.pos += 1
- self._skip_next_char_from_docstring()
- elif c != endchar:
- self._skip_next_char_from_docstring()
- else:
- self.pos += 1
- if (self.getc() == endchar and
- self.getc(+1) == endchar):
- self.pos += 2
- self.consume_empty_line()
- break
-
- else: # Deal with a single quoted docstring
- self.pos += 1
- while 1:
- c = self.getc()
- self.pos += 1
- if c == endchar:
- self.consume_empty_line()
- return
- elif c == '\\':
- self._skip_next_char_from_docstring()
- elif c in '\r\n':
- # Syntax error
- return
-
- def _skip_next_char_from_docstring(self):
- c = self.getc()
- self.pos += 1
- if c == '\n':
- self.atbol()
- elif c == '\r':
- if self.getc() == '\n':
- self.pos += 1
- self.atbol()
-
- def consume_continuation(self):
- c = self.getc()
- if c in '\n\r':
- self.pos += 1
- self.atbol()
-
- def consume_empty_line(self):
- """
- Called when the remainder of the line can only contain whitespace
- and comments.
- """
- while self.getc() in whitespace:
- self.pos += 1
- if self.getc() == '#':
- self.consume_comment()
- elif self.getc() == ';':
- self.pos += 1
- self.consume_whitespace()
- self.start()
- elif self.getc() in '\\':
- self.pos += 1
- self.consume_continuation()
- self.start()
- elif self.getc() in '\r\n':
- c = self.getc()
- self.pos += 1
- if c == '\r':
- if self.getc() == '\n':
- self.pos += 1
- self.atbol()
- else:
- self.atbol()
- self.start()
-
- def consume_comment(self):
- self.pos += 1
- while self.getc() not in '\r\n':
- self.pos += 1
- self.consume_empty_line()
-
- def consume_from(self):
- col_offset = self.pos - self.line_start_pos
- line = self.current_lineno
- self.pos += 1
- if self.getc() == 'r' and self.getc(+1) == 'o' and self.getc(+2) == 'm':
- self.docstring_consumed = True
- self.pos += 3
- self.consume_mandatory_whitespace()
- if self.s[self.pos:self.pos+10] != '__future__':
- raise DoneException
- self.pos += 10
- self.consume_mandatory_whitespace()
- if self.s[self.pos:self.pos+6] != 'import':
- raise DoneException
- self.pos += 6
- self.consume_whitespace()
- old_got = self.got_features
- try:
- if self.getc() == '(':
- self.pos += 1
- self.consume_whitespace()
- self.set_flag(self.get_name())
- # Set flag corresponding to name
- self.get_more(paren_list=True)
- else:
- self.set_flag(self.get_name())
- self.get_more()
- finally:
- if self.got_features > old_got:
- self.col_offset = col_offset
- self.lineno = line
- self.consume_empty_line()
-
- def consume_mandatory_whitespace(self):
- if self.getc() not in whitespace + '\\':
- raise DoneException
- self.consume_whitespace()
-
- def consume_whitespace(self, newline_ok=False):
- while 1:
- c = self.getc()
- if c in whitespace:
- self.pos += 1
- continue
- elif c == '\\' or newline_ok:
- slash = c == '\\'
- if slash:
- self.pos += 1
- c = self.getc()
- if c == '\n':
- self.pos += 1
- self.atbol()
- continue
- elif c == '\r':
- self.pos += 1
- if self.getc() == '\n':
- self.pos += 1
- self.atbol()
- elif slash:
- raise DoneException
- else:
- return
- else:
- return
-
- def get_name(self):
- if self.getc() not in letters:
- raise DoneException
- p = self.pos
- try:
- while self.getc() in alphanumerics:
- self.pos += 1
- except DoneException:
- # If there's any name at all, we want to call self.set_flag().
- # Something else while get the DoneException again.
- if self.pos == p:
- raise
- end = self.pos
- else:
- end = self.pos
- self.consume_whitespace()
- return self.s[p:end]
-
- def get_more(self, paren_list=False):
- if paren_list and self.getc() == ')':
- self.pos += 1
- return
- if (self.getc() == 'a' and
- self.getc(+1) == 's' and
- self.getc(+2) in whitespace):
- self.get_name()
- self.get_name()
- self.get_more(paren_list=paren_list)
- return
- elif self.getc() != ',':
- return
- else:
- self.pos += 1
- self.consume_whitespace(paren_list)
- if paren_list and self.getc() == ')':
- self.pos += 1
- return # Handles trailing comma inside parenthesis
- self.set_flag(self.get_name())
- self.get_more(paren_list=paren_list)
-
- def set_flag(self, feature):
- self.got_features += 1
- try:
- self.flags |= self.future_flags.compiler_features[feature]
- except KeyError:
- pass
-
-from codeop import PyCF_DONT_IMPLY_DEDENT
-from pypy.interpreter.error import OperationError
-
from pypy.tool import stdlib___future__ as future
class FutureFlags(object):
@@ -327,6 +23,81 @@
flag_names.append(name)
return flag_names
+ def get_compiler_feature(self, name):
+ return self.compiler_features.get(name, 0)
+
futureFlags_2_4 = FutureFlags((2, 4, 4, 'final', 0))
futureFlags_2_5 = FutureFlags((2, 5, 0, 'final', 0))
futureFlags_2_7 = FutureFlags((2, 7, 0, 'final', 0))
+
+
+class TokenIterator:
+ def __init__(self, tokens):
+ self.tokens = tokens
+ self.index = 0
+ self.next()
+
+ def next(self):
+ index = self.index
+ self.index = index + 1
+ self.tok = self.tokens[index]
+
+ def skip(self, n):
+ if self.tok[0] == n:
+ self.next()
+ return True
+ else:
+ return False
+
+ def skip_name(self, name):
+ from pypy.interpreter.pyparser import pygram
+ if self.tok[0] == pygram.tokens.NAME and self.tok[1] == name:
+ self.next()
+ return True
+ else:
+ return False
+
+ def next_feature_name(self):
+ from pypy.interpreter.pyparser import pygram
+ if self.tok[0] == pygram.tokens.NAME:
+ name = self.tok[1]
+ self.next()
+ if self.skip_name("as"):
+ self.skip(pygram.tokens.NAME)
+ return name
+ else:
+ return ''
+
+ def skip_newlines(self):
+ from pypy.interpreter.pyparser import pygram
+ while self.skip(pygram.tokens.NEWLINE):
+ pass
+
+
+def add_future_flags(future_flags, tokens):
+ from pypy.interpreter.pyparser import pygram
+ it = TokenIterator(tokens)
+ result = 0
+ #
+ # The only things that can precede a future statement are another
+ # future statement and a doc string (only one). This is a very
+ # permissive parsing of the given list of tokens; it relies on
+ # the real parsing done afterwards to give errors.
+ it.skip_newlines()
+ it.skip_name("r") or it.skip_name("u") or it.skip_name("ru")
+ if it.skip(pygram.tokens.STRING):
+ it.skip_newlines()
+
+ while (it.skip_name("from") and
+ it.skip_name("__future__") and
+ it.skip_name("import")):
+ it.skip(pygram.tokens.LPAR) # optionally
+ result |= future_flags.get_compiler_feature(it.next_feature_name())
+ while it.skip(pygram.tokens.COMMA):
+ result |= future_flags.get_compiler_feature(it.next_feature_name())
+ it.skip(pygram.tokens.RPAR) # optionally
+ it.skip(pygram.tokens.SEMI) # optionally
+ it.skip_newlines()
+
+ position = (it.tok[2], it.tok[3])
+ return result, position
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -135,17 +135,8 @@
raise error.SyntaxError(space.str_w(w_message))
raise
- f_flags, future_info = future.get_futures(self.future_flags, textsrc)
- compile_info.last_future_import = future_info
- compile_info.flags |= f_flags
-
flags = compile_info.flags
- if flags & consts.CO_FUTURE_PRINT_FUNCTION:
- self.grammar = pygram.python_grammar_no_print
- else:
- self.grammar = pygram.python_grammar
-
# The tokenizer is very picky about how it wants its input.
source_lines = textsrc.splitlines(True)
if source_lines and not source_lines[-1].endswith("\n"):
@@ -157,7 +148,21 @@
tp = 0
try:
try:
+ # Note: we no longer pass the CO_FUTURE_* to the tokenizer,
+ # which is expected to work independently of them. It's
+ # certainly the case for all futures in Python <= 2.7.
tokens = pytokenizer.generate_tokens(source_lines, flags)
+
+ newflags, last_future_import = (
+ future.add_future_flags(self.future_flags, tokens))
+ compile_info.last_future_import = last_future_import
+ compile_info.flags |= newflags
+
+ if compile_info.flags & consts.CO_FUTURE_PRINT_FUNCTION:
+ self.grammar = pygram.python_grammar_no_print
+ else:
+ self.grammar = pygram.python_grammar
+
for tp, value, lineno, column, line in tokens:
if self.add_token(tp, value, lineno, column, line):
break
diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_future.py
rename from pypy/interpreter/pyparser/test/test_futureautomaton.py
rename to pypy/interpreter/pyparser/test/test_future.py
--- a/pypy/interpreter/pyparser/test/test_futureautomaton.py
+++ b/pypy/interpreter/pyparser/test/test_future.py
@@ -1,29 +1,26 @@
import py
-import pypy.interpreter.pyparser.future as future
+from pypy.interpreter.pyparser import future, pytokenizer
from pypy.tool import stdlib___future__ as fut
-def run(s):
- f = future.FutureAutomaton(future.futureFlags_2_7, s)
- try:
- f.start()
- except future.DoneException:
- pass
- return f
+def run(s, expected_last_future=None):
+ source_lines = s.splitlines(True)
+ tokens = pytokenizer.generate_tokens(source_lines, 0)
+ expected_last_future = expected_last_future or tokens[-1][2:4]
+ #
+ flags, last_future_import = future.add_future_flags(
+ future.futureFlags_2_7, tokens)
+ assert last_future_import == expected_last_future
+ return flags
def test_docstring():
s = '"Docstring\\" "\nfrom __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_comment():
s = '# A comment about nothing ;\n'
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_tripledocstring():
s = '''""" This is a
@@ -31,9 +28,7 @@
breaks in it. It even has a \n"""
'''
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_escapedquote_in_tripledocstring():
s = '''""" This is a
@@ -41,233 +36,176 @@
breaks in it. \\"""It even has an escaped quote!"""
'''
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_empty_line():
s = ' \t \f \n \n'
f = run(s)
- assert f.pos == len(s)
- assert f.lineno == -1
- assert f.col_offset == 0
+ assert f == 0
def test_from():
s = 'from __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_froms():
s = 'from __future__ import division, generators, with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_from_as():
s = 'from __future__ import division as b\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_froms_as():
s = 'from __future__ import division as b, generators as c\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_from_paren():
s = 'from __future__ import (division)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_froms_paren():
s = 'from __future__ import (division, generators)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_froms_paren_as():
s = 'from __future__ import (division as b, generators,)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_paren_with_newline():
s = 'from __future__ import (division,\nabsolute_import)\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_ABSOLUTE_IMPORT)
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_ABSOLUTE_IMPORT)
+
+def test_paren_with_newline_2():
+ s = 'from __future__ import (\ndivision,\nabsolute_import)\n'
+ f = run(s)
+ assert f == (fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_ABSOLUTE_IMPORT)
def test_multiline():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,)\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 4
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_windows_style_lineendings():
s = '"abc" #def\r\n #ghi\r\nfrom __future__ import (division as b, generators,)\r\nfrom __future__ import with_statement\r\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 4
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_mac_style_lineendings():
s = '"abc" #def\r #ghi\rfrom __future__ import (division as b, generators,)\rfrom __future__ import with_statement\r'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 4
- assert f.col_offset == 0
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_semicolon():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,); from __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert f.lineno == 3
- assert f.col_offset == 55
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
+
+def test_semicolon_2():
+ s = 'from __future__ import division; from foo import bar'
+ f = run(s, expected_last_future=(1, 39))
+ assert f == fut.CO_FUTURE_DIVISION
def test_full_chain():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,); from __future__ import with_statement\n'
- flags, pos = future.get_futures(future.futureFlags_2_5, s)
- assert flags == (fut.CO_FUTURE_DIVISION |
- fut.CO_GENERATOR_ALLOWED |
- fut.CO_FUTURE_WITH_STATEMENT)
- assert pos == (3, 55)
+ f = run(s)
+ assert f == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_intervening_code():
s = 'from __future__ import (division as b, generators,)\nfrom sys import modules\nfrom __future__ import with_statement\n'
- flags, pos = future.get_futures(future.futureFlags_2_5, s)
- assert flags & fut.CO_FUTURE_WITH_STATEMENT == 0
- assert pos == (1, 0)
+ f = run(s, expected_last_future=(2, 5))
+ assert f == (fut.CO_FUTURE_DIVISION | fut.CO_GENERATOR_ALLOWED)
def test_nonexisting():
s = 'from __future__ import non_existing_feature\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == 0
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == 0
+
+def test_nonexisting_2():
+ s = 'from __future__ import non_existing_feature, with_statement\n'
+ f = run(s)
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_from_import_abs_import():
s = 'from __future__ import absolute_import\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_ABSOLUTE_IMPORT
- assert f.lineno == 1
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_ABSOLUTE_IMPORT
def test_raw_doc():
s = 'r"Doc"\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_unicode_doc():
s = 'u"Doc"\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_raw_unicode_doc():
s = 'ru"Doc"\nfrom __future__ import with_statement\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_continuation_line():
s = "\\\nfrom __future__ import with_statement\n"
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 2
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_continuation_lines():
s = "\\\n \t\\\nfrom __future__ import with_statement\n"
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 3
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
def test_lots_of_continuation_lines():
s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n"
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
- assert f.lineno == 8
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_WITH_STATEMENT
-# This looks like a bug in cpython parser
-# and would require extensive modifications
-# to future.py in order to emulate the same behaviour
def test_continuation_lines_raise():
- py.test.skip("probably a CPython bug")
s = " \\\n \t\\\nfrom __future__ import with_statement\n"
- try:
- f = run(s)
- except IndentationError, e:
- assert e.args == 'unexpected indent'
- assert f.pos == len(s)
- assert f.flags == 0
- assert f.lineno == -1
- assert f.col_offset == 0
- else:
- raise AssertionError('IndentationError not raised')
- assert f.lineno == 2
- assert f.col_offset == 0
+ f = run(s, expected_last_future=(1, 0))
+ assert f == 0 # because of the INDENT
def test_continuation_lines_in_docstring_single_quoted():
s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 8
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
def test_continuation_lines_in_docstring_triple_quoted():
s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom __future__ import division\n'
f = run(s)
- assert f.pos == len(s)
- assert f.flags == fut.CO_FUTURE_DIVISION
- assert f.lineno == 8
- assert f.col_offset == 0
+ assert f == fut.CO_FUTURE_DIVISION
+
+def test_blank_lines():
+ s = ('\n\t\n\nfrom __future__ import with_statement'
+ ' \n \n \nfrom __future__ import division')
+ f = run(s)
+ assert f == fut.CO_FUTURE_WITH_STATEMENT | fut.CO_FUTURE_DIVISION
+
+def test_dummy_semicolons():
+ s = ('from __future__ import division;\n'
+ 'from __future__ import with_statement;')
+ f = run(s)
+ assert f == fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_WITH_STATEMENT
diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py
--- a/pypy/interpreter/pyparser/test/test_pyparse.py
+++ b/pypy/interpreter/pyparser/test/test_pyparse.py
@@ -148,3 +148,6 @@
self.parse('0b1101')
self.parse('0b0l')
py.test.raises(SyntaxError, self.parse, "0b112")
+
+ def test_print_function(self):
+ self.parse("from __future__ import print_function\nx = print\n")
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -303,6 +303,9 @@
'from __future__ import nested_scopes, generators',
'from __future__ import (nested_scopes,\ngenerators)',
'from __future__ import (nested_scopes,\ngenerators,)',
+ 'from __future__ import (\nnested_scopes,\ngenerators)',
+ 'from __future__ import(\n\tnested_scopes,\n\tgenerators)',
+ 'from __future__ import(\n\t\nnested_scopes)',
'from sys import stdin, stderr, stdout',
'from sys import (stdin, stderr,\nstdout)',
'from sys import (stdin, stderr,\nstdout,)',
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -7,7 +7,7 @@
appleveldefs = {
}
interpleveldefs = {
- '__version__': 'space.wrap("0.6")',
+ '__version__': 'space.wrap("0.7")',
'load_library': 'libraryobj.load_library',
@@ -30,6 +30,8 @@
'typeoffsetof': 'func.typeoffsetof',
'rawaddressof': 'func.rawaddressof',
'getcname': 'func.getcname',
+ 'newp_handle': 'handle.newp_handle',
+ 'from_handle': 'handle.from_handle',
'_get_types': 'func._get_types',
'string': 'func.string',
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -394,6 +394,19 @@
return self.length
+class W_CDataHandle(W_CData):
+ _attrs_ = ['w_keepalive']
+ _immutable_fields_ = ['w_keepalive']
+
+ def __init__(self, space, cdata, ctype, w_keepalive):
+ W_CData.__init__(self, space, cdata, ctype)
+ self.w_keepalive = w_keepalive
+
+ def _repr_extra(self):
+ w_repr = self.space.repr(self.w_keepalive)
+ return "handle to %s" % (self.space.str_w(w_repr),)
+
+
W_CData.typedef = TypeDef(
'CData',
__module__ = '_cffi_backend',
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -172,8 +172,8 @@
class W_CTypePointer(W_CTypePtrBase):
- _attrs_ = ['is_file', 'cache_array_type']
- _immutable_fields_ = ['is_file', 'cache_array_type?']
+ _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr']
+ _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr']
kind = "pointer"
cache_array_type = None
@@ -186,6 +186,7 @@
extra = " *"
self.is_file = (ctitem.name == "struct _IO_FILE" or
ctitem.name == "struct $FILE")
+ self.is_void_ptr = isinstance(ctitem, ctypevoid.W_CTypeVoid)
W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem)
def newp(self, w_init):
diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_cffi_backend/handle.py
@@ -0,0 +1,93 @@
+import weakref
+from pypy.interpreter.error import OperationError, operationerrfmt
+from pypy.interpreter.gateway import unwrap_spec
+from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj
+from pypy.module._weakref.interp__weakref import dead_ref
+from rpython.rtyper.lltypesystem import lltype, rffi
+
+
+def reduced_value(s):
+ while True:
+ divide = s & 1
+ s >>= 1
+ if not divide:
+ return s
+
+# ____________________________________________________________
+
+
+class CffiHandles:
+ def __init__(self, space):
+ self.handles = []
+ self.look_distance = 0
+
+ def reserve_next_handle_index(self):
+ # The reservation ordering done here is tweaked for pypy's
+ # memory allocator. We look from index 'look_distance'.
+ # Look_distance increases from 0. But we also look at
+ # "look_distance/2" or "/4" or "/8", etc. If we find that one
+ # of these secondary locations is free, we assume it's because
+ # there was recently a minor collection; so we reset
+ # look_distance to 0 and start again from the lowest locations.
+ length = len(self.handles)
+ for d in range(self.look_distance, length):
+ if self.handles[d]() is None:
+ self.look_distance = d + 1
+ return d
+ s = reduced_value(d)
+ if self.handles[s]() is None:
+ break
+ # restart from the beginning
+ for d in range(0, length):
+ if self.handles[d]() is None:
+ self.look_distance = d + 1
+ return d
+ # full! extend, but don't use '!=' here
+ self.handles = self.handles + [dead_ref] * (length // 3 + 5)
+ self.look_distance = length + 1
+ return length
+
+ def store_handle(self, index, content):
+ self.handles[index] = weakref.ref(content)
+
+ def fetch_handle(self, index):
+ if 0 <= index < len(self.handles):
+ return self.handles[index]()
+ return None
+
+def get(space):
+ return space.fromcache(CffiHandles)
+
+# ____________________________________________________________
+
+@unwrap_spec(w_ctype=ctypeobj.W_CType)
+def newp_handle(space, w_ctype, w_x):
+ if (not isinstance(w_ctype, ctypeptr.W_CTypePointer) or
+ not w_ctype.is_void_ptr):
+ raise operationerrfmt(space.w_TypeError,
+ "needs 'void *', got '%s'", w_ctype.name)
+ index = get(space).reserve_next_handle_index()
+ _cdata = rffi.cast(rffi.CCHARP, index + 1)
+ new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x)
+ get(space).store_handle(index, new_cdataobj)
+ return new_cdataobj
+
+@unwrap_spec(w_cdata=cdataobj.W_CData)
+def from_handle(space, w_cdata):
+ ctype = w_cdata.ctype
+ if (not isinstance(ctype, ctypeptr.W_CTypePtrOrArray) or
+ not ctype.can_cast_anything):
+ raise operationerrfmt(space.w_TypeError,
+ "expected a 'cdata' object with a 'void *' out "
+ "of new_handle(), got '%s'", ctype.name)
+ index = rffi.cast(lltype.Signed, w_cdata._cdata)
+ original_cdataobj = get(space).fetch_handle(index - 1)
+ #
+ if isinstance(original_cdataobj, cdataobj.W_CDataHandle):
+ return original_cdataobj.w_keepalive
+ else:
+ if index == 0:
+ msg = "cannot use from_handle() on NULL pointer"
+ else:
+ msg = "'void *' value does not correspond to any object"
+ raise OperationError(space.w_RuntimeError, space.wrap(msg))
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -2732,6 +2732,32 @@
assert x != cast(BIntP, 12344)
assert hash(x) == hash(cast(BIntP, 12345))
+def test_new_handle():
+ import _weakref
+ BVoidP = new_pointer_type(new_void_type())
+ BCharP = new_pointer_type(new_primitive_type("char"))
+ class mylist(list):
+ pass
+ o = mylist([2, 3, 4])
+ x = newp_handle(BVoidP, o)
+ assert repr(x) == "<cdata 'void *' handle to [2, 3, 4]>"
+ assert x
+ assert from_handle(x) is o
+ assert from_handle(cast(BCharP, x)) is o
+ wr = _weakref.ref(o)
+ del o
+ import gc; gc.collect()
+ assert wr() is not None
+ assert from_handle(x) == list((2, 3, 4))
+ assert from_handle(cast(BCharP, x)) == list((2, 3, 4))
+ del x
+ for i in range(3):
+ if wr() is not None:
+ import gc; gc.collect()
+ assert wr() is None
+ py.test.raises(RuntimeError, from_handle, cast(BCharP, 0))
+
+
def test_version():
# this test is here mostly for PyPy
- assert __version__ == "0.6"
+ assert __version__ == "0.7"
diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_cffi_backend/test/test_handle.py
@@ -0,0 +1,59 @@
+import random
+from pypy.module._cffi_backend.handle import CffiHandles, reduced_value
+
+
+def test_reduced_value():
+ assert reduced_value(0) == 0
+ assert reduced_value(1) == 0
+ assert reduced_value(2) == 1
+ assert reduced_value(3) == 0
+ assert reduced_value(4) == 2
+ assert reduced_value(5) == 1
+ assert reduced_value(6) == 3
+ assert reduced_value(7) == 0
+ assert reduced_value(8) == 4
+ assert reduced_value(9) == 2
+ assert reduced_value(10) == 5
+ assert reduced_value(11) == 1
+
+
+class PseudoWeakRef(object):
+ _content = 42
+
+ def __call__(self):
+ return self._content
+
+
+def test_cffi_handles_1():
+ ch = CffiHandles(None)
+ expected_content = {}
+ for i in range(10000):
+ index = ch.reserve_next_handle_index()
+ assert 0 <= index < len(ch.handles)
+ assert ch.handles[index]() is None
+ pwr = PseudoWeakRef()
+ expected_content[index] = pwr
+ ch.handles[index] = pwr
+ assert len(ch.handles) < 13500
+ for index, pwr in expected_content.items():
+ assert ch.handles[index] is pwr
+
+def test_cffi_handles_2():
+ ch = CffiHandles(None)
+ expected_content = {}
+ for i in range(10000):
+ index = ch.reserve_next_handle_index()
+ assert 0 <= index < len(ch.handles)
+ assert ch.handles[index]() is None
+ pwr = PseudoWeakRef()
+ expected_content[index] = pwr
+ ch.handles[index] = pwr
+ #
+ if len(expected_content) > 20:
+ r = random.choice(list(expected_content))
+ pwr = expected_content.pop(r)
+ pwr._content = None
+ #
+ assert len(ch.handles) < 100
+ for index, pwr in expected_content.items():
+ assert ch.handles[index] is pwr
diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py
--- a/pypy/module/cpyext/setobject.py
+++ b/pypy/module/cpyext/setobject.py
@@ -6,7 +6,6 @@
borrow_from, make_ref, from_ref)
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.objspace.std.setobject import W_SetObject, newset
-from pypy.objspace.std.smalltupleobject import W_SmallTupleObject
PySet_Check, PySet_CheckExact = build_type_checkers("Set")
diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py
--- a/pypy/module/micronumpy/arrayimpl/concrete.py
+++ b/pypy/module/micronumpy/arrayimpl/concrete.py
@@ -11,7 +11,6 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.rawstorage import free_raw_storage, raw_storage_getitem,\
raw_storage_setitem, RAW_STORAGE
-from pypy.module.micronumpy.arrayimpl.sort import argsort_array
from rpython.rlib.debug import make_sure_not_resized
@@ -70,6 +69,7 @@
new_backstrides = [0] * ndims
for nd in range(ndims):
new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd]
+ assert isinstance(orig_array, W_NDimArray) or orig_array is None
return SliceArray(self.start, new_strides, new_backstrides,
new_shape, self, orig_array)
else:
@@ -324,6 +324,7 @@
orig_array)
def argsort(self, space, w_axis):
+ from pypy.module.micronumpy.arrayimpl.sort import argsort_array
return argsort_array(self, space, w_axis)
def base(self):
@@ -356,13 +357,13 @@
self.strides = strides
self.backstrides = backstrides
self.shape = shape
+ if dtype is None:
+ dtype = parent.dtype
if isinstance(parent, SliceArray):
parent = parent.parent # one level only
self.parent = parent
self.storage = parent.storage
self.order = parent.order
- if dtype is None:
- dtype = parent.dtype
self.dtype = dtype
self.size = support.product(shape) * self.dtype.itemtype.get_element_size()
self.start = start
diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py
--- a/pypy/module/micronumpy/arrayimpl/scalar.py
+++ b/pypy/module/micronumpy/arrayimpl/scalar.py
@@ -2,6 +2,7 @@
from pypy.module.micronumpy.arrayimpl import base
from pypy.module.micronumpy.base import W_NDimArray, convert_to_array
from pypy.module.micronumpy import support
+from pypy.module.micronumpy.interp_boxes import W_GenericBox
from pypy.interpreter.error import OperationError
class ScalarIterator(base.BaseArrayIterator):
@@ -48,6 +49,7 @@
return self.value
def set_scalar_value(self, w_val):
+ assert isinstance(w_val, W_GenericBox)
self.value = w_val.convert_to(self.dtype)
def copy(self, space):
@@ -73,7 +75,7 @@
dtype = self.dtype.float_type or self.dtype
if len(w_arr.get_shape()) > 0:
raise OperationError(space.w_ValueError, space.wrap(
- "could not broadcast input array from shape " +
+ "could not broadcast input array from shape " +
"(%s) into shape ()" % (
','.join([str(x) for x in w_arr.get_shape()],))))
if self.dtype.is_complex_type():
@@ -102,7 +104,7 @@
dtype = self.dtype.float_type
if len(w_arr.get_shape()) > 0:
raise OperationError(space.w_ValueError, space.wrap(
- "could not broadcast input array from shape " +
+ "could not broadcast input array from shape " +
"(%s) into shape ()" % (
','.join([str(x) for x in w_arr.get_shape()],))))
self.value = self.dtype.itemtype.composite(
diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py
--- a/pypy/module/micronumpy/base.py
+++ b/pypy/module/micronumpy/base.py
@@ -27,10 +27,10 @@
from pypy.module.micronumpy.arrayimpl import concrete, scalar
if not shape:
- impl = scalar.Scalar(dtype)
+ impl = scalar.Scalar(dtype.base)
else:
- strides, backstrides = calc_strides(shape, dtype, order)
- impl = concrete.ConcreteArray(shape, dtype, order, strides,
+ strides, backstrides = calc_strides(shape, dtype.base, order)
+ impl = concrete.ConcreteArray(shape, dtype.base, order, strides,
backstrides)
return W_NDimArray(impl)
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py
--- a/pypy/module/micronumpy/interp_boxes.py
+++ b/pypy/module/micronumpy/interp_boxes.py
@@ -268,14 +268,30 @@
class W_VoidBox(W_FlexibleBox):
- @unwrap_spec(item=str)
- def descr_getitem(self, space, item):
+ def descr_getitem(self, space, w_item):
+ from pypy.module.micronumpy.types import VoidType
+ if space.isinstance_w(w_item, space.w_str):
+ item = space.str_w(w_item)
+ elif space.isinstance_w(w_item, space.w_int):
+ #Called by iterator protocol
+ indx = space.int_w(w_item)
+ try:
+ item = self.dtype.fieldnames[indx]
+ except IndexError:
+ raise OperationError(space.w_IndexError,
+ space.wrap("Iterated over too many fields %d" % indx))
+ else:
+ raise OperationError(space.w_IndexError, space.wrap(
+ "Can only access fields of record with int or str"))
try:
ofs, dtype = self.dtype.fields[item]
except KeyError:
raise OperationError(space.w_IndexError,
space.wrap("Field %s does not exist" % item))
- read_val = dtype.itemtype.read(self.arr, self.ofs, ofs, dtype)
+ if isinstance(dtype.itemtype, VoidType):
+ read_val = dtype.itemtype.readarray(self.arr, self.ofs, ofs, dtype)
+ else:
+ read_val = dtype.itemtype.read(self.arr, self.ofs, ofs, dtype)
if isinstance (read_val, W_StringBox):
# StringType returns a str
return space.wrap(dtype.itemtype.to_str(read_val))
@@ -373,7 +389,7 @@
W_LongDoubleBox = W_Float64Box
W_CLongDoubleBox = W_Complex64Box
-
+
W_GenericBox.typedef = TypeDef("generic",
__module__ = "numpypy",
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
--- a/pypy/module/micronumpy/interp_dtype.py
+++ b/pypy/module/micronumpy/interp_dtype.py
@@ -46,11 +46,11 @@
class W_Dtype(W_Root):
- _immutable_fields_ = ["itemtype", "num", "kind"]
+ _immutable_fields_ = ["itemtype", "num", "kind", "shape"]
def __init__(self, itemtype, num, kind, name, char, w_box_type,
alternate_constructors=[], aliases=[],
- fields=None, fieldnames=None, native=True):
+ fields=None, fieldnames=None, native=True, shape=[], subdtype=None):
self.itemtype = itemtype
self.num = num
self.kind = kind
@@ -63,6 +63,12 @@
self.fieldnames = fieldnames
self.native = native
self.float_type = None
+ self.shape = list(shape)
+ self.subdtype = subdtype
+ if not subdtype:
+ self.base = self
+ else:
+ self.base = subdtype.base
@specialize.argtype(1)
def box(self, value):
@@ -78,7 +84,8 @@
return self.itemtype.coerce(space, self, w_item)
def getitem(self, arr, i):
- return self.itemtype.read(arr, i, 0)
+ item = self.itemtype.read(arr, i, 0)
+ return item
def getitem_bool(self, arr, i):
return self.itemtype.read_bool(arr, i, 0)
@@ -111,8 +118,15 @@
def descr_get_alignment(self, space):
return space.wrap(self.itemtype.alignment)
+ def descr_get_base(self, space):
+ return space.wrap(self.base)
+
+ def descr_get_subdtype(self, space):
+ return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)])
+
def descr_get_shape(self, space):
- return space.newtuple([])
+ w_shape = [space.wrap(dim) for dim in self.shape]
+ return space.newtuple(w_shape)
def eq(self, space, w_other):
w_other = space.call_function(space.gettypefor(W_Dtype), w_other)
@@ -137,6 +151,7 @@
if w_fields == space.w_None:
self.fields = None
else:
+ self.fields = {}
ofs_and_items = []
size = 0
for key in space.listview(w_fields):
@@ -279,15 +294,22 @@
ofs_and_items = []
fieldnames = []
for w_elem in lst_w:
- w_fldname, w_flddesc = space.fixedview(w_elem, 2)
- subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc)
+ size = 1
+ w_shape = space.newtuple([])
+ if space.len_w(w_elem) == 3:
+ w_fldname, w_flddesc, w_shape = space.fixedview(w_elem)
+ if not base.issequence_w(space, w_shape):
+ w_shape = space.newtuple([w_shape,])
+ else:
+ w_fldname, w_flddesc = space.fixedview(w_elem)
+ subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape)
fldname = space.str_w(w_fldname)
if fldname in fields:
raise OperationError(space.w_ValueError, space.wrap("two fields with the same name"))
assert isinstance(subdtype, W_Dtype)
fields[fldname] = (offset, subdtype)
ofs_and_items.append((offset, subdtype.itemtype))
- offset += subdtype.itemtype.get_element_size()
+ offset += subdtype.itemtype.get_element_size() * size
fieldnames.append(fldname)
itemtype = types.RecordType(ofs_and_items, offset)
return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(8 * itemtype.get_element_size()),
@@ -317,8 +339,9 @@
elif char == 'V':
num = 20
basename = 'void'
- w_box_type = space.gettypefor(interp_boxes.W_VoidBox)
- return dtype_from_list(space, space.newlist([]))
+ itemtype = types.VoidType(size)
+ return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(size),
+ "V", space.gettypefor(interp_boxes.W_VoidBox))
else:
assert char == 'U'
basename = 'unicode'
@@ -333,10 +356,24 @@
raise OperationError(space.w_NotImplementedError, space.wrap(
"dtype from spec"))
-def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None):
+def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None):
# w_align and w_copy are necessary for pickling
cache = get_dtype_cache(space)
+ if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or space.len_w(w_shape) > 0):
+ subdtype = descr__new__(space, w_subtype, w_dtype, w_align, w_copy)
+ assert isinstance(subdtype, W_Dtype)
+ size = 1
+ if space.isinstance_w(w_shape, space.w_int):
+ w_shape = space.newtuple([w_shape])
+ shape = []
+ for w_dim in space.fixedview(w_shape):
+ dim = space.int_w(w_dim)
+ shape.append(dim)
+ size *= dim
+ return W_Dtype(types.VoidType(subdtype.itemtype.get_element_size() * size), 20, VOIDLTR, "void" + str(8 * subdtype.itemtype.get_element_size() * size),
+ "V", space.gettypefor(interp_boxes.W_VoidBox), shape=shape, subdtype=subdtype)
+
if space.is_none(w_dtype):
return cache.w_float64dtype
elif space.isinstance_w(w_dtype, w_subtype):
@@ -355,6 +392,8 @@
"data type %s not understood" % name))
elif space.isinstance_w(w_dtype, space.w_list):
return dtype_from_list(space, w_dtype)
+ elif space.isinstance_w(w_dtype, space.w_tuple):
+ return descr__new__(space, w_subtype, space.getitem(w_dtype, space.wrap(0)), w_align, w_copy, w_shape=space.getitem(w_dtype, space.wrap(1)))
elif space.isinstance_w(w_dtype, space.w_dict):
return dtype_from_dict(space, w_dtype)
for dtype in cache.builtin_dtypes:
@@ -391,6 +430,8 @@
name = interp_attrproperty('name', cls=W_Dtype),
fields = GetSetProperty(W_Dtype.descr_get_fields),
names = GetSetProperty(W_Dtype.descr_get_names),
+ subdtype = GetSetProperty(W_Dtype.descr_get_subdtype),
+ base = GetSetProperty(W_Dtype.descr_get_base),
)
W_Dtype.typedef.acceptable_as_base_class = False
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -21,7 +21,7 @@
from rpython.rlib.rstring import StringBuilder
from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation
-def _find_shape(space, w_size):
+def _find_shape(space, w_size, dtype):
if space.is_none(w_size):
return []
if space.isinstance_w(w_size, space.w_int):
@@ -29,6 +29,7 @@
shape = []
for w_item in space.fixedview(w_size):
shape.append(space.int_w(w_item))
+ shape += dtype.shape
return shape[:]
class __extend__(W_NDimArray):
@@ -829,7 +830,7 @@
space.wrap("unsupported param"))
dtype = space.interp_w(interp_dtype.W_Dtype,
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype))
- shape = _find_shape(space, w_shape)
+ shape = _find_shape(space, w_shape, dtype)
if not shape:
return W_NDimArray.new_scalar(space, dtype)
return W_NDimArray.from_shape(shape, dtype)
@@ -842,10 +843,10 @@
"""
from rpython.rtyper.lltypesystem import rffi
from rpython.rlib.rawstorage import RAW_STORAGE_PTR
- shape = _find_shape(space, w_shape)
storage = rffi.cast(RAW_STORAGE_PTR, addr)
dtype = space.interp_w(interp_dtype.W_Dtype,
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype))
+ shape = _find_shape(space, w_shape, dtype)
return W_NDimArray.from_shape_and_storage(shape, storage, dtype)
W_NDimArray.typedef = TypeDef(
@@ -1029,7 +1030,7 @@
dtype = space.interp_w(interp_dtype.W_Dtype,
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)
)
- shape = _find_shape(space, w_shape)
+ shape = _find_shape(space, w_shape, dtype)
if not shape:
return W_NDimArray.new_scalar(space, dtype, space.wrap(0))
return space.wrap(W_NDimArray.from_shape(shape, dtype=dtype, order=order))
@@ -1039,7 +1040,7 @@
dtype = space.interp_w(interp_dtype.W_Dtype,
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)
)
- shape = _find_shape(space, w_shape)
+ shape = _find_shape(space, w_shape, dtype)
if not shape:
return W_NDimArray.new_scalar(space, dtype, space.wrap(0))
arr = W_NDimArray.from_shape(shape, dtype=dtype, order=order)
diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py
--- a/pypy/module/micronumpy/iter.py
+++ b/pypy/module/micronumpy/iter.py
@@ -32,13 +32,13 @@
shape dimension
which is back 25 and forward 1,
which is x.strides[1] * (x.shape[1] - 1) + x.strides[0]
-so if we precalculate the overflow backstride as
+so if we precalculate the overflow backstride as
[x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))]
we can go faster.
All the calculations happen in next()
next_skip_x() tries to do the iteration for a number of steps at once,
-but then we cannot gaurentee that we only overflow one single shape
+but then we cannot gaurentee that we only overflow one single shape
dimension, perhaps we could overflow times in one big step.
"""
@@ -170,7 +170,8 @@
self.dtype.setitem(self.array, self.offset, elem)
def getitem(self):
- return self.dtype.getitem(self.array, self.offset)
+ item = self.dtype.getitem(self.array, self.offset)
+ return item
def getitem_bool(self):
return self.dtype.getitem_bool(self.array, self.offset)
@@ -288,12 +289,13 @@
self.dim = dim
self.array = array
self.dtype = array.dtype
-
+
def setitem(self, elem):
self.dtype.setitem(self.array, self.offset, elem)
def getitem(self):
- return self.dtype.getitem(self.array, self.offset)
+ item = self.dtype.getitem(self.array, self.offset)
+ return item
@jit.unroll_safe
def next(self):
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -275,23 +275,12 @@
from numpypy import array, dtype
from cPickle import loads, dumps
a = array([1,2,3])
- if self.ptr_size == 8:
+ if self.ptr_size == 8:
assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, '<', None, None, None, -1, -1, 0))
else:
assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0))
assert loads(dumps(a.dtype)) == a.dtype
- def test_pickle_record(self):
- from numpypy import array, dtype
- from cPickle import loads, dumps
-
- d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)])
- assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '<', None, ('x', 'y', 'z', 'value'), {'y': (dtype('int32'), 4), 'x': (dtype('int32'), 0), 'z': (dtype('int32'), 8), 'value': (dtype('float64'), 12)}, 20, 1, 0))
-
- new_d = loads(dumps(d))
-
- assert new_d.__reduce__() == d.__reduce__()
-
class AppTestTypes(BaseAppTestDtypes):
def test_abstract_types(self):
import numpypy as numpy
@@ -726,7 +715,7 @@
x = int8(42).ravel()
assert x.dtype == int8
assert (x == array(42)).all()
-
+
class AppTestStrUnicodeDtypes(BaseNumpyAppTest):
@@ -769,6 +758,7 @@
assert isinstance(unicode_(3), unicode)
class AppTestRecordDtypes(BaseNumpyAppTest):
+ spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"])
def test_create(self):
from numpypy import dtype, void
@@ -781,6 +771,7 @@
assert d.num == 20
assert d.itemsize == 20
assert d.kind == 'V'
+ assert d.base == d
assert d.type is void
assert d.char == 'V'
assert d.names == ("x", "y", "z", "value")
@@ -793,6 +784,51 @@
d = dtype({'names': ['a', 'b', 'c'],
})
+ def test_create_subarrays(self):
+ from numpypy import dtype
+ d = dtype([("x", "float", (2,)), ("y", "int", (2,))])
+ assert d.itemsize == 32
+ assert d.name == "void256"
+ keys = d.fields.keys()
+ assert "x" in keys
+ assert "y" in keys
+ assert d["x"].shape == (2,)
+ assert d["x"].itemsize == 16
+ e = dtype([("x", "float", 2), ("y", "int", 2)])
+ assert e.fields.keys() == keys
+ assert e['x'].shape == (2,)
+
+ dt = dtype((float, 10))
+ assert dt.shape == (10,)
+ assert dt.kind == 'V'
+ assert dt.fields == None
+ assert dt.subdtype == (dtype(float), (10,))
+ assert dt.base == dtype(float)
+
+ def test_pickle_record(self):
+ from numpypy import array, dtype
+ from cPickle import loads, dumps
+
+ d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)])
+ assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '<', None, ('x', 'y', 'z', 'value'), {'y': (dtype('int32'), 4), 'x': (dtype('int32'), 0), 'z': (dtype('int32'), 8), 'value': (dtype('float64'), 12)}, 20, 1, 0))
+
+ new_d = loads(dumps(d))
+
+ assert new_d.__reduce__() == d.__reduce__()
+
+ def test_pickle_record_subarrays(self):
+ from numpypy import array, dtype
+ from cPickle import loads, dumps
+
+ d = dtype([("x", "int32", (3,)), ("y", "int32", (2,)), ("z", "int32", (4,)), ("value", float, (5,))])
+ new_d = loads(dumps(d))
+
+ keys = d.fields.keys()
+ keys.sort()
+ assert keys == ["value", "x", "y", "z"]
+
+ assert new_d.itemsize == d.itemsize == 76
+
class AppTestNotDirect(BaseNumpyAppTest):
def setup_class(cls):
BaseNumpyAppTest.setup_class.im_func(cls)
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -18,10 +18,12 @@
def get_element_size():
return 1
+ def __init__(self):
+ self.base = self
+
def get_size(self):
return 1
-
def create_slice(a, chunks):
return Chunks(chunks).apply(W_NDimArray(a)).implementation
@@ -2699,6 +2701,56 @@
assert a[0]['y'] == 2
assert a[1]['y'] == 1
+ def test_subarrays(self):
+ from numpypy import dtype, array, zeros
+
+ d = dtype([("x", "int", 3), ("y", "float", 5)])
+ a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d)
+
+ assert (a[0]["x"] == [1, 2, 3]).all()
+ assert (a[0]["y"] == [0.5, 1.5, 2.5, 3.5, 4.5]).all()
+ assert (a[1]["x"] == [4, 5, 6]).all()
+ assert (a[1]["y"] == [5.5, 6.5, 7.5, 8.5, 9.5]).all()
+
+ a[0]["x"][0] = 200
+ assert a[0]["x"][0] == 200
+
+ d = dtype([("x", "int", (2, 3))])
+ a = array([([[1, 2, 3], [4, 5, 6]],)], dtype=d)
+
+ assert a[0]["x"].dtype == dtype("int64")
+ assert a[0]["x"][0].dtype == dtype("int64")
+
+ assert (a[0]["x"][0] == [1, 2, 3]).all()
+ assert (a[0]["x"] == [[1, 2, 3], [4, 5, 6]]).all()
+
+ d = dtype((float, (10, 10)))
+ a = zeros((3,3), dtype=d)
+ assert a[0, 0].shape == (10, 10)
+ assert a.shape == (3, 3, 10, 10)
+ a[0, 0] = 500
+ assert (a[0, 0, 0] == 500).all()
+ assert a[0, 0, 0].shape == (10,)
+
+ def test_multidim_subarray(self):
+ from numpypy import dtype, array
+
+ d = dtype([("x", "int", (2, 3))])
+ a = array([([[1, 2, 3], [4, 5, 6]],)], dtype=d)
+
+ assert a[0]["x"].dtype == dtype("int64")
+ assert a[0]["x"][0].dtype == dtype("int64")
+
+ assert (a[0]["x"][0] == [1, 2, 3]).all()
+ assert (a[0]["x"] == [[1, 2, 3], [4, 5, 6]]).all()
+
+ def test_list_record(self):
+ from numpypy import dtype, array
+
+ d = dtype([("x", "int", 3), ("y", "float", 5)])
+ a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d)
+
+ assert len(list(a[0])) == 2
class AppTestPyPy(BaseNumpyAppTest):
def setup_class(cls):
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -3,7 +3,9 @@
from pypy.interpreter.error import OperationError
from pypy.module.micronumpy import interp_boxes
+from pypy.module.micronumpy import support
from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage
+from pypy.module.micronumpy.arrayimpl.concrete import SliceArray
from pypy.objspace.std.floatobject import float2string
from pypy.objspace.std.complexobject import str_format
from rpython.rlib import rfloat, clibffi, rcomplex
@@ -1076,7 +1078,7 @@
def to_builtin_type(self, space, box):
real,imag = self.for_computation(self.unbox(box))
- return space.newcomplex(real, imag)
+ return space.newcomplex(real, imag)
def read_bool(self, arr, i, offset):
v = self.for_computation(self._read(arr.storage, i, offset))
@@ -1217,7 +1219,7 @@
@raw_binary_op
def le(self, v1, v2):
- return self._lt(v1, v2) or self._eq(v1, v2)
+ return self._lt(v1, v2) or self._eq(v1, v2)
@raw_binary_op
def gt(self, v1, v2):
@@ -1225,7 +1227,7 @@
@raw_binary_op
def ge(self, v1, v2):
- return self._lt(v2, v1) or self._eq(v2, v1)
+ return self._lt(v2, v1) or self._eq(v2, v1)
def _bool(self, v):
return bool(v[0]) or bool(v[1])
@@ -1341,7 +1343,7 @@
return rcomplex.c_div((v[0], -v[1]), (a2, 0.))
except ZeroDivisionError:
return rfloat.NAN, rfloat.NAN
-
+
# No floor, ceil, trunc in numpy for complex
#@simple_unary_op
#def floor(self, v):
@@ -1684,6 +1686,7 @@
return space.wrap(self.to_str(box))
def build_and_convert(self, space, mydtype, box):
+ assert isinstance(box, interp_boxes.W_GenericBox)
if box.get_dtype(space).is_str_or_unicode():
arg = box.get_dtype(space).itemtype.to_str(box)
else:
@@ -1696,10 +1699,68 @@
for j in range(i + 1, self.size):
arr.storage[j] = '\x00'
return interp_boxes.W_StringBox(arr, 0, arr.dtype)
-
+
class VoidType(BaseType, BaseStringType):
T = lltype.Char
+ def _coerce(self, space, arr, ofs, dtype, w_items, shape):
+ items_w = space.fixedview(w_items)
+ for i in range(len(items_w)):
+ subdtype = dtype.subdtype
+ itemtype = subdtype.itemtype
+ if space.len_w(shape) <= 1:
+ w_box = itemtype.coerce(space, dtype.subdtype, items_w[i])
+ itemtype.store(arr, 0, ofs, w_box)
+ ofs += itemtype.get_element_size()
+ else:
+ size = 1
+ for dimension in shape[1:]:
+ size *= dimension
+ size *= itemtype.get_element_size()
+ for w_item in items_w:
+ self._coerce(space, arr, ofs, dtype, w_items, shape[1:])
+ ofs += size
More information about the pypy-commit
mailing list