[pypy-commit] pypy ufuncapi: merge default into branch
mattip
noreply at buildbot.pypy.org
Fri Dec 5 12:19:08 CET 2014
Author: mattip <matti.picus at gmail.com>
Branch: ufuncapi
Changeset: r74827:8f7cc875fd26
Date: 2014-12-05 08:50 +0200
http://bitbucket.org/pypy/pypy/changeset/8f7cc875fd26/
Log: merge default into branch
diff too long, truncating to 2000 out of 8352 lines
diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py
--- a/lib-python/2.7/subprocess.py
+++ b/lib-python/2.7/subprocess.py
@@ -655,6 +655,21 @@
"""Create new Popen instance."""
_cleanup()
+ # --- PyPy hack, see _pypy_install_libs_after_virtualenv() ---
+ # match arguments passed by different versions of virtualenv
+ if args[1:] in (
+ ['-c', 'import sys; print(sys.prefix)'], # 1.6 10ba3f3c
+ ['-c', "\nimport sys\nprefix = sys.prefix\n" # 1.7 0e9342ce
+ "if sys.version_info[0] == 3:\n"
+ " prefix = prefix.encode('utf8')\n"
+ "if hasattr(sys.stdout, 'detach'):\n"
+ " sys.stdout = sys.stdout.detach()\n"
+ "elif hasattr(sys.stdout, 'buffer'):\n"
+ " sys.stdout = sys.stdout.buffer\nsys.stdout.write(prefix)\n"],
+ ['-c', 'import sys;out=sys.stdout;getattr(out, "buffer"'
+ ', out).write(sys.prefix.encode("utf-8"))']): # 1.7.2 a9454bce
+ _pypy_install_libs_after_virtualenv(args[0])
+
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
@@ -1560,6 +1575,27 @@
self.send_signal(signal.SIGKILL)
+def _pypy_install_libs_after_virtualenv(target_executable):
+ # https://bitbucket.org/pypy/pypy/issue/1922/future-proofing-virtualenv
+ #
+ # PyPy 2.4.1 turned --shared on by default. This means the pypy binary
+ # depends on the 'libpypy-c.so' shared library to be able to run.
+ # The virtualenv code existing at the time did not account for this
+ # and would break. Try to detect that we're running under such a
+ # virtualenv in the "Testing executable with" phase and copy the
+ # library ourselves.
+ caller = sys._getframe(2)
+ if ('virtualenv_version' in caller.f_globals and
+ 'copyfile' in caller.f_globals):
+ dest_dir = sys.pypy_resolvedirof(target_executable)
+ src_dir = sys.pypy_resolvedirof(sys.executable)
+ for libname in ['libpypy-c.so']:
+ dest_library = os.path.join(dest_dir, libname)
+ src_library = os.path.join(src_dir, libname)
+ if os.path.exists(src_library):
+ caller.f_globals['copyfile'](src_library, dest_library)
+
+
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -205,23 +205,28 @@
The above is true both in CPython and in PyPy. Differences
can occur about whether a built-in function or method will
call an overridden method of *another* object than ``self``.
-In PyPy, they are generally always called, whereas not in
-CPython. For example, in PyPy, ``dict1.update(dict2)``
-considers that ``dict2`` is just a general mapping object, and
-will thus call overridden ``keys()`` and ``__getitem__()``
-methods on it. So the following code prints ``42`` on PyPy
-but ``foo`` on CPython::
+In PyPy, they are often called in cases where CPython would not.
+Two examples::
- >>>> class D(dict):
- .... def __getitem__(self, key):
- .... return 42
- ....
- >>>>
- >>>> d1 = {}
- >>>> d2 = D(a='foo')
- >>>> d1.update(d2)
- >>>> print d1['a']
- 42
+ class D(dict):
+ def __getitem__(self, key):
+ return "%r from D" % (key,)
+
+ class A(object):
+ pass
+
+ a = A()
+ a.__dict__ = D()
+ a.foo = "a's own foo"
+ print a.foo
+ # CPython => a's own foo
+ # PyPy => 'foo' from D
+
+ glob = D(foo="base item")
+ loc = {}
+ exec "print foo" in glob, loc
+ # CPython => base item
+ # PyPy => 'foo' from D
Mutating classes of objects which are already used as dictionary keys
@@ -292,6 +297,9 @@
above types will return a value that is computed from the argument, and can
thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long).
+Notably missing from the list above are ``str`` and ``unicode``. If your
+code relies on comparing strings with ``is``, then it might break in PyPy.
+
Miscellaneous
-------------
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -1,15 +1,13 @@
-"""
-Python control flow graph generation and bytecode assembly.
-"""
+"""Python control flow graph generation and bytecode assembly."""
-from pypy.interpreter.astcompiler import ast, symtable
-from pypy.interpreter import pycode
+from rpython.rlib import rfloat
+from rpython.rlib.objectmodel import we_are_translated
+
+from pypy.interpreter.astcompiler import ast, misc, symtable
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.pycode import PyCode
from pypy.tool import stdlib_opcode as ops
-from pypy.interpreter.error import OperationError
-from rpython.rlib.objectmodel import we_are_translated
-from rpython.rlib import rfloat
-
class Instruction(object):
"""Represents a single opcode."""
@@ -21,14 +19,12 @@
self.has_jump = False
def size(self):
- """Return the size of bytes of this instruction when it is encoded."""
+ """Return the size of bytes of this instruction when it is
+ encoded.
+ """
if self.opcode >= ops.HAVE_ARGUMENT:
- if self.arg > 0xFFFF:
- return 6
- else:
- return 3
- else:
- return 1
+ return (6 if self.arg > 0xFFFF else 3)
+ return 1
def jump_to(self, target, absolute=False):
"""Indicate the target this jump instruction.
@@ -54,9 +50,9 @@
class Block(object):
"""A basic control flow block.
- It has one entry point and several possible exit points. Its instructions
- may be jumps to other blocks, or if control flow reaches the end of the
- block, it continues to next_block.
+ It has one entry point and several possible exit points. Its
+ instructions may be jumps to other blocks, or if control flow
+ reaches the end of the block, it continues to next_block.
"""
def __init__(self):
@@ -71,10 +67,10 @@
stack.append(nextblock)
def post_order(self):
- """Return this block and its children in post order.
- This means that the graph of blocks is first cleaned up to
- ignore back-edges, thus turning it into a DAG. Then the DAG
- is linearized. For example:
+ """Return this block and its children in post order. This means
+ that the graph of blocks is first cleaned up to ignore
+ back-edges, thus turning it into a DAG. Then the DAG is
+ linearized. For example:
A --> B -\ => [A, D, B, C]
\-> D ---> C
@@ -105,7 +101,9 @@
return resultblocks
def code_size(self):
- """Return the encoded size of all the instructions in this block."""
+ """Return the encoded size of all the instructions in this
+ block.
+ """
i = 0
for instr in self.instructions:
i += instr.size()
@@ -141,6 +139,7 @@
i += 1
return result
+
def _list_to_dict(l, offset=0):
result = {}
index = offset
@@ -300,11 +299,11 @@
def _resolve_block_targets(self, blocks):
"""Compute the arguments of jump instructions."""
last_extended_arg_count = 0
- # The reason for this loop is extended jumps. EXTENDED_ARG extends the
- # bytecode size, so it might invalidate the offsets we've already given.
- # Thus we have to loop until the number of extended args is stable. Any
- # extended jump at all is extremely rare, so performance is not too
- # concerning.
+ # The reason for this loop is extended jumps. EXTENDED_ARG
+ # extends the bytecode size, so it might invalidate the offsets
+ # we've already given. Thus we have to loop until the number of
+ # extended args is stable. Any extended jump at all is
+ # extremely rare, so performance is not too concerning.
while True:
extended_arg_count = 0
offset = 0
@@ -330,7 +329,8 @@
instr.opcode = ops.JUMP_ABSOLUTE
absolute = True
elif target_op == ops.RETURN_VALUE:
- # Replace JUMP_* to a RETURN into just a RETURN
+ # Replace JUMP_* to a RETURN into
+ # just a RETURN
instr.opcode = ops.RETURN_VALUE
instr.arg = 0
instr.has_jump = False
@@ -345,7 +345,8 @@
instr.arg = jump_arg
if jump_arg > 0xFFFF:
extended_arg_count += 1
- if extended_arg_count == last_extended_arg_count and not force_redo:
+ if (extended_arg_count == last_extended_arg_count and
+ not force_redo):
break
else:
last_extended_arg_count = extended_arg_count
@@ -360,12 +361,14 @@
while True:
try:
w_key = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
w_index = space.getitem(w_consts, w_key)
- consts_w[space.int_w(w_index)] = space.getitem(w_key, first)
+ w_constant = space.getitem(w_key, first)
+ w_constant = misc.intern_if_common_string(space, w_constant)
+ consts_w[space.int_w(w_index)] = w_constant
return consts_w
def _get_code_flags(self):
@@ -433,15 +436,16 @@
continue
addr = offset - current_off
# Python assumes that lineno always increases with
- # increasing bytecode address (lnotab is unsigned char).
- # Depending on when SET_LINENO instructions are emitted this
- # is not always true. Consider the code:
+ # increasing bytecode address (lnotab is unsigned
+ # char). Depending on when SET_LINENO instructions
+ # are emitted this is not always true. Consider the
+ # code:
# a = (1,
# b)
- # In the bytecode stream, the assignment to "a" occurs after
- # the loading of "b". This works with the C Python compiler
- # because it only generates a SET_LINENO instruction for the
- # assignment.
+ # In the bytecode stream, the assignment to "a"
+ # occurs after the loading of "b". This works with
+ # the C Python compiler because it only generates a
+ # SET_LINENO instruction for the assignment.
if line or addr:
while addr > 255:
push(chr(255))
@@ -484,22 +488,22 @@
free_names = _list_from_dict(self.free_vars, len(cell_names))
flags = self._get_code_flags() | self.compile_info.flags
bytecode = ''.join([block.get_code() for block in blocks])
- return pycode.PyCode(self.space,
- self.argcount,
- len(self.var_names),
- stack_depth,
- flags,
- bytecode,
- list(consts_w),
- names,
- var_names,
- self.compile_info.filename,
- self.name,
- self.first_lineno,
- lnotab,
- free_names,
- cell_names,
- self.compile_info.hidden_applevel)
+ return PyCode(self.space,
+ self.argcount,
+ len(self.var_names),
+ stack_depth,
+ flags,
+ bytecode,
+ list(consts_w),
+ names,
+ var_names,
+ self.compile_info.filename,
+ self.name,
+ self.first_lineno,
+ lnotab,
+ free_names,
+ cell_names,
+ self.compile_info.hidden_applevel)
def _list_from_dict(d, offset=0):
@@ -510,134 +514,134 @@
_static_opcode_stack_effects = {
- ops.NOP : 0,
- ops.STOP_CODE : 0,
+ ops.NOP: 0,
+ ops.STOP_CODE: 0,
- ops.POP_TOP : -1,
- ops.ROT_TWO : 0,
- ops.ROT_THREE : 0,
- ops.ROT_FOUR : 0,
- ops.DUP_TOP : 1,
+ ops.POP_TOP: -1,
+ ops.ROT_TWO: 0,
+ ops.ROT_THREE: 0,
+ ops.ROT_FOUR: 0,
+ ops.DUP_TOP: 1,
- ops.UNARY_POSITIVE : 0,
- ops.UNARY_NEGATIVE : 0,
- ops.UNARY_NOT : 0,
- ops.UNARY_CONVERT : 0,
- ops.UNARY_INVERT : 0,
+ ops.UNARY_POSITIVE: 0,
+ ops.UNARY_NEGATIVE: 0,
+ ops.UNARY_NOT: 0,
+ ops.UNARY_CONVERT: 0,
+ ops.UNARY_INVERT: 0,
- ops.LIST_APPEND : -1,
- ops.SET_ADD : -1,
- ops.MAP_ADD : -2,
- ops.STORE_MAP : -2,
+ ops.LIST_APPEND: -1,
+ ops.SET_ADD: -1,
+ ops.MAP_ADD: -2,
+ ops.STORE_MAP: -2,
- ops.BINARY_POWER : -1,
- ops.BINARY_MULTIPLY : -1,
- ops.BINARY_DIVIDE : -1,
- ops.BINARY_MODULO : -1,
- ops.BINARY_ADD : -1,
- ops.BINARY_SUBTRACT : -1,
- ops.BINARY_SUBSCR : -1,
- ops.BINARY_FLOOR_DIVIDE : -1,
- ops.BINARY_TRUE_DIVIDE : -1,
- ops.BINARY_LSHIFT : -1,
- ops.BINARY_RSHIFT : -1,
- ops.BINARY_AND : -1,
- ops.BINARY_OR : -1,
- ops.BINARY_XOR : -1,
+ ops.BINARY_POWER: -1,
+ ops.BINARY_MULTIPLY: -1,
+ ops.BINARY_DIVIDE: -1,
+ ops.BINARY_MODULO: -1,
+ ops.BINARY_ADD: -1,
+ ops.BINARY_SUBTRACT: -1,
+ ops.BINARY_SUBSCR: -1,
+ ops.BINARY_FLOOR_DIVIDE: -1,
+ ops.BINARY_TRUE_DIVIDE: -1,
+ ops.BINARY_LSHIFT: -1,
+ ops.BINARY_RSHIFT: -1,
+ ops.BINARY_AND: -1,
+ ops.BINARY_OR: -1,
+ ops.BINARY_XOR: -1,
- ops.INPLACE_FLOOR_DIVIDE : -1,
- ops.INPLACE_TRUE_DIVIDE : -1,
- ops.INPLACE_ADD : -1,
- ops.INPLACE_SUBTRACT : -1,
- ops.INPLACE_MULTIPLY : -1,
- ops.INPLACE_DIVIDE : -1,
- ops.INPLACE_MODULO : -1,
- ops.INPLACE_POWER : -1,
- ops.INPLACE_LSHIFT : -1,
- ops.INPLACE_RSHIFT : -1,
- ops.INPLACE_AND : -1,
- ops.INPLACE_OR : -1,
- ops.INPLACE_XOR : -1,
+ ops.INPLACE_FLOOR_DIVIDE: -1,
+ ops.INPLACE_TRUE_DIVIDE: -1,
+ ops.INPLACE_ADD: -1,
+ ops.INPLACE_SUBTRACT: -1,
+ ops.INPLACE_MULTIPLY: -1,
+ ops.INPLACE_DIVIDE: -1,
+ ops.INPLACE_MODULO: -1,
+ ops.INPLACE_POWER: -1,
+ ops.INPLACE_LSHIFT: -1,
+ ops.INPLACE_RSHIFT: -1,
+ ops.INPLACE_AND: -1,
+ ops.INPLACE_OR: -1,
+ ops.INPLACE_XOR: -1,
- ops.SLICE+0 : 1,
- ops.SLICE+1 : 0,
- ops.SLICE+2 : 0,
- ops.SLICE+3 : -1,
- ops.STORE_SLICE+0 : -2,
- ops.STORE_SLICE+1 : -3,
- ops.STORE_SLICE+2 : -3,
- ops.STORE_SLICE+3 : -4,
- ops.DELETE_SLICE+0 : -1,
- ops.DELETE_SLICE+1 : -2,
- ops.DELETE_SLICE+2 : -2,
- ops.DELETE_SLICE+3 : -3,
+ ops.SLICE+0: 1,
+ ops.SLICE+1: 0,
+ ops.SLICE+2: 0,
+ ops.SLICE+3: -1,
+ ops.STORE_SLICE+0: -2,
+ ops.STORE_SLICE+1: -3,
+ ops.STORE_SLICE+2: -3,
+ ops.STORE_SLICE+3: -4,
+ ops.DELETE_SLICE+0: -1,
+ ops.DELETE_SLICE+1: -2,
+ ops.DELETE_SLICE+2: -2,
+ ops.DELETE_SLICE+3: -3,
- ops.STORE_SUBSCR : -2,
- ops.DELETE_SUBSCR : -2,
+ ops.STORE_SUBSCR: -2,
+ ops.DELETE_SUBSCR: -2,
- ops.GET_ITER : 0,
- ops.FOR_ITER : 1,
- ops.BREAK_LOOP : 0,
- ops.CONTINUE_LOOP : 0,
- ops.SETUP_LOOP : 0,
+ ops.GET_ITER: 0,
+ ops.FOR_ITER: 1,
+ ops.BREAK_LOOP: 0,
+ ops.CONTINUE_LOOP: 0,
+ ops.SETUP_LOOP: 0,
- ops.PRINT_EXPR : -1,
- ops.PRINT_ITEM : -1,
- ops.PRINT_NEWLINE : 0,
- ops.PRINT_ITEM_TO : -2,
- ops.PRINT_NEWLINE_TO : -1,
+ ops.PRINT_EXPR: -1,
+ ops.PRINT_ITEM: -1,
+ ops.PRINT_NEWLINE: 0,
+ ops.PRINT_ITEM_TO: -2,
+ ops.PRINT_NEWLINE_TO: -1,
- ops.WITH_CLEANUP : -1,
- ops.POP_BLOCK : 0,
- ops.END_FINALLY : -1,
- ops.SETUP_WITH : 1,
- ops.SETUP_FINALLY : 0,
- ops.SETUP_EXCEPT : 0,
+ ops.WITH_CLEANUP: -1,
+ ops.POP_BLOCK: 0,
+ ops.END_FINALLY: -1,
+ ops.SETUP_WITH: 1,
+ ops.SETUP_FINALLY: 0,
+ ops.SETUP_EXCEPT: 0,
- ops.LOAD_LOCALS : 1,
- ops.RETURN_VALUE : -1,
- ops.EXEC_STMT : -3,
- ops.YIELD_VALUE : 0,
- ops.BUILD_CLASS : -2,
- ops.BUILD_MAP : 1,
- ops.BUILD_SET : 1,
- ops.COMPARE_OP : -1,
+ ops.LOAD_LOCALS: 1,
+ ops.RETURN_VALUE: -1,
+ ops.EXEC_STMT: -3,
+ ops.YIELD_VALUE: 0,
+ ops.BUILD_CLASS: -2,
+ ops.BUILD_MAP: 1,
+ ops.BUILD_SET: 1,
+ ops.COMPARE_OP: -1,
- ops.LOOKUP_METHOD : 1,
+ ops.LOOKUP_METHOD: 1,
- ops.LOAD_NAME : 1,
- ops.STORE_NAME : -1,
- ops.DELETE_NAME : 0,
+ ops.LOAD_NAME: 1,
+ ops.STORE_NAME: -1,
+ ops.DELETE_NAME: 0,
- ops.LOAD_FAST : 1,
- ops.STORE_FAST : -1,
- ops.DELETE_FAST : 0,
+ ops.LOAD_FAST: 1,
+ ops.STORE_FAST: -1,
+ ops.DELETE_FAST: 0,
- ops.LOAD_ATTR : 0,
- ops.STORE_ATTR : -2,
- ops.DELETE_ATTR : -1,
+ ops.LOAD_ATTR: 0,
+ ops.STORE_ATTR: -2,
+ ops.DELETE_ATTR: -1,
- ops.LOAD_GLOBAL : 1,
- ops.STORE_GLOBAL : -1,
- ops.DELETE_GLOBAL : 0,
+ ops.LOAD_GLOBAL: 1,
+ ops.STORE_GLOBAL: -1,
+ ops.DELETE_GLOBAL: 0,
- ops.LOAD_CLOSURE : 1,
- ops.LOAD_DEREF : 1,
- ops.STORE_DEREF : -1,
+ ops.LOAD_CLOSURE: 1,
+ ops.LOAD_DEREF: 1,
+ ops.STORE_DEREF: -1,
- ops.LOAD_CONST : 1,
+ ops.LOAD_CONST: 1,
- ops.IMPORT_STAR : -1,
- ops.IMPORT_NAME : -1,
- ops.IMPORT_FROM : 1,
+ ops.IMPORT_STAR: -1,
+ ops.IMPORT_NAME: -1,
+ ops.IMPORT_FROM: 1,
- ops.JUMP_FORWARD : 0,
- ops.JUMP_ABSOLUTE : 0,
- ops.JUMP_IF_TRUE_OR_POP : 0,
- ops.JUMP_IF_FALSE_OR_POP : 0,
- ops.POP_JUMP_IF_TRUE : -1,
- ops.POP_JUMP_IF_FALSE : -1,
- ops.JUMP_IF_NOT_DEBUG : 0,
+ ops.JUMP_FORWARD: 0,
+ ops.JUMP_ABSOLUTE: 0,
+ ops.JUMP_IF_TRUE_OR_POP: 0,
+ ops.JUMP_IF_FALSE_OR_POP: 0,
+ ops.POP_JUMP_IF_TRUE: -1,
+ ops.POP_JUMP_IF_FALSE: -1,
+ ops.JUMP_IF_NOT_DEBUG: 0,
ops.BUILD_LIST_FROM_ARG: 1,
}
diff --git a/pypy/interpreter/astcompiler/misc.py b/pypy/interpreter/astcompiler/misc.py
--- a/pypy/interpreter/astcompiler/misc.py
+++ b/pypy/interpreter/astcompiler/misc.py
@@ -106,3 +106,13 @@
except IndexError:
return name
return "_%s%s" % (klass[i:], name)
+
+
+def intern_if_common_string(space, w_const):
+ # only intern identifier-like strings
+ if not space.is_w(space.type(w_const), space.w_str):
+ return w_const
+ for c in space.str_w(w_const):
+ if not (c.isalnum() or c == '_'):
+ return w_const
+ return space.new_interned_w_str(w_const)
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -272,6 +272,11 @@
if w_const is None:
return tup
consts_w[i] = w_const
+ # intern the string constants packed into the tuple here,
+ # because assemble.py will see the result as just a tuple constant
+ for i in range(len(consts_w)):
+ consts_w[i] = misc.intern_if_common_string(
+ self.space, consts_w[i])
else:
consts_w = []
w_consts = self.space.newtuple(consts_w)
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -14,7 +14,7 @@
UserDelAction)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
-from pypy.interpreter.miscutils import ThreadLocals
+from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
__all__ = ['ObjSpace', 'OperationError', 'W_Root']
@@ -384,7 +384,7 @@
self.builtin_modules = {}
self.reloading_modules = {}
- self.interned_strings = {}
+ self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
self.user_del_action = UserDelAction(self)
@@ -777,25 +777,30 @@
return self.w_False
def new_interned_w_str(self, w_s):
+ assert isinstance(w_s, W_Root) # and is not None
s = self.str_w(w_s)
if not we_are_translated():
assert type(s) is str
- try:
- return self.interned_strings[s]
- except KeyError:
- pass
- self.interned_strings[s] = w_s
- return w_s
+ w_s1 = self.interned_strings.get(s)
+ if w_s1 is None:
+ w_s1 = w_s
+ self.interned_strings.set(s, w_s1)
+ return w_s1
def new_interned_str(self, s):
if not we_are_translated():
assert type(s) is str
- try:
- return self.interned_strings[s]
- except KeyError:
- pass
- w_s = self.interned_strings[s] = self.wrap(s)
- return w_s
+ w_s1 = self.interned_strings.get(s)
+ if w_s1 is None:
+ w_s1 = self.wrap(s)
+ self.interned_strings.set(s, w_s1)
+ return w_s1
+
+ def is_interned_str(self, s):
+ # interface for marshal_impl
+ if not we_are_translated():
+ assert type(s) is str
+ return self.interned_strings.get(s) is not None
def descr_self_interp_w(self, RequiredClass, w_obj):
if not isinstance(w_obj, RequiredClass):
diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
--- a/pypy/interpreter/miscutils.py
+++ b/pypy/interpreter/miscutils.py
@@ -31,3 +31,19 @@
def getallvalues(self):
return {0: self._value}
+
+
+def make_weak_value_dictionary(space, keytype, valuetype):
+ "NOT_RPYTHON"
+ if space.config.translation.rweakref:
+ from rpython.rlib.rweakref import RWeakValueDictionary
+ return RWeakValueDictionary(keytype, valuetype)
+ else:
+ class FakeWeakValueDict(object):
+ def __init__(self):
+ self._dict = {}
+ def get(self, key):
+ return self._dict.get(key, None)
+ def set(self, key, value):
+ self._dict[key] = value
+ return FakeWeakValueDict()
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -131,7 +131,6 @@
# class bodies only have CO_NEWLOCALS.
# CO_NEWLOCALS: make a locals dict unless optimized is also set
# CO_OPTIMIZED: no locals dict needed at all
- # NB: this method is overridden in nestedscope.py
flags = code.co_flags
if not (flags & pycode.CO_OPTIMIZED):
if flags & pycode.CO_NEWLOCALS:
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -970,7 +970,12 @@
sys.stdout = out
output = s.getvalue()
assert "CALL_METHOD" in output
-
+
+ def test_interned_strings(self):
+ source = """x = ('foo_bar42', 5); y = 'foo_bar42'; z = x[0]"""
+ exec source
+ assert y is z
+
class AppTestExceptions:
def test_indentation_error(self):
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -378,3 +378,41 @@
assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar'
space.startup()
assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar'
+
+ def test_interned_strings_are_weak(self):
+ import weakref, gc, random
+ space = self.space
+ assert space.config.translation.rweakref
+ w1 = space.new_interned_str("abcdef")
+ w2 = space.new_interned_str("abcdef")
+ assert w2 is w1
+ #
+ # check that 'w1' goes away if we don't hold a reference to it
+ rw1 = weakref.ref(w1)
+ del w1, w2
+ i = 10
+ while rw1() is not None:
+ i -= 1
+ assert i >= 0
+ gc.collect()
+ #
+ s = "foobar%r" % random.random()
+ w0 = space.wrap(s)
+ w1 = space.new_interned_w_str(w0)
+ assert w1 is w0
+ w2 = space.new_interned_w_str(w0)
+ assert w2 is w0
+ w3 = space.wrap(s)
+ assert w3 is not w0
+ w4 = space.new_interned_w_str(w3)
+ assert w4 is w0
+ #
+ # check that 'w0' goes away if we don't hold a reference to it
+ # (even if we hold a reference to 'w3')
+ rw0 = weakref.ref(w0)
+ del w0, w1, w2, w4
+ i = 10
+ while rw0() is not None:
+ i -= 1
+ assert i >= 0
+ gc.collect()
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -158,21 +158,14 @@
class W_CTypePrimitiveSigned(W_CTypePrimitive):
- _attrs_ = ['value_fits_long', 'vmin', 'vrangemax']
- _immutable_fields_ = ['value_fits_long', 'vmin', 'vrangemax']
+ _attrs_ = ['value_fits_long', 'value_smaller_than_long']
+ _immutable_fields_ = ['value_fits_long', 'value_smaller_than_long']
is_primitive_integer = True
def __init__(self, *args):
W_CTypePrimitive.__init__(self, *args)
self.value_fits_long = self.size <= rffi.sizeof(lltype.Signed)
- if self.size < rffi.sizeof(lltype.Signed):
- assert self.value_fits_long
- sh = self.size * 8
- self.vmin = r_uint(-1) << (sh - 1)
- self.vrangemax = (r_uint(1) << sh) - 1
- else:
- self.vmin = r_uint(0)
- self.vrangemax = r_uint(-1)
+ self.value_smaller_than_long = self.size < rffi.sizeof(lltype.Signed)
def cast_to_int(self, cdata):
return self.convert_to_object(cdata)
@@ -192,8 +185,17 @@
def convert_from_object(self, cdata, w_ob):
if self.value_fits_long:
value = misc.as_long(self.space, w_ob)
- if self.size < rffi.sizeof(lltype.Signed):
- if r_uint(value) - self.vmin > self.vrangemax:
+ if self.value_smaller_than_long:
+ size = self.size
+ if size == 1:
+ signextended = misc.signext(value, 1)
+ elif size == 2:
+ signextended = misc.signext(value, 2)
+ elif size == 4:
+ signextended = misc.signext(value, 4)
+ else:
+ raise AssertionError("unsupported size")
+ if value != signextended:
self._overflow(w_ob)
misc.write_raw_signed_data(cdata, value, self.size)
else:
@@ -221,7 +223,7 @@
length = w_cdata.get_array_length()
populate_list_from_raw_array(res, buf, length)
return res
- elif self.value_fits_long:
+ elif self.value_smaller_than_long:
res = [0] * w_cdata.get_array_length()
misc.unpack_list_from_raw_array(res, w_cdata._cdata, self.size)
return res
@@ -235,8 +237,8 @@
cdata = rffi.cast(rffi.LONGP, cdata)
copy_list_to_raw_array(int_list, cdata)
else:
- overflowed = misc.pack_list_to_raw_array_bounds(
- int_list, cdata, self.size, self.vmin, self.vrangemax)
+ overflowed = misc.pack_list_to_raw_array_bounds_signed(
+ int_list, cdata, self.size)
if overflowed != 0:
self._overflow(self.space.wrap(overflowed))
return True
@@ -314,8 +316,8 @@
def pack_list_of_items(self, cdata, w_ob):
int_list = self.space.listview_int(w_ob)
if int_list is not None:
- overflowed = misc.pack_list_to_raw_array_bounds(
- int_list, cdata, self.size, r_uint(0), self.vrangemax)
+ overflowed = misc.pack_list_to_raw_array_bounds_unsigned(
+ int_list, cdata, self.size, self.vrangemax)
if overflowed != 0:
self._overflow(self.space.wrap(overflowed))
return True
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -216,6 +216,19 @@
neg_msg = "can't convert negative number to unsigned"
ovf_msg = "long too big to convert"
+ at specialize.arg(1)
+def signext(value, size):
+ # 'value' is sign-extended from 'size' bytes to a full integer.
+ # 'size' should be a constant smaller than a full integer size.
+ if size == rffi.sizeof(rffi.SIGNEDCHAR):
+ return rffi.cast(lltype.Signed, rffi.cast(rffi.SIGNEDCHAR, value))
+ elif size == rffi.sizeof(rffi.SHORT):
+ return rffi.cast(lltype.Signed, rffi.cast(rffi.SHORT, value))
+ elif size == rffi.sizeof(rffi.INT):
+ return rffi.cast(lltype.Signed, rffi.cast(rffi.INT, value))
+ else:
+ raise AssertionError("unsupported size")
+
# ____________________________________________________________
class _NotStandardObject(Exception):
@@ -334,13 +347,26 @@
# ____________________________________________________________
-def pack_list_to_raw_array_bounds(int_list, target, size, vmin, vrangemax):
+def pack_list_to_raw_array_bounds_signed(int_list, target, size):
for TP, TPP in _prim_signed_types:
if size == rffi.sizeof(TP):
ptr = rffi.cast(TPP, target)
for i in range(len(int_list)):
x = int_list[i]
- if r_uint(x) - vmin > vrangemax:
+ y = rffi.cast(TP, x)
+ if x != rffi.cast(lltype.Signed, y):
+ return x # overflow
+ ptr[i] = y
+ return 0
+ raise NotImplementedError("bad integer size")
+
+def pack_list_to_raw_array_bounds_unsigned(int_list, target, size, vrangemax):
+ for TP, TPP in _prim_signed_types:
+ if size == rffi.sizeof(TP):
+ ptr = rffi.cast(TPP, target)
+ for i in range(len(int_list)):
+ x = int_list[i]
+ if r_uint(x) > vrangemax:
return x # overflow
ptr[i] = rffi.cast(TP, x)
return 0
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -304,7 +304,7 @@
py.test.skip("works with internals of _file impl on py.py")
state = [0]
def read(fd, n=None):
- if fd != 42:
+ if fd != 424242:
return cls.old_read(fd, n)
if state[0] == 0:
state[0] += 1
@@ -315,7 +315,7 @@
return ''
os.read = read
stdin = W_File(cls.space)
- stdin.file_fdopen(42, 'rb', 1)
+ stdin.file_fdopen(424242, 'rb', 1)
stdin.name = '<stdin>'
cls.w_stream = stdin
diff --git a/pypy/module/_ssl/thread_lock.py b/pypy/module/_ssl/thread_lock.py
--- a/pypy/module/_ssl/thread_lock.py
+++ b/pypy/module/_ssl/thread_lock.py
@@ -24,12 +24,19 @@
separate_module_source = """
#include <openssl/crypto.h>
+#ifndef _WIN32
+# include <pthread.h>
+#endif
static unsigned int _ssl_locks_count = 0;
static struct RPyOpaque_ThreadLock *_ssl_locks;
static unsigned long _ssl_thread_id_function(void) {
- return RPyThreadGetIdent();
+#ifdef _WIN32
+ return (unsigned long)GetCurrentThreadId();
+#else
+ return (unsigned long)pthread_self();
+#endif
}
static void _ssl_thread_locking_function(int mode, int n, const char *file,
diff --git a/pypy/module/cpyext/src/pythread.c b/pypy/module/cpyext/src/pythread.c
--- a/pypy/module/cpyext/src/pythread.c
+++ b/pypy/module/cpyext/src/pythread.c
@@ -1,11 +1,18 @@
#include <Python.h>
+#ifndef _WIN32
+# include <pthread.h>
+#endif
#include "pythread.h"
#include "src/thread.h"
long
PyThread_get_thread_ident(void)
{
- return RPyThreadGetIdent();
+#ifdef _WIN32
+ return (long)GetCurrentThreadId();
+#else
+ return (long)pthread_self();
+#endif
}
PyThread_type_lock
diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
--- a/pypy/module/marshal/interp_marshal.py
+++ b/pypy/module/marshal/interp_marshal.py
@@ -144,7 +144,6 @@
atom_int(tc, int) puts code and int
atom_int64(tc, int64) puts code and int64
atom_str(tc, str) puts code, len and string
- atom_strlist(tc, strlist) puts code, len and list of strings
building blocks for compound types:
@@ -198,15 +197,6 @@
self.atom_int(typecode, len(x))
self.put(x)
- def atom_strlist(self, typecode, tc2, x):
- self.atom_int(typecode, len(x))
- atom_str = self.atom_str
- for item in x:
- # type(str) seems to be forbidden
- #if type(item) is not str:
- # self.raise_exc('object with wrong type in strlist')
- atom_str(tc2, item)
-
def start(self, typecode):
# type(char) not supported
self.put(typecode)
@@ -379,16 +369,6 @@
self.start(typecode)
return self.get_lng()
- def atom_strlist(self, typecode, tc2):
- self.start(typecode)
- lng = self.get_lng()
- res = [None] * lng
- idx = 0
- while idx < lng:
- res[idx] = self.atom_str(tc2)
- idx += 1
- return res
-
def start(self, typecode):
tc = self.get1()
if tc != typecode:
@@ -436,7 +416,6 @@
def get_w_obj(self, allow_null=False):
space = self.space
- w_ret = space.w_None # something not None
tc = self.get1()
w_ret = self._dispatch[ord(tc)](space, self, tc)
if w_ret is None and not allow_null:
diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py
--- a/pypy/module/micronumpy/base.py
+++ b/pypy/module/micronumpy/base.py
@@ -1,7 +1,7 @@
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, oefmt
from rpython.tool.pairtype import extendabletype
-
+from pypy.module.micronumpy import support
def wrap_impl(space, w_cls, w_instance, impl):
if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)):
@@ -45,11 +45,32 @@
return W_NDimArray(impl)
@staticmethod
- def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False,
- w_subtype=None, w_base=None, writable=True):
+ def from_shape_and_storage(space, shape, storage, dtype, storage_bytes=-1,
+ order='C', owning=False, w_subtype=None,
+ w_base=None, writable=True, strides=None):
from pypy.module.micronumpy import concrete
- from pypy.module.micronumpy.strides import calc_strides
- strides, backstrides = calc_strides(shape, dtype, order)
+ from pypy.module.micronumpy.strides import (calc_strides,
+ calc_backstrides)
+ isize = dtype.elsize
+ if storage_bytes > 0 :
+ totalsize = support.product(shape) * isize
+ if totalsize > storage_bytes:
+ raise OperationError(space.w_TypeError, space.wrap(
+ "buffer is too small for requested array"))
+ else:
+ storage_bytes = support.product(shape) * isize
+ if strides is None:
+ strides, backstrides = calc_strides(shape, dtype, order)
+ else:
+ if len(strides) != len(shape):
+ raise oefmt(space.w_ValueError,
+ 'strides, if given, must be the same length as shape')
+ for i in range(len(strides)):
+ if strides[i] < 0 or strides[i]*shape[i] > storage_bytes:
+ raise oefmt(space.w_ValueError,
+ 'strides is incompatible with shape of requested '
+ 'array and size of buffer')
+ backstrides = calc_backstrides(strides, shape)
if w_base is not None:
if owning:
raise OperationError(space.w_ValueError,
diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py
--- a/pypy/module/micronumpy/boxes.py
+++ b/pypy/module/micronumpy/boxes.py
@@ -168,7 +168,7 @@
if len(args_w) >= 1:
for w_arg in args_w:
try:
- idx = support.index_w(space, w_arg)
+ support.index_w(space, w_arg)
except OperationError:
raise oefmt(space.w_TypeError, "an integer is required")
raise oefmt(space.w_ValueError, "axes don't match array")
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -11,7 +11,7 @@
from pypy.module.micronumpy.iterators import ArrayIter
from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk,
RecordChunk, calc_strides, calc_new_strides, shape_agreement,
- calculate_broadcast_strides)
+ calculate_broadcast_strides, calc_backstrides)
class BaseConcreteArray(object):
@@ -47,6 +47,7 @@
def setitem(self, index, value):
self.dtype.itemtype.store(self, index, 0, value)
+ @jit.unroll_safe
def setslice(self, space, arr):
if len(arr.get_shape()) > 0 and len(self.get_shape()) == 0:
raise oefmt(space.w_ValueError,
@@ -78,10 +79,7 @@
self.get_strides(), self.order)
if new_strides is not None:
# We can create a view, strides somehow match up.
- ndims = len(new_shape)
- new_backstrides = [0] * ndims
- for nd in range(ndims):
- new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd]
+ new_backstrides = calc_backstrides(new_strides, new_shape)
assert isinstance(orig_array, W_NDimArray) or orig_array is None
return SliceArray(self.start, new_strides, new_backstrides,
new_shape, self, orig_array)
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -302,5 +302,5 @@
return a
else:
writable = not buf.readonly
- return W_NDimArray.from_shape_and_storage(space, [n], storage, dtype=dtype,
- w_base=w_buffer, writable=writable)
+ return W_NDimArray.from_shape_and_storage(space, [n], storage, storage_bytes=s,
+ dtype=dtype, w_base=w_buffer, writable=writable)
diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py
--- a/pypy/module/micronumpy/descriptor.py
+++ b/pypy/module/micronumpy/descriptor.py
@@ -288,7 +288,6 @@
def descr_hash(self, space):
return space.wrap(self._compute_hash(space, 0x345678))
-
def descr_str(self, space):
if self.fields:
return space.str(self.descr_get_descr(space))
diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py
--- a/pypy/module/micronumpy/flagsobj.py
+++ b/pypy/module/micronumpy/flagsobj.py
@@ -1,3 +1,5 @@
+from rpython.rlib import jit
+
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import interp2app
@@ -13,6 +15,7 @@
arr.flags &= ~flags
+ at jit.unroll_safe
def _update_contiguous_flags(arr):
shape = arr.shape
strides = arr.strides
diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py
--- a/pypy/module/micronumpy/flatiter.py
+++ b/pypy/module/micronumpy/flatiter.py
@@ -22,6 +22,9 @@
def get_shape(self):
return self.shape
+ def get_size(self):
+ return self.base().get_size()
+
def create_iter(self, shape=None, backward_broadcast=False):
assert isinstance(self.base(), W_NDimArray)
return self.base().create_iter()
@@ -41,8 +44,8 @@
return space.wrap(self.state.index)
def descr_coords(self, space):
- self.state = self.iter.update(self.state)
- return space.newtuple([space.wrap(c) for c in self.state.indices])
+ coords = self.iter.indices(self.state)
+ return space.newtuple([space.wrap(c) for c in coords])
def descr_iter(self):
return self
@@ -54,7 +57,7 @@
if self.iter.done(self.state):
raise OperationError(space.w_StopIteration, space.w_None)
w_res = self.iter.getitem(self.state)
- self.state = self.iter.next(self.state)
+ self.iter.next(self.state, mutate=True)
return w_res
def descr_getitem(self, space, w_idx):
@@ -71,7 +74,7 @@
base.get_order(), w_instance=base)
return loop.flatiter_getitem(res, self.iter, state, step)
finally:
- self.state = self.iter.reset(self.state)
+ self.iter.reset(self.state, mutate=True)
def descr_setitem(self, space, w_idx, w_value):
if not (space.isinstance_w(w_idx, space.w_int) or
@@ -91,7 +94,7 @@
arr = convert_to_array(space, w_value)
loop.flatiter_setitem(space, dtype, arr, self.iter, state, step, length)
finally:
- self.state = self.iter.reset(self.state)
+ self.iter.reset(self.state, mutate=True)
W_FlatIterator.typedef = TypeDef("numpy.flatiter",
diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py
--- a/pypy/module/micronumpy/iterators.py
+++ b/pypy/module/micronumpy/iterators.py
@@ -41,16 +41,6 @@
from pypy.module.micronumpy.base import W_NDimArray
from pypy.module.micronumpy.flagsobj import _update_contiguous_flags
-class OpFlag(object):
- def __init__(self):
- self.rw = ''
- self.broadcast = True
- self.force_contig = False
- self.force_align = False
- self.native_byte_order = False
- self.tmp_copy = ''
- self.allocate = False
-
class PureShapeIter(object):
def __init__(self, shape, idx_w):
@@ -87,25 +77,24 @@
class IterState(object):
- _immutable_fields_ = ['iterator', 'index', 'indices', 'offset']
+ _immutable_fields_ = ['iterator', '_indices']
def __init__(self, iterator, index, indices, offset):
self.iterator = iterator
self.index = index
- self.indices = indices
+ self._indices = indices
self.offset = offset
class ArrayIter(object):
_immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]',
'strides[*]', 'backstrides[*]', 'factors[*]',
- 'slice_shape[*]', 'slice_stride[*]', 'slice_backstride[*]',
- 'track_index', 'operand_type', 'slice_operand_type']
+ 'track_index']
track_index = True
- def __init__(self, array, size, shape, strides, backstrides, op_flags=OpFlag()):
- from pypy.module.micronumpy import concrete
+ @jit.unroll_safe
+ def __init__(self, array, size, shape, strides, backstrides):
assert len(shape) == len(strides) == len(backstrides)
_update_contiguous_flags(array)
self.contiguous = (array.flags & NPY.ARRAY_C_CONTIGUOUS and
@@ -117,10 +106,6 @@
self.shape_m1 = [s - 1 for s in shape]
self.strides = strides
self.backstrides = backstrides
- self.slice_shape = []
- self.slice_stride = []
- self.slice_backstride = []
- self.slice_operand_type = concrete.SliceArray
ndim = len(shape)
factors = [0] * ndim
@@ -130,32 +115,35 @@
else:
factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i]
self.factors = factors
- if op_flags.rw == 'r':
- self.operand_type = concrete.ConcreteNonWritableArrayWithBase
- else:
- self.operand_type = concrete.ConcreteArrayWithBase
@jit.unroll_safe
- def reset(self, state=None):
+ def reset(self, state=None, mutate=False):
+ index = 0
if state is None:
indices = [0] * len(self.shape_m1)
else:
assert state.iterator is self
- indices = state.indices
+ indices = state._indices
for i in xrange(self.ndim_m1, -1, -1):
indices[i] = 0
- return IterState(self, 0, indices, self.array.start)
+ offset = self.array.start
+ if not mutate:
+ return IterState(self, index, indices, offset)
+ state.index = index
+ state.offset = offset
@jit.unroll_safe
- def next(self, state):
+ def next(self, state, mutate=False):
assert state.iterator is self
index = state.index
if self.track_index:
index += 1
- indices = state.indices
+ indices = state._indices
offset = state.offset
if self.contiguous:
offset += self.array.dtype.elsize
+ elif self.ndim_m1 == 0:
+ offset += self.strides[0]
else:
for i in xrange(self.ndim_m1, -1, -1):
idx = indices[i]
@@ -166,13 +154,18 @@
else:
indices[i] = 0
offset -= self.backstrides[i]
- return IterState(self, index, indices, offset)
+ if not mutate:
+ return IterState(self, index, indices, offset)
+ state.index = index
+ state.offset = offset
@jit.unroll_safe
def goto(self, index):
offset = self.array.start
if self.contiguous:
offset += index * self.array.dtype.elsize
+ elif self.ndim_m1 == 0:
+ offset += index * self.strides[0]
else:
current = index
for i in xrange(len(self.shape_m1)):
@@ -181,20 +174,20 @@
return IterState(self, index, None, offset)
@jit.unroll_safe
- def update(self, state):
+ def indices(self, state):
assert state.iterator is self
assert self.track_index
- if not self.contiguous:
- return state
+ indices = state._indices
+ if not (self.contiguous or self.ndim_m1 == 0):
+ return indices
current = state.index
- indices = state.indices
for i in xrange(len(self.shape_m1)):
if self.factors[i] != 0:
indices[i] = current / self.factors[i]
current %= self.factors[i]
else:
indices[i] = 0
- return IterState(self, state.index, indices, state.offset)
+ return indices
def done(self, state):
assert state.iterator is self
@@ -213,12 +206,6 @@
assert state.iterator is self
self.array.setitem(state.offset, elem)
- def getoperand(self, st, base):
- impl = self.operand_type
- res = impl([], self.array.dtype, self.array.order, [], [],
- self.array.storage, base)
- res.start = st.offset
- return res
def AxisIter(array, shape, axis, cumulative):
strides = array.get_strides()
@@ -242,42 +229,3 @@
size /= shape[axis]
shape[axis] = backstrides[axis] = 0
return ArrayIter(array, size, shape, array.strides, backstrides)
-
-class SliceIter(ArrayIter):
- '''
- used with external loops, getitem and setitem return a SliceArray
- view into the original array
- '''
- _immutable_fields_ = ['base']
-
- def __init__(self, array, size, shape, strides, backstrides, slice_shape,
- slice_stride, slice_backstride, op_flags, base):
- from pypy.module.micronumpy import concrete
- ArrayIter.__init__(self, array, size, shape, strides, backstrides, op_flags)
- self.slice_shape = slice_shape
- self.slice_stride = slice_stride
- self.slice_backstride = slice_backstride
- self.base = base
- if op_flags.rw == 'r':
- self.slice_operand_type = concrete.NonWritableSliceArray
- else:
- self.slice_operand_type = concrete.SliceArray
-
- def getitem(self, state):
- # XXX cannot be called - must return a boxed value
- assert False
-
- def getitem_bool(self, state):
- # XXX cannot be called - must return a boxed value
- assert False
-
- def setitem(self, state, elem):
- # XXX cannot be called - must return a boxed value
- assert False
-
- def getoperand(self, state, base):
- assert state.iterator is self
- impl = self.slice_operand_type
- arr = impl(state.offset, self.slice_stride, self.slice_backstride,
- self.slice_shape, self.array, self.base)
- return arr
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -43,23 +43,38 @@
# TODO handle __array_priorities__ and maybe flip the order
+ if w_lhs.get_size() == 1:
+ w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype)
+ left_iter = left_state = None
+ else:
+ w_left = None
+ left_iter, left_state = w_lhs.create_iter(shape)
+ left_iter.track_index = False
+
+ if w_rhs.get_size() == 1:
+ w_right = w_rhs.get_scalar_value().convert_to(space, calc_dtype)
+ right_iter = right_state = None
+ else:
+ w_right = None
+ right_iter, right_state = w_rhs.create_iter(shape)
+ right_iter.track_index = False
+
if out is None:
out = W_NDimArray.from_shape(space, shape, res_dtype,
w_instance=lhs_for_subtype)
- left_iter, left_state = w_lhs.create_iter(shape)
- right_iter, right_state = w_rhs.create_iter(shape)
out_iter, out_state = out.create_iter(shape)
- left_iter.track_index = right_iter.track_index = False
shapelen = len(shape)
while not out_iter.done(out_state):
call2_driver.jit_merge_point(shapelen=shapelen, func=func,
calc_dtype=calc_dtype, res_dtype=res_dtype)
- w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype)
- w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype)
+ if left_iter:
+ w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype)
+ left_state = left_iter.next(left_state)
+ if right_iter:
+ w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype)
+ right_state = right_iter.next(right_state)
out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to(
space, res_dtype))
- left_state = left_iter.next(left_state)
- right_state = right_iter.next(right_state)
out_state = out_iter.next(out_state)
return out
@@ -69,11 +84,12 @@
reds='auto')
def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out):
+ obj_iter, obj_state = w_obj.create_iter(shape)
+ obj_iter.track_index = False
+
if out is None:
out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj)
- obj_iter, obj_state = w_obj.create_iter(shape)
out_iter, out_state = out.create_iter(shape)
- obj_iter.track_index = False
shapelen = len(shape)
while not out_iter.done(out_state):
call1_driver.jit_merge_point(shapelen=shapelen, func=func,
@@ -172,10 +188,23 @@
reds = 'auto')
def setslice(space, shape, target, source):
+ if not shape:
+ dtype = target.dtype
+ val = source.getitem(source.start)
+ if dtype.is_str_or_unicode():
+ val = dtype.coerce(space, val)
+ else:
+ val = val.convert_to(space, dtype)
+ target.setitem(target.start, val)
+ return target
+ return _setslice(space, shape, target, source)
+
+def _setslice(space, shape, target, source):
# note that unlike everything else, target and source here are
# array implementations, not arrays
target_iter, target_state = target.create_iter(shape)
source_iter, source_state = source.create_iter(shape)
+ source_iter.track_index = False
dtype = target.dtype
shapelen = len(shape)
while not target_iter.done(target_state):
@@ -294,10 +323,9 @@
state = x_state
return out
-axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce',
- greens=['shapelen',
- 'func', 'dtype'],
- reds='auto')
+axis_reduce_driver = jit.JitDriver(name='numpy_axis_reduce',
+ greens=['shapelen', 'func', 'dtype'],
+ reds='auto')
def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative,
temp):
@@ -310,21 +338,24 @@
temp_iter = out_iter # hack
temp_state = out_state
arr_iter, arr_state = arr.create_iter()
+ arr_iter.track_index = False
if identity is not None:
identity = identity.convert_to(space, dtype)
shapelen = len(shape)
while not out_iter.done(out_state):
- axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func,
- dtype=dtype)
- assert not arr_iter.done(arr_state)
+ axis_reduce_driver.jit_merge_point(shapelen=shapelen, func=func,
+ dtype=dtype)
w_val = arr_iter.getitem(arr_state).convert_to(space, dtype)
- out_state = out_iter.update(out_state)
- if out_state.indices[axis] == 0:
+ arr_state = arr_iter.next(arr_state)
+
+ out_indices = out_iter.indices(out_state)
+ if out_indices[axis] == 0:
if identity is not None:
w_val = func(dtype, identity, w_val)
else:
cur = temp_iter.getitem(temp_state)
w_val = func(dtype, cur, w_val)
+
out_iter.setitem(out_state, w_val)
out_state = out_iter.next(out_state)
if cumulative:
@@ -332,7 +363,6 @@
temp_state = temp_iter.next(temp_state)
else:
temp_state = out_state
- arr_state = arr_iter.next(arr_state)
return out
@@ -451,9 +481,9 @@
while not arr_iter.done(arr_state):
nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype)
if arr_iter.getitem_bool(arr_state):
- arr_state = arr_iter.update(arr_state)
+ arr_indices = arr_iter.indices(arr_state)
for d in dims:
- res_iter.setitem(res_state, box(arr_state.indices[d]))
+ res_iter.setitem(res_state, box(arr_indices[d]))
res_state = res_iter.next(res_state)
arr_state = arr_iter.next(arr_state)
return res
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -516,9 +516,10 @@
"__array__(dtype) not implemented"))
if type(self) is W_NDimArray:
return self
+ sz = support.product(self.get_shape()) * self.get_dtype().elsize
return W_NDimArray.from_shape_and_storage(
space, self.get_shape(), self.implementation.storage,
- self.get_dtype(), w_base=self)
+ self.get_dtype(), storage_bytes=sz, w_base=self)
def descr_array_iface(self, space):
addr = self.implementation.get_storage_as_int(space)
@@ -1180,8 +1181,8 @@
"improper dtype '%R'", dtype)
self.implementation = W_NDimArray.from_shape_and_storage(
space, [space.int_w(i) for i in space.listview(shape)],
- rffi.str2charp(space.str_w(storage), track_allocation=False),
- dtype, owning=True).implementation
+ rffi.str2charp(space.str_w(storage), track_allocation=False),
+ dtype, storage_bytes=space.len_w(storage), owning=True).implementation
def descr___array_finalize__(self, space, w_obj):
pass
@@ -1205,8 +1206,10 @@
if not space.is_none(w_buffer):
if (not space.is_none(w_strides)):
- raise OperationError(space.w_NotImplementedError,
- space.wrap("unsupported param"))
+ strides = [space.int_w(w_i) for w_i in
+ space.unpackiterable(w_strides)]
+ else:
+ strides = None
try:
buf = space.writebuf_w(w_buffer)
@@ -1220,16 +1223,14 @@
if not shape:
raise OperationError(space.w_TypeError, space.wrap(
"numpy scalars from buffers not supported yet"))
- totalsize = support.product(shape) * dtype.elsize
- if totalsize + offset > buf.getlength():
- raise OperationError(space.w_TypeError, space.wrap(
- "buffer is too small for requested array"))
storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr)
storage = rffi.ptradd(storage, offset)
- return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype,
+ return W_NDimArray.from_shape_and_storage(space, shape, storage,
+ dtype, w_base=w_buffer,
+ storage_bytes=buf.getlength()-offset,
w_subtype=w_subtype,
- w_base=w_buffer,
- writable=not buf.readonly)
+ writable=not buf.readonly,
+ strides=strides)
order = order_converter(space, w_order, NPY.CORDER)
if order == NPY.CORDER:
@@ -1247,8 +1248,9 @@
return w_ret
- at unwrap_spec(addr=int)
-def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None):
+ at unwrap_spec(addr=int, buf_len=int)
+def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype,
+ buf_len=-1, w_subtype=None, w_strides=None):
"""
Create an array from an existing buffer, given its address as int.
PyPy-only implementation detail.
@@ -1257,14 +1259,22 @@
dtype = space.interp_w(descriptor.W_Dtype, space.call_function(
space.gettypefor(descriptor.W_Dtype), w_dtype))
shape = shape_converter(space, w_shape, dtype)
+ if not space.is_none(w_strides):
+ strides = [space.int_w(w_i) for w_i in
+ space.unpackiterable(w_strides)]
+ else:
+ strides = None
if w_subtype:
if not space.isinstance_w(w_subtype, space.w_type):
raise OperationError(space.w_ValueError, space.wrap(
"subtype must be a subtype of ndarray, not a class instance"))
return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype,
- 'C', False, w_subtype)
+ buf_len, 'C', False, w_subtype,
+ strides=strides)
else:
- return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype)
+ return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype,
+ storage_bytes=buf_len,
+ strides=strides)
app_take = applevel(r"""
def take(a, indices, axis, out, mode):
diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py
--- a/pypy/module/micronumpy/nditer.py
+++ b/pypy/module/micronumpy/nditer.py
@@ -1,3 +1,4 @@
+from rpython.rlib import jit
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
@@ -5,7 +6,7 @@
from pypy.module.micronumpy import support, concrete
from pypy.module.micronumpy.base import W_NDimArray, convert_to_array
from pypy.module.micronumpy.descriptor import decode_w_dtype
-from pypy.module.micronumpy.iterators import ArrayIter, SliceIter, OpFlag
+from pypy.module.micronumpy.iterators import ArrayIter
from pypy.module.micronumpy.strides import (calculate_broadcast_strides,
shape_agreement, shape_agreement_multiple)
@@ -35,6 +36,16 @@
return ret
+class OpFlag(object):
+ def __init__(self):
+ self.rw = ''
+ self.broadcast = True
+ self.force_contig = False
+ self.force_align = False
+ self.native_byte_order = False
+ self.tmp_copy = ''
+ self.allocate = False
+
def parse_op_flag(space, lst):
op_flag = OpFlag()
for w_item in lst:
@@ -141,11 +152,73 @@
raise NotImplementedError('not implemented yet')
-def get_iter(space, order, arr, shape, dtype, op_flags):
+class OperandIter(ArrayIter):
+ _immutable_fields_ = ['slice_shape', 'slice_stride', 'slice_backstride',
+ 'operand_type', 'base']
+
+ def getitem(self, state):
+ # cannot be called - must return a boxed value
+ assert False
+
+ def getitem_bool(self, state):
+ # cannot be called - must return a boxed value
+ assert False
+
+ def setitem(self, state, elem):
+ # cannot be called - takes a boxed value; operands are accessed
+ # through getoperand() instead
+ assert False
+
+
+class ConcreteIter(OperandIter):
+ def __init__(self, array, size, shape, strides, backstrides,
+ op_flags, base):
+ OperandIter.__init__(self, array, size, shape, strides, backstrides)
+ self.slice_shape = 1
+ self.slice_stride = -1
+ if strides:
+ self.slice_stride = strides[-1]
+ self.slice_backstride = 1
+ if op_flags.rw == 'r':
+ self.operand_type = concrete.ConcreteNonWritableArrayWithBase
+ else:
+ self.operand_type = concrete.ConcreteArrayWithBase
+ self.base = base
+
+ def getoperand(self, state):
+ assert state.iterator is self
+ impl = self.operand_type
+ res = impl([], self.array.dtype, self.array.order, [], [],
+ self.array.storage, self.base)
+ res.start = state.offset
+ return res
+
+
+class SliceIter(OperandIter):
+ def __init__(self, array, size, shape, strides, backstrides, slice_shape,
+ slice_stride, slice_backstride, op_flags, base):
+ OperandIter.__init__(self, array, size, shape, strides, backstrides)
+ self.slice_shape = slice_shape
+ self.slice_stride = slice_stride
+ self.slice_backstride = slice_backstride
+ if op_flags.rw == 'r':
+ self.operand_type = concrete.NonWritableSliceArray
+ else:
+ self.operand_type = concrete.SliceArray
+ self.base = base
+
+ def getoperand(self, state):
+ assert state.iterator is self
+ impl = self.operand_type
+ arr = impl(state.offset, [self.slice_stride], [self.slice_backstride],
+ [self.slice_shape], self.array, self.base)
+ return arr
+
+
+def get_iter(space, order, arr, shape, dtype, op_flags, base):
imp = arr.implementation
backward = is_backward(imp, order)
if arr.is_scalar():
- return ArrayIter(imp, 1, [], [], [], op_flags=op_flags)
+ return ConcreteIter(imp, 1, [], [], [], op_flags, base)
if (imp.strides[0] < imp.strides[-1] and not backward) or \
(imp.strides[0] > imp.strides[-1] and backward):
# flip the strides. Is this always true for multidimension?
@@ -160,7 +233,7 @@
backstrides = imp.backstrides
r = calculate_broadcast_strides(strides, backstrides, imp.shape,
shape, backward)
- return ArrayIter(imp, imp.get_size(), shape, r[0], r[1], op_flags=op_flags)
+ return ConcreteIter(imp, imp.get_size(), shape, r[0], r[1], op_flags, base)
def calculate_ndim(op_in, oa_ndim):
if oa_ndim >=0:
@@ -269,8 +342,8 @@
self.index = [0] * len(shape)
self.backward = backward
+ @jit.unroll_safe
def next(self):
- # TODO It's probably possible to refactor all the "next" method from each iterator
for i in range(len(self.shape) - 1, -1, -1):
if self.index[i] < self.shape[i] - 1:
self.index[i] += 1
@@ -404,7 +477,7 @@
self.iters = []
for i in range(len(self.seq)):
it = get_iter(space, self.order, self.seq[i], self.shape,
- self.dtypes[i], self.op_flags[i])
+ self.dtypes[i], self.op_flags[i], self)
it.contiguous = False
self.iters.append((it, it.reset()))
@@ -443,7 +516,7 @@
return space.wrap(self)
def getitem(self, it, st):
- res = it.getoperand(st, self)
+ res = it.getoperand(st)
return W_NDimArray(res)
def descr_getitem(self, space, w_idx):
@@ -461,6 +534,7 @@
def descr_len(self, space):
space.wrap(len(self.iters))
+ @jit.unroll_safe
def descr_next(self, space):
for it, st in self.iters:
if not it.done(st):
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -270,7 +270,7 @@
shape = shape_agreement(space, shape, arr)
return shape
-
+ at jit.unroll_safe
def _shape_agreement(shape1, shape2):
""" Checks agreement about two shapes with respect to broadcasting. Returns
the resulting shape.
@@ -362,6 +362,13 @@
backstrides.reverse()
return strides, backstrides
+ at jit.unroll_safe
+def calc_backstrides(strides, shape):
+ ndims = len(shape)
+ new_backstrides = [0] * ndims
+ for nd in range(ndims):
+ new_backstrides[nd] = (shape[nd] - 1) * strides[nd]
+ return new_backstrides
# Recalculating strides. Find the steps that the iteration does for each
# dimension, given the stride and shape. Then try to create a new stride that
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -392,6 +392,7 @@
t5 = dtype([('x', '<f4'), ('y', '<i4')])
t6 = dtype([('y', '<i4'), ('x', '<f4')])
assert hash(t5) != hash(t6)
+
def test_pickle(self):
import numpy as np
from numpypy import array, dtype
diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py
--- a/pypy/module/micronumpy/test/test_iterators.py
+++ b/pypy/module/micronumpy/test/test_iterators.py
@@ -31,16 +31,14 @@
s = i.next(s)
assert s.offset == 3
assert not i.done(s)
- assert s.indices == [0,0]
- s = i.update(s)
- assert s.indices == [0,3]
+ assert s._indices == [0,0]
+ assert i.indices(s) == [0,3]
#cause a dimension overflow
s = i.next(s)
s = i.next(s)
assert s.offset == 5
- assert s.indices == [0,3]
- s = i.update(s)
- assert s.indices == [1,0]
+ assert s._indices == [0,3]
+ assert i.indices(s) == [1,0]
#Now what happens if the array is transposed? strides[-1] != 1
# therefore layout is non-contiguous
@@ -56,12 +54,12 @@
s = i.next(s)
assert s.offset == 9
assert not i.done(s)
- assert s.indices == [0,3]
+ assert s._indices == [0,3]
#cause a dimension overflow
s = i.next(s)
s = i.next(s)
assert s.offset == 1
- assert s.indices == [1,0]
+ assert s._indices == [1,0]
def test_iterator_goto(self):
shape = [3, 5]
@@ -74,9 +72,9 @@
assert not i.contiguous
s = i.reset()
assert s.index == 0
- assert s.indices == [0, 0]
+ assert s._indices == [0, 0]
assert s.offset == a.start
s = i.goto(11)
assert s.index == 11
- assert s.indices is None
+ assert s._indices is None
assert s.offset == a.start + 5
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -186,7 +186,8 @@
#
dtypes = get_dtype_cache(self.space)
w_array = W_NDimArray.from_shape_and_storage(self.space, [2, 2],
- storage, dtypes.w_int8dtype)
+ storage, dtypes.w_int8dtype,
+ storage_bytes=4)
def get(i, j):
return w_array.getitem(self.space, [i, j]).value
assert get(0, 0) == 0
@@ -3416,6 +3417,21 @@
assert str(array(1.5)) == '1.5'
assert str(array(1.5).real) == '1.5'
+ def test_ndarray_buffer_strides(self):
+ from numpy import ndarray, array
+ base = array([1, 2, 3, 4], dtype=int)
+ a = ndarray((4,), buffer=base, dtype=int)
+ assert a[1] == 2
+ a = ndarray((4,), buffer=base, dtype=int, strides=[base.strides[0]])
+ assert a[1] == 2
+ a = ndarray((2,), buffer=base, dtype=int, strides=[2 * base.strides[0]])
+ assert a[1] == 3
+ exc = raises(ValueError, ndarray, (4,), buffer=base, dtype=int, strides=[2 * base.strides[0]])
+ assert exc.value[0] == 'strides is incompatible with shape of requested array and size of buffer'
+ exc = raises(ValueError, ndarray, (2, 1), buffer=base, dtype=int, strides=[base.strides[0]])
+ assert exc.value[0] == 'strides, if given, must be the same length as shape'
+
+
class AppTestRepr(BaseNumpyAppTest):
def setup_class(cls):
@@ -3883,6 +3899,7 @@
assert np.greater(a, a) is NotImplemented
assert np.less_equal(a, a) is NotImplemented
+
class AppTestPyPy(BaseNumpyAppTest):
def setup_class(cls):
if option.runappdirect and '__pypy__' not in sys.builtin_module_names:
@@ -3906,13 +3923,14 @@
from numpypy import array, ndarray
x = array([1, 2, 3, 4])
addr, _ = x.__array_interface__['data']
- y = ndarray._from_shape_and_storage([2, 2], addr, x.dtype)
+ sz = x.size * x.dtype.itemsize
+ y = ndarray._from_shape_and_storage([2, 2], addr, x.dtype, sz)
assert y[0, 1] == 2
y[0, 1] = 42
assert x[1] == 42
class C(ndarray):
pass
- z = ndarray._from_shape_and_storage([4, 1], addr, x.dtype, C)
+ z = ndarray._from_shape_and_storage([4, 1], addr, x.dtype, sz, C)
assert isinstance(z, C)
assert z.shape == (4, 1)
assert z[1, 0] == 42
@@ -3926,3 +3944,17 @@
assert x.__pypy_data__ is obj
del x.__pypy_data__
assert x.__pypy_data__ is None
+
+ def test_from_shape_and_storage_strides(self):
+ from numpy import ndarray, array
+ base = array([1, 2, 3, 4], dtype=int)
+ addr, _ = base.__array_interface__['data']
+ sz = base.size * base.dtype.itemsize
+ a = ndarray._from_shape_and_storage((4,), addr, int, sz)
+ assert a[1] == 2
+ a = ndarray._from_shape_and_storage((4,), addr, int, sz,
+ strides=[base.strides[0]])
+ assert a[1] == 2
+ a = ndarray._from_shape_and_storage((2,), addr, int, sz,
+ strides=[2 * base.strides[0]])
+ assert a[1] == 3
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -947,6 +947,7 @@
assert exc.value[0] == "'axis' entry is out of bounds"
def test_reduce_1d(self):
+ import numpy as np
from numpy import array, add, maximum, less, float16, complex64
assert less.reduce([5, 4, 3, 2, 1])
@@ -961,6 +962,10 @@
assert type(add.reduce(array([True, False] * 200, dtype='float16'))) is float16
assert type(add.reduce(array([True, False] * 200, dtype='complex64'))) is complex64
+ for dtype in ['bool', 'int']:
+ assert np.equal.reduce([1, 2], dtype=dtype) == True
+ assert np.equal.reduce([1, 2, 0], dtype=dtype) == False
+
def test_reduceND(self):
from numpy import add, arange
a = arange(12).reshape(3, 4)
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -103,17 +103,13 @@
self.check_trace_count(1)
self.check_simple_loop({
'float_add': 1,
More information about the pypy-commit
mailing list