[pypy-commit] pypy arm-backend-2: merge default up to 6fb87770b5d2
bivab
noreply at buildbot.pypy.org
Thu Dec 29 09:57:33 CET 2011
Author: David Schneider <david.schneider at picle.org>
Branch: arm-backend-2
Changeset: r50944:561c9d8112aa
Date: 2011-12-27 14:27 +0100
http://bitbucket.org/pypy/pypy/changeset/561c9d8112aa/
Log: merge default up to 6fb87770b5d2
diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py
--- a/lib_pypy/distributed/socklayer.py
+++ b/lib_pypy/distributed/socklayer.py
@@ -2,7 +2,7 @@
import py
from socket import socket
-XXX needs import adaptation as 'green' is removed from py lib for years
+raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years")
from py.impl.green.msgstruct import decodemessage, message
from socket import socket, AF_INET, SOCK_STREAM
import marshal
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -252,6 +252,10 @@
"use small tuples",
default=False),
+ BoolOption("withspecialisedtuple",
+ "use specialised tuples",
+ default=False),
+
BoolOption("withrope", "use ropes as the string implementation",
default=False,
requires=[("objspace.std.withstrslice", False),
@@ -365,6 +369,7 @@
config.objspace.std.suggest(optimized_list_getitem=True)
config.objspace.std.suggest(getattributeshortcut=True)
config.objspace.std.suggest(newshortcut=True)
+ config.objspace.std.suggest(withspecialisedtuple=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -496,6 +496,17 @@
def setup(self):
super(AppClassCollector, self).setup()
cls = self.obj
+ #
+ # <hack>
+ for name in dir(cls):
+ if name.startswith('test_'):
+ func = getattr(cls, name, None)
+ code = getattr(func, 'func_code', None)
+ if code and code.co_flags & 32:
+ raise AssertionError("unsupported: %r is a generator "
+ "app-level test method" % (name,))
+ # </hack>
+ #
space = cls.space
clsname = cls.__name__
if self.config.option.runappdirect:
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -45,9 +45,9 @@
# built documents.
#
# The short X.Y version.
-version = '1.6'
+version = '1.7'
# The full version, including alpha/beta/rc tags.
-release = '1.6'
+release = '1.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -112,10 +112,32 @@
You might be interested in our `benchmarking site`_ and our
`jit documentation`_.
+Note that the JIT has a very high warm-up cost, meaning that the
+programs are slow at the beginning. If you want to compare the timings
+with CPython, even relatively simple programs need to run *at least* one
+second, preferably at least a few seconds. Large, complicated programs
+need even more time to warm up the JIT.
+
.. _`benchmarking site`: http://speed.pypy.org
.. _`jit documentation`: jit/index.html
+---------------------------------------------------------------
+Couldn't the JIT dump and reload already-compiled machine code?
+---------------------------------------------------------------
+
+No, we found no way of doing that. The JIT generates machine code
+containing a large number of constant addresses --- constant at the time
+the machine code is written. The vast majority of these are probably not
+constants that you find in the executable, with a nice link name. E.g.
+the addresses of Python classes are used all the time, but Python
+classes don't come statically from the executable; they are created anew
+every time you restart your program. This makes saving and reloading
+machine code completely impossible without some very advanced way of
+mapping addresses in the old (now-dead) process to addresses in the new
+process, including checking that all the previous assumptions about the
+(now-dead) object are still true about the new object.
+
.. _`prolog and javascript`:
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -487,6 +487,16 @@
'parser', 'fcntl', '_codecs', 'binascii'
]
+ # These modules are treated like CPython treats built-in modules,
+ # i.e. they always shadow any xx.py. The other modules are treated
+ # like CPython treats extension modules, and are loaded in sys.path
+ # order by the fake entry '.../lib_pypy/__extensions__'.
+ MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([
+ '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings',
+ '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal',
+ 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport',
+ ], None)
+
def make_builtins(self):
"NOT_RPYTHON: only for initializing the space."
diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py
--- a/pypy/jit/backend/llgraph/runner.py
+++ b/pypy/jit/backend/llgraph/runner.py
@@ -261,8 +261,10 @@
return fail_index
def execute_token(self, loop_token):
- """Calls the assembler generated for the given loop.
- Returns the ResOperation that failed, of type rop.FAIL.
+ """Calls the fake 'assembler' generated for the given loop.
+ Returns the descr of the last executed operation: either the one
+ attached to the failing guard, or the one attached to the FINISH.
+ Use set_future_value_xxx() before, and get_latest_value_xxx() after.
"""
fail_index = self._execute_token(loop_token)
return self.get_fail_descr_from_number(fail_index)
diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
--- a/pypy/jit/backend/llsupport/regalloc.py
+++ b/pypy/jit/backend/llsupport/regalloc.py
@@ -17,32 +17,101 @@
""" Manage frame positions
"""
def __init__(self):
- self.frame_bindings = {}
- self.frame_depth = 0
+ self.bindings = {}
+ self.used = [] # list of bools
+ self.hint_frame_locations = {}
+
+ frame_depth = property(lambda:xxx, lambda:xxx) # XXX kill me
+
+ def get_frame_depth(self):
+ return len(self.used)
def get(self, box):
- return self.frame_bindings.get(box, None)
+ return self.bindings.get(box, None)
def loc(self, box):
- res = self.get(box)
- if res is not None:
- return res
+ """Return or create the frame location associated with 'box'."""
+ # first check if it's already in the frame_manager
+ try:
+ return self.bindings[box]
+ except KeyError:
+ pass
+ # check if we have a hint for this box
+ if box in self.hint_frame_locations:
+ # if we do, try to reuse the location for this box
+ loc = self.hint_frame_locations[box]
+ if self.try_to_reuse_location(box, loc):
+ return loc
+ # no valid hint. make up a new free location
+ return self.get_new_loc(box)
+
+ def get_new_loc(self, box):
size = self.frame_size(box.type)
- self.frame_depth += ((-self.frame_depth) & (size-1))
- # ^^^ frame_depth is rounded up to a multiple of 'size', assuming
+ # frame_depth is rounded up to a multiple of 'size', assuming
# that 'size' is a power of two. The reason for doing so is to
# avoid obscure issues in jump.py with stack locations that try
# to move from position (6,7) to position (7,8).
- newloc = self.frame_pos(self.frame_depth, box.type)
- self.frame_bindings[box] = newloc
- self.frame_depth += size
+ while self.get_frame_depth() & (size - 1):
+ self.used.append(False)
+ #
+ index = self.get_frame_depth()
+ newloc = self.frame_pos(index, box.type)
+ for i in range(size):
+ self.used.append(True)
+ #
+ if not we_are_translated(): # extra testing
+ testindex = self.get_loc_index(newloc)
+ assert testindex == index
+ #
+ self.bindings[box] = newloc
return newloc
+ def set_binding(self, box, loc):
+ self.bindings[box] = loc
+ #
+ index = self.get_loc_index(loc)
+ endindex = index + self.frame_size(box.type)
+ while len(self.used) < endindex:
+ self.used.append(False)
+ while index < endindex:
+ self.used[index] = True
+ index += 1
+
def reserve_location_in_frame(self, size):
- frame_depth = self.frame_depth
- self.frame_depth += size
+ frame_depth = self.get_frame_depth()
+ for i in range(size):
+ self.used.append(True)
return frame_depth
+ def mark_as_free(self, box):
+ try:
+ loc = self.bindings[box]
+ except KeyError:
+ return # already gone
+ del self.bindings[box]
+ #
+ size = self.frame_size(box.type)
+ baseindex = self.get_loc_index(loc)
+ for i in range(size):
+ index = baseindex + i
+ assert 0 <= index < len(self.used)
+ self.used[index] = False
+
+ def try_to_reuse_location(self, box, loc):
+ index = self.get_loc_index(loc)
+ assert index >= 0
+ size = self.frame_size(box.type)
+ for i in range(size):
+ while (index + i) >= len(self.used):
+ self.used.append(False)
+ if self.used[index + i]:
+ return False # already in use
+ # good, we can reuse the location
+ for i in range(size):
+ self.used[index + i] = True
+ self.bindings[box] = loc
+ return True
+
# abstract methods that need to be overwritten for specific assemblers
@staticmethod
def frame_pos(loc, type):
@@ -50,6 +119,10 @@
@staticmethod
def frame_size(type):
return 1
+ @staticmethod
+ def get_loc_index(loc):
+ raise NotImplementedError("Purely abstract")
+
class RegisterManager(object):
""" Class that keeps track of register allocations
@@ -70,7 +143,14 @@
self.frame_manager = frame_manager
self.assembler = assembler
+ def is_still_alive(self, v):
+ # Check if 'v' is alive at the current position.
+ # Return False if the last usage is strictly before.
+ return self.longevity[v][1] >= self.position
+
def stays_alive(self, v):
+ # Check if 'v' stays alive after the current position.
+ # Return False if the last usage is before or at position.
return self.longevity[v][1] > self.position
def next_instruction(self, incr=1):
@@ -86,11 +166,14 @@
point for all variables that might be in registers.
"""
self._check_type(v)
- if isinstance(v, Const) or v not in self.reg_bindings:
+ if isinstance(v, Const):
return
if v not in self.longevity or self.longevity[v][1] <= self.position:
- self.free_regs.append(self.reg_bindings[v])
- del self.reg_bindings[v]
+ if v in self.reg_bindings:
+ self.free_regs.append(self.reg_bindings[v])
+ del self.reg_bindings[v]
+ if self.frame_manager is not None:
+ self.frame_manager.mark_as_free(v)
def possibly_free_vars(self, vars):
""" Same as 'possibly_free_var', but for all v in vars.
diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py
--- a/pypy/jit/backend/llsupport/test/test_regalloc.py
+++ b/pypy/jit/backend/llsupport/test/test_regalloc.py
@@ -42,8 +42,13 @@
def frame_size(self, box_type):
if box_type == FLOAT:
return 2
+ elif box_type == INT:
+ return 1
else:
- return 1
+ raise ValueError(box_type)
+ def get_loc_index(self, loc):
+ assert isinstance(loc, FakeFramePos)
+ return loc.pos
class MockAsm(object):
def __init__(self):
@@ -282,7 +287,7 @@
rm.force_allocate_reg(b)
rm.before_call()
assert len(rm.reg_bindings) == 2
- assert fm.frame_depth == 2
+ assert fm.get_frame_depth() == 2
assert len(asm.moves) == 2
rm._check_invariants()
rm.after_call(boxes[-1])
@@ -305,7 +310,7 @@
rm.force_allocate_reg(b)
rm.before_call(save_all_regs=True)
assert len(rm.reg_bindings) == 0
- assert fm.frame_depth == 4
+ assert fm.get_frame_depth() == 4
assert len(asm.moves) == 4
rm._check_invariants()
rm.after_call(boxes[-1])
@@ -327,7 +332,7 @@
xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm)
xrm.loc(f0)
rm.loc(b0)
- assert fm.frame_depth == 3
+ assert fm.get_frame_depth() == 3
@@ -348,3 +353,123 @@
spilled2 = rm.force_allocate_reg(b5)
assert spilled2 is loc
rm._check_invariants()
+
+
+ def test_hint_frame_locations_1(self):
+ b0, = newboxes(0)
+ fm = TFrameManager()
+ loc123 = FakeFramePos(123, INT)
+ fm.hint_frame_locations[b0] = loc123
+ assert fm.get_frame_depth() == 0
+ loc = fm.loc(b0)
+ assert loc == loc123
+ assert fm.get_frame_depth() == 124
+
+ def test_hint_frame_locations_2(self):
+ b0, b1, b2 = newboxes(0, 1, 2)
+ longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)}
+ fm = TFrameManager()
+ asm = MockAsm()
+ rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
+ rm.force_allocate_reg(b0)
+ rm.force_allocate_reg(b1)
+ rm.force_allocate_reg(b2)
+ rm.force_spill_var(b0)
+ loc = rm.loc(b0)
+ assert isinstance(loc, FakeFramePos)
+ assert fm.get_loc_index(loc) == 0
+ rm.position = 1
+ assert fm.used == [True]
+ rm.possibly_free_var(b0)
+ assert fm.used == [False]
+ #
+ fm.hint_frame_locations[b1] = loc
+ rm.force_spill_var(b1)
+ loc1 = rm.loc(b1)
+ assert loc1 == loc
+ assert fm.used == [True]
+ #
+ fm.hint_frame_locations[b2] = loc
+ rm.force_spill_var(b2)
+ loc2 = rm.loc(b2)
+ assert loc2 != loc1 # because it was not free
+ assert fm.used == [True, True]
+ #
+ rm._check_invariants()
+
+ def test_frame_manager_basic(self):
+ b0, b1 = newboxes(0, 1)
+ fm = TFrameManager()
+ loc0 = fm.loc(b0)
+ assert fm.get_loc_index(loc0) == 0
+ #
+ assert fm.get(b1) is None
+ loc1 = fm.loc(b1)
+ assert fm.get_loc_index(loc1) == 1
+ assert fm.get(b1) == loc1
+ #
+ loc0b = fm.loc(b0)
+ assert loc0b == loc0
+ #
+ fm.loc(BoxInt())
+ assert fm.get_frame_depth() == 3
+ #
+ f0 = BoxFloat()
+ locf0 = fm.loc(f0)
+ assert fm.get_loc_index(locf0) == 4
+ assert fm.get_frame_depth() == 6
+ #
+ f1 = BoxFloat()
+ locf1 = fm.loc(f1)
+ assert fm.get_loc_index(locf1) == 6
+ assert fm.get_frame_depth() == 8
+ assert fm.used == [True, True, True, False, True, True, True, True]
+ #
+ fm.mark_as_free(b0)
+ assert fm.used == [False, True, True, False, True, True, True, True]
+ fm.mark_as_free(b0)
+ assert fm.used == [False, True, True, False, True, True, True, True]
+ fm.mark_as_free(f1)
+ assert fm.used == [False, True, True, False, True, True, False, False]
+ #
+ fm.reserve_location_in_frame(1)
+ assert fm.get_frame_depth() == 9
+ assert fm.used == [False, True, True, False, True, True, False, False, True]
+ #
+ assert b0 not in fm.bindings
+ fm.set_binding(b0, loc0)
+ assert b0 in fm.bindings
+ assert fm.used == [True, True, True, False, True, True, False, False, True]
+ #
+ b3 = BoxInt()
+ assert not fm.try_to_reuse_location(b3, loc0)
+ assert fm.used == [True, True, True, False, True, True, False, False, True]
+ #
+ fm.mark_as_free(b0)
+ assert fm.used == [False, True, True, False, True, True, False, False, True]
+ assert fm.try_to_reuse_location(b3, loc0)
+ assert fm.used == [True, True, True, False, True, True, False, False, True]
+ #
+ fm.mark_as_free(b0) # already free
+ assert fm.used == [True, True, True, False, True, True, False, False, True]
+ #
+ fm.mark_as_free(b3)
+ assert fm.used == [False, True, True, False, True, True, False, False, True]
+ f3 = BoxFloat()
+ assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT))
+ assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT))
+ assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT))
+ assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT))
+ assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT))
+ assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT))
+ assert fm.used == [False, True, True, False, True, True, False, False, True]
+ assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT))
+ assert fm.used == [False, True, True, False, True, True, True, True, True]
+ #
+ fm.used = [False]
+ assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT))
+ assert fm.used == [True, True]
+ #
+ fm.used = [True]
+ assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT))
+ assert fm.used == [True]
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -690,10 +690,11 @@
def _assemble(self, regalloc, operations):
self._regalloc = regalloc
+ regalloc.compute_hint_frame_locations(operations)
regalloc.walk_operations(operations)
if we_are_translated() or self.cpu.dont_keepalive_stuff:
self._regalloc = None # else keep it around for debugging
- frame_depth = regalloc.fm.frame_depth
+ frame_depth = regalloc.fm.get_frame_depth()
param_depth = regalloc.param_depth
jump_target_descr = regalloc.jump_target_descr
if jump_target_descr is not None:
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -138,6 +138,10 @@
return 2
else:
return 1
+ @staticmethod
+ def get_loc_index(loc):
+ assert isinstance(loc, StackLoc)
+ return loc.position
if WORD == 4:
gpr_reg_mgr_cls = X86RegisterManager
@@ -184,7 +188,6 @@
allgcrefs):
operations, _ = self._prepare(inputargs, operations, allgcrefs)
self._update_bindings(arglocs, inputargs)
- self.fm.frame_depth = prev_depths[0]
self.param_depth = prev_depths[1]
return operations
@@ -297,7 +300,7 @@
self.xrm.reg_bindings[arg] = loc
used[loc] = None
else:
- self.fm.frame_bindings[arg] = loc
+ self.fm.set_binding(arg, loc)
else:
if isinstance(loc, RegLoc):
if loc is ebp:
@@ -306,7 +309,7 @@
self.rm.reg_bindings[arg] = loc
used[loc] = None
else:
- self.fm.frame_bindings[arg] = loc
+ self.fm.set_binding(arg, loc)
self.rm.free_regs = []
for reg in self.rm.all_regs:
if reg not in used:
@@ -342,7 +345,7 @@
def get_current_depth(self):
# return (self.fm.frame_depth, self.param_depth), but trying to share
# the resulting tuple among several calls
- arg0 = self.fm.frame_depth
+ arg0 = self.fm.get_frame_depth()
arg1 = self.param_depth
result = self.assembler._current_depths_cache
if result[0] != arg0 or result[1] != arg1:
@@ -1259,6 +1262,29 @@
self.rm.possibly_free_var(tmpbox_low)
self.rm.possibly_free_var(tmpbox_high)
+ def compute_hint_frame_locations(self, operations):
+ # optimization only: fill in the 'hint_frame_locations' dictionary
+ # of rm and xrm based on the JUMP at the end of the loop, by looking
+ # at where we would like the boxes to be after the jump.
+ op = operations[-1]
+ if op.getopnum() != rop.JUMP:
+ return
+ descr = op.getdescr()
+ assert isinstance(descr, LoopToken)
+ nonfloatlocs, floatlocs = self.assembler.target_arglocs(descr)
+ for i in range(op.numargs()):
+ box = op.getarg(i)
+ if isinstance(box, Box):
+ loc = nonfloatlocs[i]
+ if isinstance(loc, StackLoc):
+ assert box.type != FLOAT
+ self.fm.hint_frame_locations[box] = loc
+ else:
+ loc = floatlocs[i]
+ if isinstance(loc, StackLoc):
+ assert box.type == FLOAT
+ self.fm.hint_frame_locations[box] = loc
+
def consider_jump(self, op):
assembler = self.assembler
assert self.jump_target_descr is None
@@ -1303,7 +1329,7 @@
def get_mark_gc_roots(self, gcrootmap, use_copy_area=False):
shape = gcrootmap.get_basic_shape(IS_X86_64)
- for v, val in self.fm.frame_bindings.items():
+ for v, val in self.fm.bindings.items():
if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)):
assert isinstance(val, StackLoc)
gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position))
diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py
--- a/pypy/jit/backend/x86/test/test_recompilation.py
+++ b/pypy/jit/backend/x86/test/test_recompilation.py
@@ -42,6 +42,7 @@
i5 = int_add(i4, 1)
i6 = int_add(i5, 1)
i7 = int_add(i5, i4)
+ force_spill(i5)
i8 = int_add(i7, 1)
i9 = int_add(i8, 1)
finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2)
@@ -49,10 +50,9 @@
bridge = self.attach_bridge(ops, loop, -2)
descr = loop.operations[2].getdescr()
new = descr._x86_bridge_frame_depth
- assert descr._x86_bridge_param_depth == 0
- # XXX: Maybe add enough ops to force stack on 64-bit as well?
- if IS_X86_32:
- assert new > previous
+ assert descr._x86_bridge_param_depth == 0
+ # the force_spill() forces the stack to grow
+ assert new > previous
self.cpu.set_future_value_int(0, 0)
fail = self.run(loop)
assert fail.identifier == 2
@@ -104,6 +104,9 @@
i8 = int_add(i3, 1)
i6 = int_add(i8, i10)
i7 = int_add(i3, i6)
+ force_spill(i6)
+ force_spill(i7)
+ force_spill(i8)
i12 = int_add(i7, i8)
i11 = int_add(i12, i6)
jump(i3, i12, i11, i10, i6, i7, descr=looptoken)
@@ -112,9 +115,8 @@
guard_op = loop.operations[5]
loop_frame_depth = loop.token._x86_frame_depth
assert loop.token._x86_param_depth == 0
- # XXX: Maybe add enough ops to force stack on 64-bit as well?
- if IS_X86_32:
- assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
+ # the force_spill() forces the stack to grow
+ assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
assert guard_op.getdescr()._x86_bridge_param_depth == 0
self.cpu.set_future_value_int(0, 0)
self.cpu.set_future_value_int(1, 0)
diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py
--- a/pypy/jit/metainterp/test/test_del.py
+++ b/pypy/jit/metainterp/test/test_del.py
@@ -1,5 +1,7 @@
import py
-from pypy.rlib.jit import JitDriver
+from pypy.rlib.jit import JitDriver, dont_look_inside
+from pypy.rlib.objectmodel import keepalive_until_here
+from pypy.rlib import rgc
from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
@@ -80,6 +82,47 @@
assert res == 1
self.check_resops(call=1) # for the case B(), but not for the case A()
+ def test_keepalive(self):
+ py.test.skip("XXX fails") # hum, I think the test itself is broken
+ #
+ mydriver = JitDriver(reds = ['n', 'states'], greens = [])
+ class State:
+ num = 1
+ class X:
+ def __init__(self, state):
+ self.state = state
+ def __del__(self):
+ self.state.num += 1
+ @dont_look_inside
+ def do_stuff():
+ pass
+ def f(n):
+ states = []
+ while n > 0:
+ mydriver.jit_merge_point(n=n, states=states)
+ state = State()
+ states.append(state)
+ x = X(state)
+ do_stuff()
+ state.num *= 1000
+ do_stuff()
+ keepalive_until_here(x)
+ n -= 1
+ return states
+ def main(n):
+ states = f(n)
+ rgc.collect()
+ rgc.collect()
+ err = 1001
+ for state in states:
+ if state.num != 1001:
+ err = state.num
+ print 'ERROR:', err
+ return err
+ assert main(20) == 1001
+ res = self.meta_interp(main, [20])
+ assert res == 1001
+
class TestLLtype(DelTests, LLJitMixin):
def test_signal_action(self):
diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py
--- a/pypy/jit/metainterp/test/test_fficall.py
+++ b/pypy/jit/metainterp/test/test_fficall.py
@@ -1,3 +1,4 @@
+from __future__ import with_statement
import py
from pypy.jit.metainterp.test.support import LLJitMixin
diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py
--- a/pypy/jit/tl/pypyjit_demo.py
+++ b/pypy/jit/tl/pypyjit_demo.py
@@ -2,13 +2,15 @@
pypyjit.set_param(threshold=200)
+def g(*args):
+ return len(args)
+
def f(n):
- pairs = [(0.0, 1.0), (2.0, 3.0)] * n
- mag = 0
- for (x1, x2) in pairs:
- dx = x1 - x2
- mag += ((dx * dx ) ** (-1.5))
- return n
+ s = 0
+ for i in range(n):
+ l = [i, n, 2]
+ s += g(*l)
+ return s
try:
print f(301)
diff --git a/pypy/module/_continuation/test/test_translated.py b/pypy/module/_continuation/test/test_translated.py
--- a/pypy/module/_continuation/test/test_translated.py
+++ b/pypy/module/_continuation/test/test_translated.py
@@ -93,13 +93,20 @@
if not option.runappdirect:
py.test.skip("meant only for -A run")
- def test_single_threaded(self):
- for i in range(20):
- yield Runner().run_test,
-
- def test_multi_threaded(self):
- for i in range(5):
- yield multithreaded_test,
+def _setup():
+ for _i in range(20):
+ def test_single_threaded(self):
+ Runner().run_test()
+ test_single_threaded.func_name = 'test_single_threaded_%d' % _i
+ setattr(AppTestWrapper, test_single_threaded.func_name,
+ test_single_threaded)
+ for _i in range(5):
+ def test_multi_threaded(self):
+ multithreaded_test()
+ test_multi_threaded.func_name = 'test_multi_threaded_%d' % _i
+ setattr(AppTestWrapper, test_multi_threaded.func_name,
+ test_multi_threaded)
+_setup()
class ThreadTest(object):
def __init__(self, lock):
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -21,11 +21,11 @@
class W_Hash(Wrappable):
ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
- _block_size = -1
def __init__(self, space, name):
self.name = name
- self.digest_size = self.compute_digest_size()
+ digest_type = self.digest_type_by_name(space)
+ self.digest_size = rffi.getintfield(digest_type, 'c_md_size')
# Allocate a lock for each HASH object.
# An optimization would be to not release the GIL on small requests,
@@ -34,21 +34,22 @@
ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw')
rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size)
+ ropenssl.EVP_DigestInit(ctx, digest_type)
self.ctx = ctx
- def initdigest(self, space, name):
- digest = ropenssl.EVP_get_digestbyname(name)
- if not digest:
- raise OperationError(space.w_ValueError,
- space.wrap("unknown hash function"))
- ropenssl.EVP_DigestInit(self.ctx, digest)
-
def __del__(self):
# self.lock.free()
if self.ctx:
ropenssl.EVP_MD_CTX_cleanup(self.ctx)
lltype.free(self.ctx, flavor='raw')
+ def digest_type_by_name(self, space):
+ digest_type = ropenssl.EVP_get_digestbyname(self.name)
+ if not digest_type:
+ raise OperationError(space.w_ValueError,
+ space.wrap("unknown hash function"))
+ return digest_type
+
def descr_repr(self, space):
addrstring = self.getaddrstring(space)
return space.wrap("<%s HASH object at 0x%s>" % (
@@ -87,7 +88,9 @@
return space.wrap(self.digest_size)
def get_block_size(self, space):
- return space.wrap(self.compute_block_size())
+ digest_type = self.digest_type_by_name(space)
+ block_size = rffi.getintfield(digest_type, 'c_block_size')
+ return space.wrap(block_size)
def _digest(self, space):
with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx:
@@ -99,36 +102,6 @@
ropenssl.EVP_MD_CTX_cleanup(ctx)
return rffi.charpsize2str(digest, digest_size)
- def compute_digest_size(self):
- # XXX This isn't the nicest way, but the EVP_MD_size OpenSSL
- # XXX function is defined as a C macro on OS X and would be
- # XXX significantly harder to implement in another way.
- # Values are digest sizes in bytes
- return {
- 'md5': 16, 'MD5': 16,
- 'sha1': 20, 'SHA1': 20,
- 'sha224': 28, 'SHA224': 28,
- 'sha256': 32, 'SHA256': 32,
- 'sha384': 48, 'SHA384': 48,
- 'sha512': 64, 'SHA512': 64,
- }.get(self.name, 0)
-
- def compute_block_size(self):
- if self._block_size != -1:
- return self._block_size
- # XXX This isn't the nicest way, but the EVP_MD_CTX_block_size
- # XXX OpenSSL function is defined as a C macro on some systems
- # XXX and would be significantly harder to implement in
- # XXX another way.
- self._block_size = {
- 'md5': 64, 'MD5': 64,
- 'sha1': 64, 'SHA1': 64,
- 'sha224': 64, 'SHA224': 64,
- 'sha256': 64, 'SHA256': 64,
- 'sha384': 128, 'SHA384': 128,
- 'sha512': 128, 'SHA512': 128,
- }.get(self.name, 0)
- return self._block_size
W_Hash.typedef = TypeDef(
'HASH',
@@ -142,11 +115,11 @@
digestsize=GetSetProperty(W_Hash.get_digest_size),
block_size=GetSetProperty(W_Hash.get_block_size),
)
+W_Hash.acceptable_as_base_class = False
@unwrap_spec(name=str, string='bufferstr')
def new(space, name, string=''):
w_hash = W_Hash(space, name)
- w_hash.initdigest(space, name)
w_hash.update(space, string)
return space.wrap(w_hash)
@@ -158,6 +131,6 @@
return new(space, name, string)
return new_hash
-for name in algorithms:
- newname = 'new_%s' % (name,)
- globals()[newname] = make_new_hash(name, newname)
+for _name in algorithms:
+ _newname = 'new_%s' % (_name,)
+ globals()[_newname] = make_new_hash(_name, _newname)
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py
--- a/pypy/module/_hashlib/test/test_hashlib.py
+++ b/pypy/module/_hashlib/test/test_hashlib.py
@@ -79,3 +79,28 @@
assert h.digest() == _hashlib.openssl_md5('x' * 20).digest()
_hashlib.openssl_sha1(b).digest()
+ def test_extra_algorithms(self):
+ expected_results = {
+ "md5": "bb649c83dd1ea5c9d9dec9a18df0ffe9",
+ "md4": "c275b8454684ea416b93d7a418b43176",
+ "mdc2": None, # XXX find the correct expected value
+ "sha": "e2b0a8609b47c58e5d984c9ccfe69f9b654b032b",
+ "ripemd160": "cc4a5ce1b3df48aec5d22d1f16b894a0b894eccc",
+ "whirlpool": ("1a22b79fe5afda02c63a25927193ed01dc718b74"
+ "026e597608ce431f9c3d2c9e74a7350b7fbb7c5d"
+ "4effe5d7a31879b8b7a10fd2f544c4ca268ecc6793923583"),
+ }
+ import _hashlib
+ test_string = "Nobody inspects the spammish repetition"
+ for hash_name, expected in sorted(expected_results.items()):
+ try:
+ m = _hashlib.new(hash_name)
+ except ValueError, e:
+ print 'skipped %s: %s' % (hash_name, e)
+ continue
+ m.update(test_string)
+ got = m.hexdigest()
+ assert got and type(got) is str and len(got) % 2 == 0
+ got.decode('hex')
+ if expected is not None:
+ assert got == expected
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -864,6 +864,7 @@
elif sys.platform.startswith('linux'):
compile_extra.append("-Werror=implicit-function-declaration")
export_symbols_eci.append('pypyAPI')
+ compile_extra.append('-g')
else:
kwds["includes"] = ['Python.h'] # this is our Python.h
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -14,7 +14,6 @@
METH_VARARGS, build_type_checkers, PyObjectFields, bootstrap_function)
from pypy.module.cpyext.pyerrors import PyErr_Occurred
from pypy.rlib.objectmodel import we_are_translated
-from pypy.objspace.std.tupleobject import W_TupleObject
PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction')
PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject))
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -42,11 +42,11 @@
which case o is returned. Use PySequence_Fast_GET_ITEM() to access the
members of the result. Returns NULL on failure. If the object is not a
sequence, raises TypeError with m as the message text."""
- if (space.is_true(space.isinstance(w_obj, space.w_list)) or
- space.is_true(space.isinstance(w_obj, space.w_tuple))):
+ if (isinstance(w_obj, listobject.W_ListObject) or
+ isinstance(w_obj, tupleobject.W_TupleObject)):
return w_obj
try:
- return space.newtuple(space.fixedview(w_obj))
+ return tupleobject.W_TupleObject(space.fixedview(w_obj))
except OperationError:
raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m)))
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -6,13 +6,12 @@
borrow_from, make_ref, from_ref)
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.objspace.std.tupleobject import W_TupleObject
-from pypy.objspace.std.smalltupleobject import W_SmallTupleObject
PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple")
@cpython_api([Py_ssize_t], PyObject)
def PyTuple_New(space, size):
- return space.newtuple([space.w_None] * size)
+ return W_TupleObject([space.w_None] * size)
@cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1)
def PyTuple_SetItem(space, w_t, pos, w_obj):
@@ -24,12 +23,12 @@
return 0
def _setitem_tuple(w_t, pos, w_obj):
- if isinstance(w_t, W_TupleObject):
- w_t.wrappeditems[pos] = w_obj
- elif isinstance(w_t, W_SmallTupleObject):
- w_t.setitem(pos, w_obj)
- else:
- assert False
+ # this function checks that w_t is really a W_TupleObject. It
+ # should only ever be called with a freshly built tuple from
+ # PyTuple_New(), which always return a W_TupleObject, even if there
+ # are also other implementations of tuples.
+ assert isinstance(w_t, W_TupleObject)
+ w_t.wrappeditems[pos] = w_obj
@cpython_api([PyObject, Py_ssize_t], PyObject)
def PyTuple_GetItem(space, w_t, pos):
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -483,10 +483,20 @@
# XXX Check for frozen modules?
# when w_path is a string
+ delayed_builtin = None
+ w_lib_extensions = None
+
if w_path is None:
# check the builtin modules
if modulename in space.builtin_modules:
- return FindInfo(C_BUILTIN, modulename, None)
+ delayed_builtin = FindInfo(C_BUILTIN, modulename, None)
+ # a "real builtin module xx" shadows every file "xx.py" there
+ # could possibly be; a "pseudo-extension module" does not, and
+ # is only loaded at the point in sys.path where we find
+ # '.../lib_pypy/__extensions__'.
+ if modulename in space.MODULES_THAT_ALWAYS_SHADOW:
+ return delayed_builtin
+ w_lib_extensions = space.sys.get_state(space).w_lib_extensions
w_path = space.sys.get('path')
# XXX check frozen modules?
@@ -495,6 +505,9 @@
if w_path is not None:
for w_pathitem in space.unpackiterable(w_path):
# sys.path_hooks import hook
+ if (w_lib_extensions is not None and
+ space.eq_w(w_pathitem, w_lib_extensions)):
+ return delayed_builtin
if use_loader:
w_loader = find_in_path_hooks(space, w_modulename, w_pathitem)
if w_loader:
@@ -527,7 +540,7 @@
# Out of file descriptors.
# not found
- return None
+ return delayed_builtin
def _prepare_module(space, w_mod, filename, pkgdir):
w = space.wrap
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -38,6 +38,8 @@
test_reload = "def test():\n raise ValueError\n",
infinite_reload = "import infinite_reload; reload(infinite_reload)",
del_sys_module = "import sys\ndel sys.modules['del_sys_module']\n",
+ itertools = "hello_world = 42\n",
+ gc = "should_never_be_seen = 42\n",
)
root.ensure("notapackage", dir=1) # empty, no __init__.py
setuppkg("pkg",
@@ -147,6 +149,8 @@
class AppTestImport:
def setup_class(cls): # interpreter-level
+ cls.space = gettestobjspace(usemodules=['itertools'])
+ cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect)
cls.saved_modules = _setup(cls.space)
#XXX Compile class
@@ -571,6 +575,50 @@
else:
assert False, 'should not work'
+ def test_shadow_builtin(self):
+ if self.runappdirect: skip("hard to test: module is already imported")
+ # 'import gc' is supposed to always find the built-in module;
+ # like CPython, it is a built-in module, so it shadows everything,
+ # even though there is a gc.py.
+ import sys
+ assert 'gc' not in sys.modules
+ import gc
+ assert not hasattr(gc, 'should_never_be_seen')
+ assert '(built-in)' in repr(gc)
+ del sys.modules['gc']
+
+ def test_shadow_extension_1(self):
+ if self.runappdirect: skip("hard to test: module is already imported")
+ # 'import itertools' is supposed to find itertools.py if there is
+ # one in sys.path.
+ import sys
+ assert 'itertools' not in sys.modules
+ import itertools
+ assert hasattr(itertools, 'hello_world')
+ assert not hasattr(itertools, 'count')
+ assert '(built-in)' not in repr(itertools)
+ del sys.modules['itertools']
+
+ def test_shadow_extension_2(self):
+ if self.runappdirect: skip("hard to test: module is already imported")
+ # 'import itertools' is supposed to find the built-in module even
+ # if there is also one in sys.path as long as it is *after* the
+ # special entry '.../lib_pypy/__extensions__'. (Note that for now
+ # there is one in lib_pypy/itertools.py, which should not be seen
+ # either; hence the (built-in) test below.)
+ import sys
+ assert 'itertools' not in sys.modules
+ sys.path.append(sys.path.pop(0))
+ try:
+ import itertools
+ assert not hasattr(itertools, 'hello_world')
+ assert hasattr(itertools, 'izip')
+ assert '(built-in)' in repr(itertools)
+ finally:
+ sys.path.insert(0, sys.path.pop())
+ del sys.modules['itertools']
+
+
class TestAbi:
def test_abi_tag(self):
space1 = gettestobjspace(soabi='TEST')
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -24,11 +24,16 @@
'number': 'interp_boxes.W_NumberBox',
'integer': 'interp_boxes.W_IntegerBox',
'signedinteger': 'interp_boxes.W_SignedIntegerBox',
+ 'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox',
'bool_': 'interp_boxes.W_BoolBox',
'int8': 'interp_boxes.W_Int8Box',
+ 'uint8': 'interp_boxes.W_UInt8Box',
'int16': 'interp_boxes.W_Int16Box',
+ 'uint16': 'interp_boxes.W_UInt16Box',
'int32': 'interp_boxes.W_Int32Box',
+ 'uint32': 'interp_boxes.W_UInt32Box',
'int64': 'interp_boxes.W_Int64Box',
+ 'uint64': 'interp_boxes.W_UInt64Box',
'int_': 'interp_boxes.W_LongBox',
'inexact': 'interp_boxes.W_InexactBox',
'floating': 'interp_boxes.W_FloatingBox',
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -38,6 +38,7 @@
w_ValueError = None
w_TypeError = None
w_IndexError = None
+ w_OverflowError = None
w_None = None
w_bool = "bool"
@@ -149,6 +150,10 @@
# XXX array probably
assert False
+ def exception_match(self, w_exc_type, w_check_class):
+ # Good enough for now
+ raise NotImplementedError
+
class FloatObject(W_Root):
tp = FakeSpace.w_float
def __init__(self, floatval):
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py
--- a/pypy/module/micronumpy/interp_boxes.py
+++ b/pypy/module/micronumpy/interp_boxes.py
@@ -91,6 +91,9 @@
descr_neg = _unaryop_impl("negative")
descr_abs = _unaryop_impl("absolute")
+ def descr_tolist(self, space):
+ return self.get_dtype(space).itemtype.to_builtin_type(space, self)
+
class W_BoolBox(W_GenericBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("bool")
@@ -104,38 +107,38 @@
class W_SignedIntegerBox(W_IntegerBox):
pass
-class W_UnsignedIntgerBox(W_IntegerBox):
+class W_UnsignedIntegerBox(W_IntegerBox):
pass
class W_Int8Box(W_SignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("int8")
-class W_UInt8Box(W_UnsignedIntgerBox, PrimitiveBox):
+class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("uint8")
class W_Int16Box(W_SignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("int16")
-class W_UInt16Box(W_UnsignedIntgerBox, PrimitiveBox):
+class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("uint16")
class W_Int32Box(W_SignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("int32")
-class W_UInt32Box(W_UnsignedIntgerBox, PrimitiveBox):
+class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("uint32")
class W_LongBox(W_SignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("long")
-class W_ULongBox(W_UnsignedIntgerBox, PrimitiveBox):
+class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox):
pass
class W_Int64Box(W_SignedIntegerBox, PrimitiveBox):
descr__new__, get_dtype = new_dtype_getter("int64")
-class W_UInt64Box(W_UnsignedIntgerBox, PrimitiveBox):
- pass
+class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox):
+ descr__new__, get_dtype = new_dtype_getter("uint64")
class W_InexactBox(W_NumberBox):
_attrs_ = ()
@@ -179,6 +182,8 @@
__neg__ = interp2app(W_GenericBox.descr_neg),
__abs__ = interp2app(W_GenericBox.descr_abs),
+
+ tolist = interp2app(W_GenericBox.descr_tolist),
)
W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef,
@@ -198,13 +203,18 @@
__module__ = "numpypy",
)
+W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef,
+ __module__ = "numpypy",
+)
+
W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef,
__module__ = "numpypy",
__new__ = interp2app(W_Int8Box.descr__new__.im_func),
)
-W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntgerBox.typedef,
+W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef,
__module__ = "numpypy",
+ __new__ = interp2app(W_UInt8Box.descr__new__.im_func),
)
W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef,
@@ -212,8 +222,9 @@
__new__ = interp2app(W_Int16Box.descr__new__.im_func),
)
-W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntgerBox.typedef,
+W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef,
__module__ = "numpypy",
+ __new__ = interp2app(W_UInt16Box.descr__new__.im_func),
)
W_Int32Box.typedef = TypeDef("int32", W_SignedIntegerBox.typedef,
@@ -221,8 +232,9 @@
__new__ = interp2app(W_Int32Box.descr__new__.im_func),
)
-W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntgerBox.typedef,
+W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef,
__module__ = "numpypy",
+ __new__ = interp2app(W_UInt32Box.descr__new__.im_func),
)
if LONG_BIT == 32:
@@ -233,7 +245,7 @@
__module__ = "numpypy",
)
-W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntgerBox.typedef,
+W_ULongBox.typedef = TypeDef("u" + long_name, W_UnsignedIntegerBox.typedef,
__module__ = "numpypy",
)
@@ -242,8 +254,9 @@
__new__ = interp2app(W_Int64Box.descr__new__.im_func),
)
-W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntgerBox.typedef,
+W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef,
__module__ = "numpypy",
+ __new__ = interp2app(W_UInt64Box.descr__new__.im_func),
)
W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef,
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -876,6 +876,17 @@
arr.setshape(space, new_shape)
return arr
+ def descr_tolist(self, space):
+ if len(self.shape) == 0:
+ assert isinstance(self, Scalar)
+ return self.value.descr_tolist(space)
+ w_result = space.newlist([])
+ for i in range(self.shape[0]):
+ space.call_method(w_result, "append",
+ space.call_method(self.descr_getitem(space, space.wrap(i)), "tolist")
+ )
+ return w_result
+
def descr_mean(self, space):
return space.div(self.descr_sum(space), space.wrap(self.find_size()))
@@ -1485,6 +1496,7 @@
copy = interp2app(BaseArray.descr_copy),
reshape = interp2app(BaseArray.descr_reshape),
+ tolist = interp2app(BaseArray.descr_tolist),
)
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -174,6 +174,8 @@
raises(TypeError, numpy.integer, 0)
exc = raises(TypeError, numpy.signedinteger, 0)
assert str(exc.value) == "cannot create 'signedinteger' instances"
+ exc = raises(TypeError, numpy.unsignedinteger, 0)
+ assert str(exc.value) == "cannot create 'unsignedinteger' instances"
raises(TypeError, numpy.floating, 0)
raises(TypeError, numpy.inexact, 0)
@@ -210,17 +212,54 @@
assert type(int(x)) is int
assert int(x) == -128
+ def test_uint8(self):
+ import numpypy as numpy
+
+ assert numpy.uint8.mro() == [numpy.uint8, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object]
+
+ a = numpy.array([1, 2, 3], numpy.uint8)
+ assert type(a[1]) is numpy.uint8
+ assert numpy.dtype("uint8").type is numpy.uint8
+
+ x = numpy.uint8(128)
+ assert x == 128
+ assert x != -128
+ assert type(x) is numpy.uint8
+ assert repr(x) == "128"
+
+ assert type(int(x)) is int
+ assert int(x) == 128
+
+ assert numpy.uint8(255) == 255
+ assert numpy.uint8(256) == 0
+
def test_int16(self):
import numpypy as numpy
x = numpy.int16(3)
assert x == 3
+ assert numpy.int16(32767) == 32767
+ assert numpy.int16(32768) == -32768
+
+ def test_uint16(self):
+ import numpypy as numpy
+
+ assert numpy.uint16(65535) == 65535
+ assert numpy.uint16(65536) == 0
def test_int32(self):
import numpypy as numpy
x = numpy.int32(23)
assert x == 23
+ assert numpy.int32(2147483647) == 2147483647
+ assert numpy.int32(2147483648) == -2147483648
+
+ def test_uint32(self):
+ import numpypy as numpy
+
+ assert numpy.uint32(4294967295) == 4294967295
+ assert numpy.uint32(4294967296) == 0
def test_int_(self):
import numpypy as numpy
@@ -240,6 +279,25 @@
assert numpy.dtype(numpy.int64).type is numpy.int64
assert numpy.int64(3) == 3
+ assert numpy.int64(9223372036854775807) == 9223372036854775807
+ raises(OverflowError, numpy.int64, 9223372036854775808)
+
+ def test_uint64(self):
+ import sys
+ import numpypy as numpy
+
+ assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object]
+
+ assert numpy.dtype(numpy.uint64).type is numpy.uint64
+ skip("see comment")
+ # These tests pass "by chance" on numpy, things that are larger than
+ # platform long (i.e. a python int), don't get put in a normal box,
+ # instead they become an object array containing a long, which we
+ # don't have yet, so these can't pass.
+ assert numpy.uint64(9223372036854775808) == 9223372036854775808
+ assert numpy.uint64(18446744073709551615) == 18446744073709551615
+ raises(OverflowError, numpy.uint64, 18446744073709551616)
+
def test_float32(self):
import numpypy as numpy
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -879,6 +879,45 @@
b[0] = 3
assert b.__debug_repr__() == 'Call2(add, forced=Array)'
+ def test_tolist_scalar(self):
+ from numpypy import int32, bool_
+ x = int32(23)
+ assert x.tolist() == 23
+ assert type(x.tolist()) is int
+ y = bool_(True)
+ assert y.tolist() is True
+
+ def test_tolist_zerodim(self):
+ from numpypy import array
+ x = array(3)
+ assert x.tolist() == 3
+ assert type(x.tolist()) is int
+
+ def test_tolist_singledim(self):
+ from numpypy import array
+ a = array(range(5))
+ assert a.tolist() == [0, 1, 2, 3, 4]
+ assert type(a.tolist()[0]) is int
+ b = array([0.2, 0.4, 0.6])
+ assert b.tolist() == [0.2, 0.4, 0.6]
+
+ def test_tolist_multidim(self):
+ from numpypy import array
+ a = array([[1, 2], [3, 4]])
+ assert a.tolist() == [[1, 2], [3, 4]]
+
+ def test_tolist_view(self):
+ from numpypy import array
+ a = array([[1,2],[3,4]])
+ assert (a + a).tolist() == [[2, 4], [6, 8]]
+
+ def test_tolist_slice(self):
+ from numpypy import array
+ a = array([[17.1, 27.2], [40.3, 50.3]])
+ assert a[:,0].tolist() == [17.1, 40.3]
+ assert a[0].tolist() == [17.1, 27.2]
+
+
class AppTestMultiDim(BaseNumpyAppTest):
def test_init(self):
import numpypy
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -185,7 +185,8 @@
# sure it was optimized correctly.
# XXX the comment above is wrong now. We need preferrably a way to
# count the two loops separately
- self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1, 'getfield_gc': 41,
+ self.check_resops({'setinteriorfield_raw': 4, 'guard_nonnull': 1,
+ 'getfield_gc': 35, 'getfield_gc_pure': 6,
'guard_class': 22, 'int_add': 8, 'float_mul': 2,
'guard_isnull': 2, 'jump': 4, 'int_ge': 4,
'getinteriorfield_raw': 4, 'float_add': 2, 'guard_false': 4,
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -1,6 +1,7 @@
import functools
import math
+from pypy.interpreter.error import OperationError
from pypy.module.micronumpy import interp_boxes
from pypy.objspace.std.floatobject import float2string
from pypy.rlib import rfloat, libffi, clibffi
@@ -77,6 +78,9 @@
w_obj.__init__(self._coerce(space, w_item).value)
return w_obj
+ def to_builtin_type(self, space, box):
+ return space.wrap(self.for_computation(self.unbox(box)))
+
def _coerce(self, space, w_item):
raise NotImplementedError
@@ -179,6 +183,9 @@
def _coerce(self, space, w_item):
return self.box(space.is_true(w_item))
+ def to_builtin_type(self, space, w_item):
+ return space.wrap(self.unbox(w_item))
+
def str_format(self, box):
value = self.unbox(box)
return "True" if value else "False"
@@ -271,6 +278,19 @@
T = rffi.ULONGLONG
BoxType = interp_boxes.W_UInt64Box
+ def _coerce(self, space, w_item):
+ try:
+ return Integer._coerce(self, space, w_item)
+ except OperationError, e:
+ if not e.match(space, space.w_OverflowError):
+ raise
+ bigint = space.bigint_w(w_item)
+ try:
+ value = bigint.toulonglong()
+ except OverflowError:
+ raise OperationError(space.w_OverflowError, space.w_None)
+ return self.box(value)
+
class Float(Primitive):
_mixin_ = True
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -170,3 +170,7 @@
def get_flag(self, name):
space = self.space
return space.int_w(space.getattr(self.get('flags'), space.wrap(name)))
+
+ def get_state(self, space):
+ from pypy.module.sys import state
+ return state.get(space)
diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py
--- a/pypy/module/sys/state.py
+++ b/pypy/module/sys/state.py
@@ -24,7 +24,7 @@
# Initialize the default path
pypydir = os.path.dirname(os.path.abspath(pypy.__file__))
srcdir = os.path.dirname(pypydir)
- path = getinitialpath(srcdir)
+ path = getinitialpath(self, srcdir)
self.w_path = space.newlist([space.wrap(p) for p in path])
def checkdir(path):
@@ -35,7 +35,7 @@
platform = sys.platform
-def getinitialpath(prefix):
+def getinitialpath(state, prefix):
from pypy.module.sys.version import CPYTHON_VERSION
dirname = '%d.%d' % (CPYTHON_VERSION[0],
CPYTHON_VERSION[1])
@@ -49,6 +49,12 @@
checkdir(lib_pypy)
importlist = []
+ #
+ if state is not None: # 'None' for testing only
+ lib_extensions = os.path.join(lib_pypy, '__extensions__')
+ state.w_lib_extensions = state.space.wrap(lib_extensions)
+ importlist.append(lib_extensions)
+ #
importlist.append(lib_pypy)
importlist.append(python_std_lib_modified)
importlist.append(python_std_lib)
@@ -71,7 +77,7 @@
@unwrap_spec(srcdir=str)
def pypy_initial_path(space, srcdir):
try:
- path = getinitialpath(srcdir)
+ path = getinitialpath(get(space), srcdir)
except OSError:
return space.w_None
else:
diff --git a/pypy/module/sys/test/test_initialpath.py b/pypy/module/sys/test/test_initialpath.py
--- a/pypy/module/sys/test/test_initialpath.py
+++ b/pypy/module/sys/test/test_initialpath.py
@@ -13,7 +13,7 @@
def test_stdlib_in_prefix(tmpdir):
dirs = build_hierarchy(tmpdir)
- path = getinitialpath(str(tmpdir))
+ path = getinitialpath(None, str(tmpdir))
# we get at least 'dirs', and maybe more (e.g. plat-linux2)
assert path[:len(dirs)] == map(str, dirs)
@@ -21,7 +21,7 @@
lib_pypy, lib_python_modified, lib_python = build_hierarchy(tmpdir)
lib_tk_modified = lib_python_modified.join('lib-tk')
lib_tk = lib_python.join('lib-tk')
- path = getinitialpath(str(tmpdir))
+ path = getinitialpath(None, str(tmpdir))
i = path.index(str(lib_tk_modified))
j = path.index(str(lib_tk))
assert i < j
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -9,7 +9,7 @@
from pypy.rlib.unroll import unrolling_iterable
from pypy.rlib.objectmodel import instantiate, we_are_translated
from pypy.rlib.nonconst import NonConstant
-from pypy.rlib.rarithmetic import r_uint
+from pypy.rlib.rarithmetic import r_uint, r_singlefloat
from pypy.translator.translator import TranslationContext
from pypy.tool.option import make_config
@@ -145,9 +145,15 @@
self._see_interp2app(x)
if isinstance(x, GetSetProperty):
self._see_getsetproperty(x)
+ if isinstance(x, r_singlefloat):
+ self._wrap_not_rpython(x)
return w_some_obj()
wrap._annspecialcase_ = "specialize:argtype(1)"
+ def _wrap_not_rpython(self, x):
+ "NOT_RPYTHON"
+ raise NotImplementedError
+
def _see_interp2app(self, interp2app):
"NOT_RPYTHON"
activation = interp2app._code.activation
@@ -238,6 +244,7 @@
t = TranslationContext(config=config)
self.t = t # for debugging
ann = t.buildannotator()
+ ann.policy.allow_someobjects = False
if func is not None:
ann.build_types(func, argtypes, complete_now=False)
#
diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py
--- a/pypy/objspace/std/iterobject.py
+++ b/pypy/objspace/std/iterobject.py
@@ -33,9 +33,6 @@
"""Sequence iterator specialized for lists, accessing
directly their RPython-level list of wrapped objects.
"""
- def __init__(w_self, w_seq):
- W_AbstractSeqIterObject.__init__(w_self, w_seq)
- w_self.w_seq = w_seq
class W_FastTupleIterObject(W_AbstractSeqIterObject):
"""Sequence iterator specialized for tuples, accessing
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -9,8 +9,9 @@
from pypy.interpreter import gateway, baseobjspace
from pypy.rlib.objectmodel import instantiate, specialize
from pypy.rlib.listsort import make_timsort_class
-from pypy.rlib import rerased, jit
+from pypy.rlib import rerased, jit, debug
from pypy.interpreter.argument import Signature
+from pypy.tool.sourcetools import func_with_new_name
UNROLL_CUTOFF = 5
@@ -170,6 +171,19 @@
share with the storage, if possible."""
return self.strategy.getitems(self)
+ def getitems_fixedsize(self):
+ """Returns a fixed-size list of all items after wrapping them."""
+ l = self.strategy.getitems_fixedsize(self)
+ debug.make_sure_not_resized(l)
+ return l
+
+ def getitems_unroll(self):
+ """Returns a fixed-size list of all items after wrapping them. The JIT
+ will fully unroll this function. """
+ l = self.strategy.getitems_unroll(self)
+ debug.make_sure_not_resized(l)
+ return l
+
def getitems_copy(self):
"""Returns a copy of all items in the list. Same as getitems except for
ObjectListStrategy."""
@@ -366,6 +380,8 @@
def getitems_copy(self, w_list):
return []
+ getitems_fixedsize = func_with_new_name(getitems_copy, "getitems_fixedsize")
+ getitems_unroll = getitems_fixedsize
def getstorage_copy(self, w_list):
return self.erase(None)
@@ -496,7 +512,6 @@
# tuple is unmutable
return w_list.lstorage
-
@specialize.arg(2)
def _getitems_range(self, w_list, wrap_items):
l = self.unerase(w_list.lstorage)
@@ -519,6 +534,13 @@
return r
+ @jit.dont_look_inside
+ def getitems_fixedsize(self, w_list):
+ return self._getitems_range_unroll(w_list, True)
+ def getitems_unroll(self, w_list):
+ return self._getitems_range_unroll(w_list, True)
+ _getitems_range_unroll = jit.unroll_safe(func_with_new_name(_getitems_range, "_getitems_range_unroll"))
+
def getslice(self, w_list, start, stop, step, length):
v = self.unerase(w_list.lstorage)
old_start = v[0]
@@ -672,10 +694,19 @@
return self.wrap(r)
@jit.look_inside_iff(lambda self, w_list:
- jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF)
+ jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF)
def getitems_copy(self, w_list):
return [self.wrap(item) for item in self.unerase(w_list.lstorage)]
+ @jit.unroll_safe
+ def getitems_unroll(self, w_list):
+ return [self.wrap(item) for item in self.unerase(w_list.lstorage)]
+
+ @jit.look_inside_iff(lambda self, w_list:
+ jit.isconstant(w_list.length()) and w_list.length() < UNROLL_CUTOFF)
+ def getitems_fixedsize(self, w_list):
+ return self.getitems_unroll(w_list)
+
def getstorage_copy(self, w_list):
items = self.unerase(w_list.lstorage)[:]
return self.erase(items)
diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py
--- a/pypy/objspace/std/model.py
+++ b/pypy/objspace/std/model.py
@@ -15,6 +15,7 @@
_registered_implementations.add(implcls)
option_to_typename = {
+ "withspecialisedtuple" : ["specialisedtupleobject.W_SpecialisedTupleObject"],
"withsmalltuple" : ["smalltupleobject.W_SmallTupleObject"],
"withsmallint" : ["smallintobject.W_SmallIntObject"],
"withsmalllong" : ["smalllongobject.W_SmallLongObject"],
@@ -261,6 +262,11 @@
self.typeorder[smalltupleobject.W_SmallTupleObject] += [
(tupleobject.W_TupleObject, smalltupleobject.delegate_SmallTuple2Tuple)]
+ if config.objspace.std.withspecialisedtuple:
+ from pypy.objspace.std import specialisedtupleobject
+ self.typeorder[specialisedtupleobject.W_SpecialisedTupleObject] += [
+ (tupleobject.W_TupleObject, specialisedtupleobject.delegate_SpecialisedTuple2Tuple)]
+
# put W_Root everywhere
self.typeorder[W_Root] = []
for type in self.typeorder:
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -29,7 +29,7 @@
from pypy.objspace.std.sliceobject import W_SliceObject
from pypy.objspace.std.smallintobject import W_SmallIntObject
from pypy.objspace.std.stringobject import W_StringObject
-from pypy.objspace.std.tupleobject import W_TupleObject
+from pypy.objspace.std.tupleobject import W_AbstractTupleObject
from pypy.objspace.std.typeobject import W_TypeObject
# types
@@ -391,8 +391,8 @@
self.wrap("expected length %d, got %d" % (expected, got)))
def unpackiterable(self, w_obj, expected_length=-1):
- if isinstance(w_obj, W_TupleObject):
- t = w_obj.wrappeditems[:]
+ if isinstance(w_obj, W_AbstractTupleObject):
+ t = w_obj.getitems_copy()
elif isinstance(w_obj, W_ListObject):
t = w_obj.getitems_copy()
else:
@@ -405,11 +405,13 @@
def fixedview(self, w_obj, expected_length=-1, unroll=False):
""" Fast paths
"""
- if isinstance(w_obj, W_TupleObject):
- t = w_obj.wrappeditems
+ if isinstance(w_obj, W_AbstractTupleObject):
+ t = w_obj.tolist()
elif isinstance(w_obj, W_ListObject):
- # XXX this can copy twice
- t = w_obj.getitems()[:]
+ if unroll:
+ t = w_obj.getitems_unroll()
+ else:
+ t = w_obj.getitems_fixedsize()
else:
if unroll:
return make_sure_not_resized(ObjSpace.unpackiterable_unroll(
@@ -428,8 +430,8 @@
def listview(self, w_obj, expected_length=-1):
if isinstance(w_obj, W_ListObject):
t = w_obj.getitems()
- elif isinstance(w_obj, W_TupleObject):
- t = w_obj.wrappeditems[:]
+ elif isinstance(w_obj, W_AbstractTupleObject):
+ t = w_obj.getitems_copy()
else:
return ObjSpace.unpackiterable(self, w_obj, expected_length)
if expected_length != -1 and len(t) != expected_length:
diff --git a/pypy/objspace/std/smalltupleobject.py b/pypy/objspace/std/smalltupleobject.py
--- a/pypy/objspace/std/smalltupleobject.py
+++ b/pypy/objspace/std/smalltupleobject.py
@@ -9,13 +9,14 @@
from pypy.interpreter import gateway
from pypy.rlib.debug import make_sure_not_resized
from pypy.rlib.unroll import unrolling_iterable
+from pypy.tool.sourcetools import func_with_new_name
from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject
class W_SmallTupleObject(W_AbstractTupleObject):
from pypy.objspace.std.tupletype import tuple_typedef as typedef
- def tolist(self):
- raise NotImplementedError
+ #def tolist(self): --- inherited from W_AbstractTupleObject
+ # raise NotImplementedError
def length(self):
raise NotImplementedError
@@ -51,6 +52,9 @@
l[i] = getattr(self, 'w_value%s' % i)
return l
+ # same source code, but builds and returns a resizable list
+ getitems_copy = func_with_new_name(tolist, 'getitems_copy')
+
def length(self):
return n
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/specialisedtupleobject.py
@@ -0,0 +1,302 @@
+from pypy.interpreter.error import OperationError
+from pypy.objspace.std.model import registerimplementation
+from pypy.objspace.std.register_all import register_all
+from pypy.objspace.std.multimethod import FailedToImplement
+from pypy.objspace.std.tupleobject import W_AbstractTupleObject
+from pypy.objspace.std.tupleobject import W_TupleObject
+from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice
+from pypy.rlib.rarithmetic import intmask
+from pypy.rlib.objectmodel import compute_hash
+from pypy.rlib.unroll import unrolling_iterable
+from pypy.tool.sourcetools import func_with_new_name
+
+class NotSpecialised(Exception):
+ pass
+
+class W_SpecialisedTupleObject(W_AbstractTupleObject):
+ from pypy.objspace.std.tupletype import tuple_typedef as typedef
+ __slots__ = []
+
+ def __repr__(self):
+ """ representation for debugging purposes """
+ reprlist = [repr(item) for item in self._to_unwrapped_list()]
+ return "%s(%s)" % (self.__class__.__name__, ', '.join(reprlist))
+
+ #def tolist(self): --- inherited from W_AbstractTupleObject
+ # raise NotImplementedError
+
+ def _to_unwrapped_list(self):
+ "NOT_RPYTHON"
+ raise NotImplementedError
+
+ def length(self):
+ raise NotImplementedError
+
+ def getitem(self, index):
+ raise NotImplementedError
+
+ def hash(self, space):
+ raise NotImplementedError
+
+ def eq(self, space, w_other):
+ raise NotImplementedError
+
+ def setitem(self, index, w_item):
+ raise NotImplementedError
+
+ def unwrap(self, space):
+ return tuple(self._to_unwrapped_list())
+
+ def delegating(self):
+ pass # for tests only
+
+
+def make_specialised_class(typetuple):
+ assert type(typetuple) == tuple
+
+ nValues = len(typetuple)
+ iter_n = unrolling_iterable(range(nValues))
+
+ class cls(W_SpecialisedTupleObject):
+ def __init__(self, space, *values_w):
+ self.space = space
+ assert len(values_w) == nValues
+ for i in iter_n:
+ w_obj = values_w[i]
+ val_type = typetuple[i]
+ if val_type == int:
+ unwrapped = space.int_w(w_obj)
+ elif val_type == float:
+ unwrapped = space.float_w(w_obj)
+ elif val_type == str:
+ unwrapped = space.str_w(w_obj)
+ elif val_type == object:
+ unwrapped = w_obj
+ else:
+ raise AssertionError
+ setattr(self, 'value%s' % i, unwrapped)
+
+ def length(self):
+ return nValues
+
+ def tolist(self):
+ list_w = [None] * nValues
+ for i in iter_n:
+ value = getattr(self, 'value%s' % i)
+ if typetuple[i] != object:
+ value = self.space.wrap(value)
+ list_w[i] = value
+ return list_w
+
+ # same source code, but builds and returns a resizable list
+ getitems_copy = func_with_new_name(tolist, 'getitems_copy')
+
+ def _to_unwrapped_list(self):
+ "NOT_RPYTHON"
+ list_w = [None] * nValues
+ for i in iter_n:
+ value = getattr(self, 'value%s' % i)
+ if typetuple[i] == object:
+ value = self.space.unwrap(value)
+ list_w[i] = value
+ return list_w
+
+ def hash(self, space):
+ # XXX duplicate logic from tupleobject.py
+ mult = 1000003
+ x = 0x345678
+ z = nValues
+ for i in iter_n:
+ value = getattr(self, 'value%s' % i)
+ if typetuple[i] == object:
+ y = space.int_w(space.hash(value))
+ elif typetuple[i] == float:
+ # get the correct hash for float which is an
+ # integer & other less frequent cases
+ from pypy.objspace.std.floatobject import _hash_float
+ y = _hash_float(space, value)
+ else:
+ y = compute_hash(value)
+ x = (x ^ y) * mult
+ z -= 1
+ mult += 82520 + z + z
+ x += 97531
+ return space.wrap(intmask(x))
+
+ def _eq(self, w_other):
+ if not isinstance(w_other, cls):
+ # if we are not comparing same types, give up
+ raise FailedToImplement
+ for i in iter_n:
+ myval = getattr(self, 'value%s' % i)
+ otherval = getattr(w_other, 'value%s' % i)
+ if typetuple[i] == object:
+ if not self.space.eq_w(myval, otherval):
+ return False
+ else:
+ if myval != otherval:
+ return False
+ else:
+ return True
+
+ def eq(self, space, w_other):
+ return space.newbool(self._eq(w_other))
+
+ def ne(self, space, w_other):
+ return space.newbool(not self._eq(w_other))
+
+## def _compare(self, compare_op, w_other):
+## if not isinstance(w_other, cls):
+## raise FailedToImplement
+## ncmp = min(self.length(), w_other.length())
+## for i in iter_n:
+## if typetuple[i] == Any:#like space.eq on wrapped or two params?
+## raise FailedToImplement
+## if ncmp > i:
+## l_val = getattr(self, 'value%s' % i)
+## r_val = getattr(w_other, 'value%s' % i)
+## if l_val != r_val:
+## return compare_op(l_val, r_val)
+## return compare_op(self.length(), w_other.length())
+
+ def getitem(self, index):
+ for i in iter_n:
+ if index == i:
+ value = getattr(self, 'value%s' % i)
+ if typetuple[i] != object:
+ value = self.space.wrap(value)
+ return value
+ raise IndexError
+
+ cls.__name__ = ('W_SpecialisedTupleObject_' +
+ ''.join([t.__name__[0] for t in typetuple]))
+ _specialisations.append(cls)
+ return cls
+
+# ---------- current specialized versions ----------
+
+_specialisations = []
+Cls_ii = make_specialised_class((int, int))
+Cls_is = make_specialised_class((int, str))
+Cls_io = make_specialised_class((int, object))
+Cls_si = make_specialised_class((str, int))
+Cls_ss = make_specialised_class((str, str))
+Cls_so = make_specialised_class((str, object))
+Cls_oi = make_specialised_class((object, int))
+Cls_os = make_specialised_class((object, str))
+Cls_oo = make_specialised_class((object, object))
+Cls_ff = make_specialised_class((float, float))
+Cls_ooo = make_specialised_class((object, object, object))
+
+def makespecialisedtuple(space, list_w):
+ if len(list_w) == 2:
+ w_arg1, w_arg2 = list_w
+ w_type1 = space.type(w_arg1)
+ w_type2 = space.type(w_arg2)
+ #
+ if w_type1 is space.w_int:
+ if w_type2 is space.w_int:
+ return Cls_ii(space, w_arg1, w_arg2)
+ elif w_type2 is space.w_str:
+ return Cls_is(space, w_arg1, w_arg2)
+ else:
+ return Cls_io(space, w_arg1, w_arg2)
+ #
+ elif w_type1 is space.w_str:
+ if w_type2 is space.w_int:
+ return Cls_si(space, w_arg1, w_arg2)
+ elif w_type2 is space.w_str:
+ return Cls_ss(space, w_arg1, w_arg2)
+ else:
+ return Cls_so(space, w_arg1, w_arg2)
+ #
+ elif w_type1 is space.w_float and w_type2 is space.w_float:
+ return Cls_ff(space, w_arg1, w_arg2)
+ #
+ else:
+ if w_type2 is space.w_int:
+ return Cls_oi(space, w_arg1, w_arg2)
+ elif w_type2 is space.w_str:
+ return Cls_os(space, w_arg1, w_arg2)
+ else:
+ return Cls_oo(space, w_arg1, w_arg2)
+ #
+ elif len(list_w) == 3:
+ return Cls_ooo(space, list_w[0], list_w[1], list_w[2])
+ else:
+ raise NotSpecialised
+
+# ____________________________________________________________
+
+registerimplementation(W_SpecialisedTupleObject)
+
+def delegate_SpecialisedTuple2Tuple(space, w_specialised):
+ w_specialised.delegating()
+ return W_TupleObject(w_specialised.tolist())
+
+def len__SpecialisedTuple(space, w_tuple):
+ return space.wrap(w_tuple.length())
+
+def getitem__SpecialisedTuple_ANY(space, w_tuple, w_index):
+ index = space.getindex_w(w_index, space.w_IndexError, "tuple index")
+ if index < 0:
+ index += w_tuple.length()
+ try:
+ return w_tuple.getitem(index)
+ except IndexError:
+ raise OperationError(space.w_IndexError,
+ space.wrap("tuple index out of range"))
+
+def getitem__SpecialisedTuple_Slice(space, w_tuple, w_slice):
+ length = w_tuple.length()
+ start, stop, step, slicelength = w_slice.indices4(space, length)
+ assert slicelength >= 0
+ subitems = [None] * slicelength
+ for i in range(slicelength):
+ subitems[i] = w_tuple.getitem(start)
+ start += step
+ return space.newtuple(subitems)
+
+def mul_specialisedtuple_times(space, w_tuple, w_times):
+ try:
+ times = space.getindex_w(w_times, space.w_OverflowError)
+ except OperationError, e:
+ if e.match(space, space.w_TypeError):
+ raise FailedToImplement
+ raise
+ if times == 1 and space.type(w_tuple) == space.w_tuple:
+ return w_tuple
+ items = w_tuple.tolist()
+ return space.newtuple(items * times)
+
+def mul__SpecialisedTuple_ANY(space, w_tuple, w_times):
+ return mul_specialisedtuple_times(space, w_tuple, w_times)
+
+def mul__ANY_SpecialisedTuple(space, w_times, w_tuple):
+ return mul_specialisedtuple_times(space, w_tuple, w_times)
+
+def eq__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2):
+ return w_tuple1.eq(space, w_tuple2)
+
+def ne__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2):
+ return w_tuple1.ne(space, w_tuple2)
+
+##from operator import lt, le, ge, gt
+
+##def lt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2):
+## return space.newbool(w_tuple1._compare(lt, w_tuple2))
+
+##def le__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2):
+## return space.newbool(w_tuple1._compare(le, w_tuple2))
+
+##def ge__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2):
+## return space.newbool(w_tuple1._compare(ge, w_tuple2))
+
+##def gt__SpecialisedTuple_SpecialisedTuple(space, w_tuple1, w_tuple2):
+## return space.newbool(w_tuple1._compare(gt, w_tuple2))
+
+def hash__SpecialisedTuple(space, w_tuple):
+ return w_tuple.hash(space)
+
+from pypy.objspace.std import tupletype
+register_all(vars(), tupletype)
diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py
--- a/pypy/objspace/std/test/test_listobject.py
+++ b/pypy/objspace/std/test/test_listobject.py
@@ -48,6 +48,46 @@
for i in range(7):
assert self.space.eq_w(l[i], l2[i])
+ def test_getitems_fixedsize(self):
+ w = self.space.wrap
+ from pypy.objspace.std.listobject import make_range_list
+ rangelist = make_range_list(self.space, 1,1,7)
+ emptylist = W_ListObject(self.space, [])
+ intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)])
+ strlist = W_ListObject(self.space, [w('1'),w('2'),w('3'),w('4'),w('5'),w('6'),w('7')])
+ floatlist = W_ListObject(self.space, [w(1.0),w(2.0),w(3.0),w(4.0),w(5.0),w(6.0),w(7.0)])
+ objlist = W_ListObject(self.space, [w(1),w('2'),w(3.0),w(4),w(5),w(6),w(7)])
+
+ emptylist_copy = emptylist.getitems_fixedsize()
+ assert emptylist_copy == []
+
+ rangelist_copy = rangelist.getitems_fixedsize()
+ intlist_copy = intlist.getitems_fixedsize()
+ strlist_copy = strlist.getitems_fixedsize()
+ floatlist_copy = floatlist.getitems_fixedsize()
+ objlist_copy = objlist.getitems_fixedsize()
+ for i in range(7):
+ assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i))
+ assert self.space.eq_w(intlist_copy[i], intlist.getitem(i))
+ assert self.space.eq_w(strlist_copy[i], strlist.getitem(i))
+ assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i))
+ assert self.space.eq_w(objlist_copy[i], objlist.getitem(i))
+
+ emptylist_copy = emptylist.getitems_unroll()
+ assert emptylist_copy == []
+
+ rangelist_copy = rangelist.getitems_unroll()
+ intlist_copy = intlist.getitems_unroll()
+ strlist_copy = strlist.getitems_unroll()
+ floatlist_copy = floatlist.getitems_unroll()
+ objlist_copy = objlist.getitems_unroll()
+ for i in range(7):
+ assert self.space.eq_w(rangelist_copy[i], rangelist.getitem(i))
+ assert self.space.eq_w(intlist_copy[i], intlist.getitem(i))
+ assert self.space.eq_w(strlist_copy[i], strlist.getitem(i))
+ assert self.space.eq_w(floatlist_copy[i], floatlist.getitem(i))
+ assert self.space.eq_w(objlist_copy[i], objlist.getitem(i))
+
def test_random_getitem(self):
w = self.space.wrap
s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9')
diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/test/test_specialisedtupleobject.py
@@ -0,0 +1,234 @@
+import py, sys
+from pypy.objspace.std.tupleobject import W_TupleObject
+from pypy.objspace.std.specialisedtupleobject import W_SpecialisedTupleObject
+from pypy.objspace.std.specialisedtupleobject import _specialisations
+from pypy.interpreter.error import OperationError
+from pypy.conftest import gettestobjspace, option
+from pypy.objspace.std.test import test_tupleobject
+from pypy.interpreter import gateway
+
+
+for cls in _specialisations:
+ globals()[cls.__name__] = cls
+
+
+class TestW_SpecialisedTupleObject():
+
+ def setup_class(cls):
+ cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True})
+
+ def test_isspecialisedtupleobjectintint(self):
+ w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)])
+ assert isinstance(w_tuple, W_SpecialisedTupleObject_ii)
+
+ def test_isnotspecialisedtupleobject(self):
+ w_tuple = self.space.newtuple([self.space.wrap({})])
+ assert not isinstance(w_tuple, W_SpecialisedTupleObject)
+
+ def test_specialisedtupleclassname(self):
+ w_tuple = self.space.newtuple([self.space.wrap(1), self.space.wrap(2)])
+ assert w_tuple.__class__.__name__ == 'W_SpecialisedTupleObject_ii'
+
+ def test_hash_against_normal_tuple(self):
+ N_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": False})
+ S_space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True})
+
+ def hash_test(values):
+ N_values_w = [N_space.wrap(value) for value in values]
+ S_values_w = [S_space.wrap(value) for value in values]
+ N_w_tuple = N_space.newtuple(N_values_w)
+ S_w_tuple = S_space.newtuple(S_values_w)
+
+ assert isinstance(S_w_tuple, W_SpecialisedTupleObject)
+ assert isinstance(N_w_tuple, W_TupleObject)
+ assert not N_space.is_true(N_space.eq(N_w_tuple, S_w_tuple))
+ assert S_space.is_true(S_space.eq(N_w_tuple, S_w_tuple))
+ assert S_space.is_true(S_space.eq(N_space.hash(N_w_tuple), S_space.hash(S_w_tuple)))
+
+ hash_test([1,2])
+ hash_test([1.5,2.8])
+ hash_test([1.0,2.0])
+ hash_test(['arbitrary','strings'])
+ hash_test([1,(1,2,3,4)])
+ hash_test([1,(1,2)])
+ hash_test([1,('a',2)])
+ hash_test([1,()])
+ hash_test([1,2,3])
+
+
+class AppTestW_SpecialisedTupleObject:
+
+ def setup_class(cls):
+ cls.space = gettestobjspace(**{"objspace.std.withspecialisedtuple": True})
+ def forbid_delegation(space, w_tuple):
+ def delegation_forbidden():
+ # haaaack
+ co = sys._getframe(2).f_code
+ if co.co_name.startswith('_mm_repr_tuple'):
+ return
+ raise OperationError(space.w_ReferenceError, w_tuple)
+ w_tuple.delegating = delegation_forbidden
+ return w_tuple
+ if option.runappdirect:
+ cls.w_forbid_delegation = lambda self, x: x
+ cls.test_delegation = lambda self: skip("runappdirect")
+ else:
+ cls.w_forbid_delegation = cls.space.wrap(
+ gateway.interp2app(forbid_delegation))
+
+ def w_isspecialised(self, obj, expected=''):
+ import __pypy__
+ r = __pypy__.internal_repr(obj)
+ print obj, '==>', r, ' (expected: %r)' % expected
+ return ("SpecialisedTupleObject" + expected) in r
+
+ def test_createspecialisedtuple(self):
+ spec = {int: 'i',
+ float: 'f',
+ str: 's',
+ list: 'o'}
+ #
+ for x in [42, 4.2, "foo", []]:
+ for y in [43, 4.3, "bar", []]:
+ expected1 = spec[type(x)]
+ expected2 = spec[type(y)]
+ if (expected1 == 'f') ^ (expected2 == 'f'):
+ if expected1 == 'f': expected1 = 'o'
+ if expected2 == 'f': expected2 = 'o'
+ obj = (x, y)
+ assert self.isspecialised(obj, '_' + expected1 + expected2)
+ #
+ obj = (1, 2, 3)
+ assert self.isspecialised(obj, '_ooo')
+
+ def test_delegation(self):
+ t = self.forbid_delegation((42, 43))
+ raises(ReferenceError, t.__getslice__, 0, 1)
+
+ def test_len(self):
+ t = self.forbid_delegation((42,43))
+ assert len(t) == 2
+
+ def test_notspecialisedtuple(self):
+ assert not self.isspecialised((42,43,44,45))
+ assert not self.isspecialised((1.5,))
+
+ def test_slicing_to_specialised(self):
+ t = (1, 2, 3)
+ assert self.isspecialised(t[0:2])
+ t = (1, '2', 3)
+ assert self.isspecialised(t[0:5:2])
+
+ def test_adding_to_specialised(self):
+ t = (1,)
+ assert self.isspecialised(t + (2,))
+
+ def test_multiply_to_specialised(self):
+ t = (1,)
+ assert self.isspecialised(t * 2)
+
+ def test_slicing_from_specialised(self):
+ t = (1, 2, 3)
+ assert t[0:2:1] == (1, 2)
+
+ def test_eq_no_delegation(self):
+ t = (1,)
+ a = self.forbid_delegation(t + (2,))
+ b = (1, 2)
+ assert a == b
+
+ c = (2, 1)
+ assert not a == c
+
+ def test_eq_can_delegate(self):
+ a = (1,2)
+ b = (1,3,2)
+ assert not a == b
+
+ values = [2, 2L, 2.0, 1, 1L, 1.0]
+ for x in values:
+ for y in values:
+ assert ((1,2) == (x,y)) == (1 == x and 2 == y)
+
+ def test_neq(self):
+ a = self.forbid_delegation((1,2))
+ b = (1,)
+ b = b+(2,)
+ assert not a != b
+
+ c = (1,3)
+ assert a != c
+
+ def test_ordering(self):
+ a = (1,2) #self.forbid_delegation((1,2)) --- code commented out
+ assert a < (2,2)
+ assert a < (1,3)
+ assert not a < (1,2)
+
+ assert a <= (2,2)
+ assert a <= (1,2)
+ assert not a <= (1,1)
+
+ assert a >= (0,2)
+ assert a >= (1,2)
+ assert not a >= (1,3)
+
+ assert a > (0,2)
+ assert a > (1,1)
+ assert not a > (1,3)
+
+ assert (2,2) > a
+ assert (1,3) > a
+ assert not (1,2) > a
+
+ assert (2,2) >= a
+ assert (1,2) >= a
+ assert not (1,1) >= a
+
+ assert (0,2) <= a
+ assert (1,2) <= a
+ assert not (1,3) <= a
+
+ assert (0,2) < a
+ assert (1,1) < a
+ assert not (1,3) < a
+
+ def test_hash(self):
+ a = (1,2)
+ b = (1,)
+ b += (2,) # else a and b refer to same constant
+ assert hash(a) == hash(b)
+
+ c = (2,4)
+ assert hash(a) != hash(c)
+
+ assert hash(a) == hash((1L, 2L)) == hash((1.0, 2.0)) == hash((1.0, 2L))
+
+ def test_getitem(self):
+ t = self.forbid_delegation((5,3))
+ assert (t)[0] == 5
+ assert (t)[1] == 3
+ assert (t)[-1] == 3
+ assert (t)[-2] == 5
+ raises(IndexError, "t[2]")
+ raises(IndexError, "t[-3]")
+
+ def test_three_tuples(self):
+ b = self.forbid_delegation((1, 2, 3))
+ c = (1,)
+ d = c + (2, 3)
+ assert self.isspecialised(d)
+ assert b == d
+
+ def test_mongrel(self):
+ a = self.forbid_delegation((1, 2.2, '333'))
+ assert self.isspecialised(a)
+ assert len(a) == 3
+ assert a[0] == 1 and a[1] == 2.2 and a[2] == '333'
+ b = ('333',)
+ assert a == (1, 2.2,) + b
+ assert not a != (1, 2.2) + b
+
+
+class AppTestAll(test_tupleobject.AppTestW_TupleObject):
+ pass
diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py
--- a/pypy/objspace/std/test/test_tupleobject.py
+++ b/pypy/objspace/std/test/test_tupleobject.py
@@ -280,6 +280,8 @@
assert () * 10 == ()
assert (5,) * 3 == (5,5,5)
assert (5,2) * 2 == (5,2,5,2)
+
+ def test_mul_identity(self):
t = (1,2,3)
assert (t * 1) is t
diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py
--- a/pypy/objspace/std/tupleobject.py
+++ b/pypy/objspace/std/tupleobject.py
@@ -12,6 +12,15 @@
class W_AbstractTupleObject(W_Object):
__slots__ = ()
+ def tolist(self):
+ "Returns the items, as a fixed-size list."
+ raise NotImplementedError
+
+ def getitems_copy(self):
+ "Returns a copy of the items, as a resizable list."
+ raise NotImplementedError
+
+
class W_TupleObject(W_AbstractTupleObject):
from pypy.objspace.std.tupletype import tuple_typedef as typedef
_immutable_fields_ = ['wrappeditems[*]']
@@ -29,6 +38,12 @@
items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems]
return tuple(items)
+ def tolist(self):
+ return self.wrappeditems
+
+ def getitems_copy(self):
+ return self.wrappeditems[:] # returns a resizable list
+
registerimplementation(W_TupleObject)
diff --git a/pypy/objspace/std/tupletype.py b/pypy/objspace/std/tupletype.py
--- a/pypy/objspace/std/tupletype.py
+++ b/pypy/objspace/std/tupletype.py
@@ -5,6 +5,14 @@
def wraptuple(space, list_w):
from pypy.objspace.std.tupleobject import W_TupleObject
+
+ if space.config.objspace.std.withspecialisedtuple:
+ from specialisedtupleobject import makespecialisedtuple, NotSpecialised
+ try:
+ return makespecialisedtuple(space, list_w)
+ except NotSpecialised:
+ pass
+
if space.config.objspace.std.withsmalltuple:
from pypy.objspace.std.smalltupleobject import W_SmallTupleObject2
from pypy.objspace.std.smalltupleobject import W_SmallTupleObject3
diff --git a/pypy/rlib/rdynload.py b/pypy/rlib/rdynload.py
--- a/pypy/rlib/rdynload.py
+++ b/pypy/rlib/rdynload.py
@@ -115,7 +115,8 @@
if _WIN32:
DLLHANDLE = rwin32.HMODULE
- def dlopen(name):
+ def dlopen(name, mode=-1):
+        # mode is unused on windows, but a consistent signature
res = rwin32.LoadLibrary(name)
if not res:
err = rwin32.GetLastError()
diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py
--- a/pypy/rlib/ropenssl.py
+++ b/pypy/rlib/ropenssl.py
@@ -110,6 +110,10 @@
'struct GENERAL_NAME_st',
[('type', rffi.INT),
])
+ EVP_MD_st = rffi_platform.Struct(
+ 'EVP_MD',
+ [('md_size', rffi.INT),
+ ('block_size', rffi.INT)])
EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD')
EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX')
@@ -258,7 +262,7 @@
[BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509)
EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci)
-EVP_MD = rffi.COpaquePtr('EVP_MD', compilation_info=eci)
+EVP_MD = lltype.Ptr(EVP_MD_st)
OpenSSL_add_all_digests = external(
'OpenSSL_add_all_digests', [], lltype.Void)
diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py
--- a/pypy/rpython/memory/gc/minimark.py
+++ b/pypy/rpython/memory/gc/minimark.py
@@ -2,8 +2,7 @@
Environment variables can be used to fine-tune the following parameters:
- PYPY_GC_NURSERY The nursery size. Defaults to half the size of
- the L2 cache. Try values like '1.2MB'. Small values
+ PYPY_GC_NURSERY The nursery size. Defaults to '4MB'. Small values
(like 1 or 1KB) are useful for debugging.
PYPY_GC_MAJOR_COLLECT Major collection memory factor. Default is '1.82',
@@ -61,7 +60,7 @@
#
# * young objects: allocated in the nursery if they are not too large, or
# raw-malloced otherwise. The nursery is a fixed-size memory buffer of
-# half the size of the L2 cache. When full, we do a minor collection;
+# 4MB by default. When full, we do a minor collection;
# the surviving objects from the nursery are moved outside, and the
# non-surviving raw-malloced objects are freed. All surviving objects
# become old.
@@ -329,7 +328,8 @@
# size (needed to handle mallocs just below 'large_objects') but
# hacking at the current nursery position in collect_and_reserve().
if newsize <= 0:
- newsize = env.estimate_best_nursery_size()
+ newsize = 4*1024*1024 # fixed to 4MB by default
+ # (it was env.estimate_best_nursery_size())
if newsize <= 0:
newsize = defaultsize
if newsize < minsize:
diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py
--- a/pypy/tool/pytest/appsupport.py
+++ b/pypy/tool/pytest/appsupport.py
@@ -63,7 +63,10 @@
exec_ = eval
def repr(self, w_value):
- return self.space.unwrap(self.space.repr(w_value))
+ try:
+ return self.space.unwrap(self.space.repr(w_value))
+ except Exception, e:
+ return "<Sorry, exception while trying to do repr, %r>"%e
def is_true(self, w_value):
return self.space.is_true(w_value)
diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py
--- a/pypy/translator/goal/app_main.py
+++ b/pypy/translator/goal/app_main.py
@@ -672,7 +672,7 @@
def pypy_initial_path(s):
from pypy.module.sys.state import getinitialpath
try:
- return getinitialpath(s)
+ return getinitialpath(None, s)
except OSError:
return None
diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py
--- a/pypy/translator/goal/test2/test_app_main.py
+++ b/pypy/translator/goal/test2/test_app_main.py
@@ -821,6 +821,8 @@
newpath = app_main.get_library_path('/tmp/pypy-c') # stdlib not found
assert newpath == sys.path
newpath = app_main.get_library_path(self.fake_exe)
+ if newpath[0].endswith('__extensions__'):
+ newpath = newpath[1:]
# we get at least 'expected_path', and maybe more (e.g.plat-linux2)
assert newpath[:len(self.expected_path)] == self.expected_path
finally:
diff --git a/pypy/translator/sandbox/sandlib.py b/pypy/translator/sandbox/sandlib.py
--- a/pypy/translator/sandbox/sandlib.py
+++ b/pypy/translator/sandbox/sandlib.py
@@ -6,11 +6,10 @@
import py
import sys, os, posixpath, errno, stat, time
-from pypy.rpython.module.ll_os_stat import s_StatResult
from pypy.tool.ansi_print import AnsiLog
-from pypy.rlib.rarithmetic import r_longlong
import subprocess
from pypy.tool.killsubprocess import killsubprocess
+from pypy.translator.sandbox.vfs import UID, GID
class MyAnsiLog(AnsiLog):
KW_TO_COLOR = {
@@ -34,6 +33,10 @@
from pypy.tool.lib_pypy import import_from_lib_pypy
marshal = import_from_lib_pypy('marshal')
+# Non-marshal result types
+RESULTTYPE_STATRESULT = object()
+RESULTTYPE_LONGLONG = object()
+
def read_message(f, timeout=None):
# warning: 'timeout' is not really reliable and should only be used
# for testing. Also, it doesn't work if the file f does any buffering.
@@ -50,12 +53,30 @@
marshal.dump(msg, g)
else:
marshal.dump(msg, g, 0)
+ elif resulttype is RESULTTYPE_STATRESULT:
+ # Hand-coded marshal for stat results that mimics what rmarshal expects.
+ # marshal.dump(tuple(msg)) would have been too easy. rmarshal insists
+ # on 64-bit ints at places, even when the value fits in 32 bits.
+ import struct
+ st = tuple(msg)
+ fmt = "iIIiiiIfff"
+ buf = []
+ buf.append(struct.pack("<ci", '(', len(st)))
+ for c, v in zip(fmt, st):
+ if c == 'i':
+ buf.append(struct.pack("<ci", c, v))
+ elif c == 'I':
+ buf.append(struct.pack("<cq", c, v))
+ elif c == 'f':
+ fstr = "%g" % v
+ buf.append(struct.pack("<cB", c, len(fstr)))
+ buf.append(fstr)
+ g.write(''.join(buf))
+ elif resulttype is RESULTTYPE_LONGLONG:
+ import struct
+ g.write(struct.pack("<cq", 'I', msg))
else:
- # use the exact result type for encoding
- from pypy.rlib.rmarshal import get_marshaller
- buf = []
- get_marshaller(resulttype)(buf, msg)
- g.write(''.join(buf))
+ raise Exception("Can't marshal: %r (%r)" % (msg, resulttype))
# keep the table in sync with rsandbox.reraise_error()
EXCEPTION_TABLE = [
@@ -390,7 +411,7 @@
def __init__(self, *args, **kwds):
super(VirtualizedSandboxedProc, self).__init__(*args, **kwds)
self.virtual_root = self.build_virtual_root()
- self.open_fds = {} # {virtual_fd: real_file_object}
+ self.open_fds = {} # {virtual_fd: (real_file_object, node)}
def build_virtual_root(self):
raise NotImplementedError("must be overridden")
@@ -425,26 +446,39 @@
def do_ll_os__ll_os_stat(self, vpathname):
node = self.get_node(vpathname)
return node.stat()
- do_ll_os__ll_os_stat.resulttype = s_StatResult
+ do_ll_os__ll_os_stat.resulttype = RESULTTYPE_STATRESULT
do_ll_os__ll_os_lstat = do_ll_os__ll_os_stat
def do_ll_os__ll_os_isatty(self, fd):
return self.virtual_console_isatty and fd in (0, 1, 2)
- def allocate_fd(self, f):
+ def allocate_fd(self, f, node=None):
for fd in self.virtual_fd_range:
if fd not in self.open_fds:
- self.open_fds[fd] = f
+ self.open_fds[fd] = (f, node)
return fd
else:
raise OSError(errno.EMFILE, "trying to open too many files")
- def get_file(self, fd):
+ def get_fd(self, fd, throw=True):
+ """Get the objects implementing file descriptor `fd`.
+
+ Returns a pair, (open file, vfs node)
+
+ `throw`: if true, raise OSError for bad fd, else return (None, None).
+ """
try:
- return self.open_fds[fd]
+ f, node = self.open_fds[fd]
except KeyError:
- raise OSError(errno.EBADF, "bad file descriptor")
+ if throw:
+ raise OSError(errno.EBADF, "bad file descriptor")
+ return None, None
+ return f, node
+
+ def get_file(self, fd, throw=True):
+ """Return the open file for file descriptor `fd`."""
+ return self.get_fd(fd, throw)[0]
def do_ll_os__ll_os_open(self, vpathname, flags, mode):
node = self.get_node(vpathname)
@@ -452,7 +486,7 @@
raise OSError(errno.EPERM, "write access denied")
# all other flags are ignored
f = node.open()
- return self.allocate_fd(f)
+ return self.allocate_fd(f, node)
def do_ll_os__ll_os_close(self, fd):
f = self.get_file(fd)
@@ -460,9 +494,8 @@
f.close()
def do_ll_os__ll_os_read(self, fd, size):
- try:
- f = self.open_fds[fd]
- except KeyError:
+ f = self.get_file(fd, throw=False)
+ if f is None:
return super(VirtualizedSandboxedProc, self).do_ll_os__ll_os_read(
fd, size)
else:
@@ -471,11 +504,16 @@
# don't try to read more than 256KB at once here
return f.read(min(size, 256*1024))
+ def do_ll_os__ll_os_fstat(self, fd):
+ f, node = self.get_fd(fd)
+ return node.stat()
+ do_ll_os__ll_os_fstat.resulttype = RESULTTYPE_STATRESULT
+
def do_ll_os__ll_os_lseek(self, fd, pos, how):
f = self.get_file(fd)
f.seek(pos, how)
return f.tell()
- do_ll_os__ll_os_lseek.resulttype = r_longlong
+ do_ll_os__ll_os_lseek.resulttype = RESULTTYPE_LONGLONG
def do_ll_os__ll_os_getcwd(self):
return self.virtual_cwd
@@ -488,6 +526,14 @@
node = self.get_node(vpathname)
return node.keys()
+ def do_ll_os__ll_os_getuid(self):
+ return UID
+ do_ll_os__ll_os_geteuid = do_ll_os__ll_os_getuid
+
+ def do_ll_os__ll_os_getgid(self):
+ return GID
+ do_ll_os__ll_os_getegid = do_ll_os__ll_os_getgid
+
class VirtualizedSocketProc(VirtualizedSandboxedProc):
""" Extends VirtualizedSandboxProc with socket
@@ -511,13 +557,13 @@
def do_ll_os__ll_os_read(self, fd, size):
if fd in self.sockets:
- return self.open_fds[fd].recv(size)
+ return self.get_file(fd).recv(size)
return super(VirtualizedSocketProc, self).do_ll_os__ll_os_read(
fd, size)
def do_ll_os__ll_os_write(self, fd, data):
if fd in self.sockets:
- return self.open_fds[fd].send(data)
+ return self.get_file(fd).send(data)
return super(VirtualizedSocketProc, self).do_ll_os__ll_os_write(
fd, data)
diff --git a/pypy/translator/sandbox/test/test_sandbox.py b/pypy/translator/sandbox/test/test_sandbox.py
--- a/pypy/translator/sandbox/test/test_sandbox.py
+++ b/pypy/translator/sandbox/test/test_sandbox.py
@@ -80,7 +80,7 @@
assert tail == ""
def test_stat_ftruncate():
- from pypy.rpython.module.ll_os_stat import s_StatResult
+ from pypy.translator.sandbox.sandlib import RESULTTYPE_STATRESULT
from pypy.rlib.rarithmetic import r_longlong
r0x12380000007 = r_longlong(0x12380000007)
@@ -93,7 +93,7 @@
g, f = os.popen2(exe, "t", 0)
st = os.stat_result((55, 0, 0, 0, 0, 0, 0x12380000007, 0, 0, 0))
expect(f, g, "ll_os.ll_os_stat", ("somewhere",), st,
- resulttype = s_StatResult)
+ resulttype = RESULTTYPE_STATRESULT)
expect(f, g, "ll_os.ll_os_ftruncate", (55, 0x12380000007), None)
g.close()
tail = f.read()
diff --git a/pypy/translator/sandbox/test/test_sandlib.py b/pypy/translator/sandbox/test/test_sandlib.py
--- a/pypy/translator/sandbox/test/test_sandlib.py
+++ b/pypy/translator/sandbox/test/test_sandlib.py
@@ -1,14 +1,17 @@
import py
-import os, StringIO
+import errno, os, StringIO
from pypy.tool.sourcetools import func_with_new_name
from pypy.rpython.lltypesystem import rffi
from pypy.translator.sandbox.sandlib import SandboxedProc
from pypy.translator.sandbox.sandlib import SimpleIOSandboxedProc
+from pypy.translator.sandbox.sandlib import VirtualizedSandboxedProc
from pypy.translator.sandbox.sandlib import VirtualizedSocketProc
from pypy.translator.sandbox.test.test_sandbox import compile
+from pypy.translator.sandbox.vfs import Dir, File, RealDir, RealFile
-class MySandboxedProc(SandboxedProc):
+class MockSandboxedProc(SandboxedProc):
+ """A sandbox process wrapper that replays expected syscalls."""
def __init__(self, args, expected):
SandboxedProc.__init__(self, args)
@@ -48,7 +51,7 @@
return 0
exe = compile(entry_point)
- proc = MySandboxedProc([exe, 'x1', 'y2'], expected = [
+ proc = MockSandboxedProc([exe, 'x1', 'y2'], expected = [
("open", ("/tmp/foobar", os.O_RDONLY, 0777), 77),
("read", (77, 123), "he\x00llo"),
("write", (77, "world\x00!\x00"), 42),
@@ -69,7 +72,7 @@
return n
exe = compile(entry_point)
- proc = MySandboxedProc([exe, 'spam', 'egg'], expected = [
+ proc = MockSandboxedProc([exe, 'spam', 'egg'], expected = [
("foobar", ("spam",), 2),
("foobar", ("egg",), 0),
])
@@ -122,9 +125,140 @@
return 0
exe = compile(entry_point)
- proc = MySandboxedProc([exe], expected = [
+ proc = MockSandboxedProc([exe], expected = [
("open", ("/tmp/foobar", os.O_RDONLY, 0777), OSError(-42, "baz")),
("close", (-42,), None),
])
proc.handle_forever()
assert proc.seen == len(proc.expected)
+
+
+class SandboxedProcWithFiles(VirtualizedSandboxedProc, SimpleIOSandboxedProc):
+ """A sandboxed process with a simple virtualized filesystem.
+
+ For testing file operations.
+
+ """
+ def build_virtual_root(self):
+ return Dir({
+ 'hi.txt': File("Hello, world!\n"),
+ 'this.pyc': RealFile(__file__),
+ })
+
+def test_too_many_opens():
+ def entry_point(argv):
+ try:
+ open_files = []
+ for i in range(500):
+ fd = os.open('/hi.txt', os.O_RDONLY, 0777)
+ open_files.append(fd)
+ txt = os.read(fd, 100)
+ if txt != "Hello, world!\n":
+ print "Wrong content: %s" % txt
+ except OSError, e:
+ # We expect to get EMFILE, for opening too many files.
+ if e.errno != errno.EMFILE:
+ print "OSError: %s!" % (e.errno,)
+ else:
+ print "We opened 500 fake files! Shouldn't have been able to."
+
+ for fd in open_files:
+ os.close(fd)
+
+ try:
+ open_files = []
+ for i in range(500):
+ fd = os.open('/this.pyc', os.O_RDONLY, 0777)
+ open_files.append(fd)
+ except OSError, e:
+ # We expect to get EMFILE, for opening too many files.
+ if e.errno != errno.EMFILE:
+ print "OSError: %s!" % (e.errno,)
+ else:
+ print "We opened 500 real files! Shouldn't have been able to."
+
+ print "All ok!"
+ return 0
+ exe = compile(entry_point)
+
+ proc = SandboxedProcWithFiles([exe])
+ output, error = proc.communicate("")
+ assert output == "All ok!\n"
+ assert error == ""
+
+def test_fstat():
+ def compare(a, b, i):
+ if a != b:
+ print "stat and fstat differ @%d: %s != %s" % (i, a, b)
+
+ def entry_point(argv):
+ try:
+ # Open a file, and compare stat and fstat
+ fd = os.open('/hi.txt', os.O_RDONLY, 0777)
+ st = os.stat('/hi.txt')
+ fs = os.fstat(fd)
+ # RPython requires the index for stat to be a constant.. :(
+ compare(st[0], fs[0], 0)
+ compare(st[1], fs[1], 1)
+ compare(st[2], fs[2], 2)
+ compare(st[3], fs[3], 3)
+ compare(st[4], fs[4], 4)
+ compare(st[5], fs[5], 5)
+ compare(st[6], fs[6], 6)
+ compare(st[7], fs[7], 7)
+ compare(st[8], fs[8], 8)
+ compare(st[9], fs[9], 9)
+ except OSError, e:
+ print "OSError: %s" % (e.errno,)
+ print "All ok!"
+ return 0
+ exe = compile(entry_point)
+
+ proc = SandboxedProcWithFiles([exe])
+ output, error = proc.communicate("")
+ assert output == "All ok!\n"
+ assert error == ""
+
+def test_lseek():
+ def char_should_be(c, should):
+ if c != should:
+ print "Wrong char: '%s' should be '%s'" % (c, should)
+
+ def entry_point(argv):
+ fd = os.open('/hi.txt', os.O_RDONLY, 0777)
+ char_should_be(os.read(fd, 1), "H")
+ new = os.lseek(fd, 3, os.SEEK_CUR)
+ if new != 4:
+ print "Wrong offset, %d should be 4" % new
+ char_should_be(os.read(fd, 1), "o")
+ new = os.lseek(fd, -3, os.SEEK_END)
+ if new != 11:
+ print "Wrong offset, %d should be 11" % new
+ char_should_be(os.read(fd, 1), "d")
+ new = os.lseek(fd, 7, os.SEEK_SET)
+ if new != 7:
+ print "Wrong offset, %d should be 7" % new
+ char_should_be(os.read(fd, 1), "w")
+ print "All ok!"
+ return 0
+ exe = compile(entry_point)
+
+ proc = SandboxedProcWithFiles([exe])
+ output, error = proc.communicate("")
+ assert output == "All ok!\n"
+ assert error == ""
+
+def test_getuid():
+ def entry_point(argv):
+ import os
+ print "uid is %s" % os.getuid()
+ print "euid is %s" % os.geteuid()
+ print "gid is %s" % os.getgid()
+ print "egid is %s" % os.getegid()
+ return 0
+ exe = compile(entry_point)
+
+ proc = SandboxedProcWithFiles([exe])
+ output, error = proc.communicate("")
+ assert output == "uid is 1000\neuid is 1000\ngid is 1000\negid is 1000\n"
+ assert error == ""
More information about the pypy-commit
mailing list