From noreply at buildbot.pypy.org Thu Sep 1 02:02:26 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 02:02:26 +0200 (CEST) Subject: [pypy-commit] pypy default: micronumpy: added indexing by tuples to get/setitem Message-ID: <20110901000226.11F2B8204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: Changeset: r46964:93adb3d59791 Date: 2011-08-31 18:01 -0600 http://bitbucket.org/pypy/pypy/changeset/93adb3d59791/ Log: micronumpy: added indexing by tuples to get/setitem diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,6 +3,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature +from pypy.objspace.std.sliceobject import W_SliceObject from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype from pypy.tool.sourcetools import func_with_new_name @@ -217,7 +218,15 @@ return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") def descr_getitem(self, space, w_idx): - # TODO: indexing by tuples + # TODO: indexing by arrays and lists + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length == 0: + return space.wrap(self) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: # Single index @@ -231,8 +240,19 @@ return space.wrap(res) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by tuples and lists + # TODO: indexing by arrays and lists self.invalidated() + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length > 1: # only one dimension for now. 
+ raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if length == 0: + w_idx = W_SliceObject(space.wrap(0), + space.wrap(self.find_size()), + space.wrap(1)) + else: + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -102,6 +102,16 @@ assert a[-1] == 8 raises(IndexError, "a[-6]") + def test_getitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)]") + for i in xrange(5): + assert a[(i,)] == i + b = a[()] + for i in xrange(5): + assert a[i] == b[i] + def test_setitem(self): from numpy import array a = array(range(5)) @@ -110,6 +120,17 @@ raises(IndexError, "a[5] = 0.0") raises(IndexError, "a[-6] = 3.0") + def test_setitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)] = [0,1]") + for i in xrange(5): + a[(i,)] = i+1 + assert a[i] == i+1 + a[()] = range(5) + for i in xrange(5): + assert a[i] == i + def test_setslice_array(self): from numpy import array a = array(range(5)) @@ -541,4 +562,4 @@ a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") \ No newline at end of file + raises(ValueError, fromstring, "abc") From noreply at buildbot.pypy.org Thu Sep 1 03:51:42 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 03:51:42 +0200 (CEST) Subject: [pypy-commit] pypy default: fix my slice creation in micronumpy Message-ID: <20110901015142.874458204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: Changeset: r46965:e31726cdf996 Date: 2011-08-31 19:51 -0600 http://bitbucket.org/pypy/pypy/changeset/e31726cdf996/ Log: fix my slice creation in micronumpy diff --git 
a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,7 +3,6 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature -from pypy.objspace.std.sliceobject import W_SliceObject from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype from pypy.tool.sourcetools import func_with_new_name @@ -248,7 +247,7 @@ raise OperationError(space.w_IndexError, space.wrap("invalid index")) if length == 0: - w_idx = W_SliceObject(space.wrap(0), + w_idx = space.newslice(space.wrap(0), space.wrap(self.find_size()), space.wrap(1)) else: From noreply at buildbot.pypy.org Thu Sep 1 05:20:51 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 05:20:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Add Int16 dtype with tests Message-ID: <20110901032051.090EF8204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: Changeset: r46966:4218de3c70f9 Date: 2011-08-31 21:20 -0600 http://bitbucket.org/pypy/pypy/changeset/4218de3c70f9/ Log: Add Int16 dtype with tests diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -286,6 +286,19 @@ class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): def unwrap(self, space, w_item): return self.adapt_val(space.int_w(space.int(w_item))) +assert W_Int8Dtype.num_bytes == 1 + +W_Int16Dtype = create_low_level_dtype( + num = 3, kind = SIGNEDLTR, name = "int16", + aliases = ["int16"], + applevel_types = [], + T = rffi.SHORT, + valtype = rffi.SHORT._type, +) +class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): + def unwrap(self, space, w_item): + return self.adapt_val(space.int_w(space.int(w_item))) +assert 
W_Int16Dtype.num_bytes == 2 W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", @@ -323,7 +336,7 @@ ALL_DTYPES = [ W_BoolDtype, - W_Int8Dtype, W_Int32Dtype, W_Int64Dtype, + W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, W_Float64Dtype ] @@ -353,4 +366,4 @@ kind = interp_attrproperty("kind", cls=W_Dtype), shape = GetSetProperty(W_Dtype.descr_get_shape), ) -W_Dtype.typedef.acceptable_as_base_class = False \ No newline at end of file +W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -82,10 +82,20 @@ assert a[1] == 1 def test_add_int8(self): - from numpy import array + from numpy import array, dtype a = array(range(5), dtype="int8") b = a + a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == i * 2 + + def test_add_int16(self): + from numpy import array, dtype + + a = array(range(5), dtype="int16") + b = a + a + assert b.dtype is dtype("int16") for i in range(5): assert b[i] == i * 2 @@ -98,4 +108,4 @@ from numpy import dtype # You can't subclass dtype - raises(TypeError, type, "Foo", (dtype,), {}) \ No newline at end of file + raises(TypeError, type, "Foo", (dtype,), {}) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -84,6 +84,9 @@ a = array(range(5), dtype="int8") assert str(a) == "[0 1 2 3 4]" + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + def test_str_slice(self): from numpy import array, zeros a = array(range(5), float) From noreply at buildbot.pypy.org Thu Sep 1 05:25:39 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Sep 2011 05:25:39 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: merged 
default. Message-ID: <20110901032539.368278204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r46967:e13ef7acdedb Date: 2011-08-31 15:08 -0400 http://bitbucket.org/pypy/pypy/changeset/e13ef7acdedb/ Log: merged default. diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,13 +25,14 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types self.count_fields_if_immut = count_fields_if_immut + self.ffi_flags = ffi_flags def get_arg_types(self): return self.arg_types @@ -67,6 +68,9 @@ def count_fields_if_immutable(self): return self.count_fields_if_immut + def get_ffi_flags(self): + return self.ffi_flags + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -114,14 +118,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): key = (ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) try: return self._descrs[key] except KeyError: descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) self._descrs[key] = descr return descr @@ -326,7 +330,7 @@ return self.getdescr(0, token[0], extrainfo=extrainfo, arg_types=''.join(arg_types)) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind from 
pypy.jit.backend.llsupport.ffisupport import UnsupportedKind arg_types = [] @@ -339,7 +343,8 @@ except UnsupportedKind: return None return self.getdescr(0, reskind, extrainfo=extrainfo, - arg_types=''.join(arg_types)) + arg_types=''.join(arg_types), + ffi_flags=ffi_flags) def grab_exc_value(self): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -260,10 +260,12 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack + ffi_flags = 0 - def __init__(self, arg_classes, extrainfo=None): + def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo + self.ffi_flags = ffi_flags def __repr__(self): res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) @@ -284,6 +286,13 @@ def get_extra_info(self): return self.extrainfo + def get_ffi_flags(self): + return self.ffi_flags + + def get_call_conv(self): + from pypy.rlib.clibffi import get_call_conv + return get_call_conv(self.ffi_flags) + def get_arg_types(self): return self.arg_classes @@ -391,8 +400,8 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo) + def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) self._result_sign = result_sign diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -8,7 +8,7 @@ class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None): +def get_call_descr_dynamic(cpu, ffi_args, 
ffi_result, extrainfo=None, ffi_flags=0): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: @@ -20,18 +20,24 @@ if reskind == history.INT: size = intmask(ffi_result.c_size) signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo) + return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo) + return NonGcPtrCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo) + return FloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo) + return VoidCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo) + return LongLongCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == 'S': SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo) + return SingleFloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) assert False def get_ffi_type_kind(cpu, ffi_type): diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -257,10 +257,10 @@ def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport import ffisupport return ffisupport.get_call_descr_dynamic(self, ffi_args, ffi_result, - extrainfo) + extrainfo, ffi_flags) def get_overflow_error(self): ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable) diff --git 
a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -13,17 +13,19 @@ def test_call_descr_dynamic(): args = [types.sint, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.sint) + descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42) assert isinstance(descr, DynamicIntCallDescr) assert descr.arg_classes == 'ii' + assert descr.get_ffi_flags() == 42 args = [types.sint, types.double, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.void) assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), - args, types.void) + args, types.void, ffi_flags=43) assert isinstance(descr, VoidCallDescr) assert descr.arg_classes == 'ifi' + assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8) assert isinstance(descr, DynamicIntCallDescr) @@ -39,14 +41,16 @@ descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong) assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), - [], types.slonglong) + [], types.slonglong, ffi_flags=43) assert isinstance(descr, LongLongCallDescr) + assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong descr = get_call_descr_dynamic(FakeCPU(), [], types.float) assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), - [], types.float) + [], types.float, ffi_flags=44) SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) assert isinstance(descr, SingleFloatCallDescr) + assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -468,7 +468,7 @@ assert longlong.getrealfloat(x) 
== 3.5 - 42 def test_call(self): - from pypy.rlib.libffi import types + from pypy.rlib.libffi import types, FUNCFLAG_CDECL def func_int(a, b): return a + b @@ -497,7 +497,8 @@ assert res.value == 2 * num # then, try it with the dynamic calldescr dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(num), BoxInt(num)], 'int', descr=dyn_calldescr) @@ -1944,7 +1945,7 @@ assert values == [1, 10] def test_call_to_c_function(self): - from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.libffi import CDLL, types, ArgChain, FUNCFLAG_CDECL from pypy.rpython.lltypesystem.ll2ctypes import libc_name libc = CDLL(libc_name) c_tolower = libc.getpointer('tolower', [types.uchar], types.sint) @@ -1955,7 +1956,8 @@ func_adr = llmemory.cast_ptr_to_adr(c_tolower.funcsym) funcbox = ConstInt(heaptracker.adr2int(func_adr)) calldescr = cpu.calldescrof_dynamic([types.uchar], types.sint, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) i1 = BoxInt() i2 = BoxInt() tok = BoxInt() @@ -2012,7 +2014,8 @@ calldescr = cpu.calldescrof_dynamic([types.pointer, types_size_t, types_size_t, types.pointer], types.void, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=clibffi.FUNCFLAG_CDECL) i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() @@ -2038,6 +2041,57 @@ assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') + def test_call_to_winapi_function(self): + from pypy.rlib.clibffi import _WIN32, FUNCFLAG_STDCALL + if not _WIN32: + py.test.skip("Windows test only") + from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.rwin32 import DWORD + libc = CDLL('KERNEL32') + c_GetCurrentDir = libc.getpointer('GetCurrentDirectoryA', + [types.ulong, types.pointer], + types.ulong) + + cwd = os.getcwd() + buflen = len(cwd) + 10 + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + 
argchain = ArgChain().arg(rffi.cast(DWORD, buflen)).arg(buffer) + res = c_GetCurrentDir.call(argchain, DWORD) + assert rffi.cast(lltype.Signed, res) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + + cpu = self.cpu + func_adr = llmemory.cast_ptr_to_adr(c_GetCurrentDir.funcsym) + funcbox = ConstInt(heaptracker.adr2int(func_adr)) + calldescr = cpu.calldescrof_dynamic([types.ulong, types.pointer], + types.ulong, + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_STDCALL) + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + tok = BoxInt() + faildescr = BasicFailDescr(1) + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) + ] + ops[1].setfailargs([]) + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + self.cpu.set_future_value_int(0, buflen) + self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + def test_guard_not_invalidated(self): cpu = self.cpu i0 = BoxInt() diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -91,9 +91,12 @@ reds_v = op.args[2+numgreens:] assert len(reds_v) == numreds # - def _sort(args_v): + def _sort(args_v, is_green): from pypy.jit.metainterp.history import getkind lst = [v for v in args_v if v.concretetype is not lltype.Void] + if is_green: + assert len(lst) == len(args_v), ( + "not supported so far: 'greens' variables contain Void") _kind2count = {'int': 1, 'ref': 2, 'float': 3} lst2 = sorted(lst, key=lambda v: 
_kind2count[getkind(v.concretetype)]) # a crash here means that you have to reorder the variable named in @@ -102,7 +105,7 @@ assert lst == lst2 return lst # - return (_sort(greens_v), _sort(reds_v)) + return (_sort(greens_v, True), _sort(reds_v, False)) def maybe_on_top_of_llinterp(rtyper, fnptr): # Run a generated graph on top of the llinterp for testing. diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -18,26 +18,27 @@ def __init__(self, funcval, cpu, prepare_op): self.funcval = funcval self.opargs = [] - argtypes, restype = self._get_signature(funcval) + argtypes, restype, flags = self._get_signature(funcval) self.descr = cpu.calldescrof_dynamic(argtypes, restype, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=flags) # ^^^ may be None if unsupported self.prepare_op = prepare_op self.delayed_ops = [] def _get_signature(self, funcval): """ - given the funcval, return a tuple (argtypes, restype), where the - actuall types are libffi.types.* + given the funcval, return a tuple (argtypes, restype, flags), where + the actuall types are libffi.types.* The implementation is tricky because we have three possible cases: - translated: the easiest case, we can just cast back the pointer to - the original Func instance and read .argtypes and .restype + the original Func instance and read .argtypes, .restype and .flags - completely untranslated: this is what we get from test_optimizeopt tests. 
funcval contains a FakeLLObject whose _fake_class is Func, - and we can just get .argtypes and .restype + and we can just get .argtypes, .restype and .flags - partially translated: this happens when running metainterp tests: funcval contains the low-level equivalent of a Func, and thus we @@ -49,10 +50,10 @@ llfunc = funcval.box.getref_base() if we_are_translated(): func = cast_base_ptr_to_instance(Func, llfunc) - return func.argtypes, func.restype + return func.argtypes, func.restype, func.flags elif getattr(llfunc, '_fake_class', None) is Func: # untranslated - return llfunc.argtypes, llfunc.restype + return llfunc.argtypes, llfunc.restype, llfunc.flags else: # partially translated # llfunc contains an opaque pointer to something like the following: @@ -63,7 +64,7 @@ # because we don't have the exact TYPE to cast to. Instead, we # just fish it manually :-( f = llfunc._obj.container - return f.inst_argtypes, f.inst_restype + return f.inst_argtypes, f.inst_restype, f.inst_flags class OptFfiCall(Optimization): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -14,12 +14,15 @@ can check that the signature of a call is really what you want. 
""" - def __init__(self, arg_types, typeinfo): + def __init__(self, arg_types, typeinfo, flags): self.arg_types = arg_types self.typeinfo = typeinfo # return type + self.flags = flags def __eq__(self, other): - return self.arg_types == other.arg_types and self.typeinfo == other.typeinfo + return (self.arg_types == other.arg_types and + self.typeinfo == other.typeinfo and + self.flags == other.get_ffi_flags()) class FakeLLObject(object): @@ -41,14 +44,17 @@ vable_token_descr = LLtypeMixin.valuedescr valuedescr = LLtypeMixin.valuedescr - int_float__int = MyCallDescr('if', 'i') + int_float__int_42 = MyCallDescr('if', 'i', 42) + int_float__int_43 = MyCallDescr('if', 'i', 43) funcptr = FakeLLObject() func = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=42) func2 = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=43) # def calldescr(cpu, FUNC, oopspecindex, extraeffect=None): if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: @@ -83,7 +89,7 @@ """ expected = """ [i0, f1] - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1) @@ -123,7 +129,7 @@ [i0, f1, p2] i4 = force_token() setfield_gc(p2, i4, descr=vable_token_descr) - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [p2] guard_no_exception() [p2] jump(i3, f1, p2) @@ -220,7 +226,7 @@ call(0, ConstPtr(func), descr=libffi_prepare) # # this "nested" call is nicely optimized - i4 = call_release_gil(67890, i0, f1, descr=int_float__int) + i4 = call_release_gil(67890, i0, f1, descr=int_float__int_43) guard_not_forced() [] guard_no_exception() [] # @@ -265,7 +271,7 @@ expected = """ [i0, f1, p2] setfield_gc(p2, i0, descr=valuedescr) - i3 = call_release_gil(12345, i0, 
f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1, p2) diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -252,6 +252,41 @@ self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, 'jump': 1}) + def test_void_red_variable(self): + mydriver = JitDriver(greens=[], reds=['a', 'm']) + def f1(m): + a = None + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass # other case + self.meta_interp(f1, [18]) + + def test_bug_constant_rawptrs(self): + py.test.skip("crashes because a is a constant") + from pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.nullptr(rffi.VOIDP.TO) + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + self.meta_interp(f1, [18]) + + def test_bug_rawptrs(self): + from pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.malloc(rffi.VOIDP.TO, 5, flavor='raw') + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass + lltype.free(a, flavor='raw') + self.meta_interp(f1, [18]) + class TestLLWarmspot(WarmspotTests, LLJitMixin): CPUClass = runner.LLtypeCPU diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -245,7 +245,8 @@ graph.startblock = support.split_before_jit_merge_point(*jmpp) graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot - # to list some variable in greens=[] or reds=[] in JitDriver. + # to list some variable in greens=[] or reds=[] in JitDriver, + # or that a jit_merge_point() takes a constant as an argument. 
checkgraph(graph) for v in graph.getargs(): assert isinstance(v, Variable) @@ -655,11 +656,13 @@ portalfunc_ARGS = [] nums = {} for i, ARG in enumerate(PORTALFUNC.ARGS): + kind = history.getkind(ARG) + assert kind != 'void' if i < len(jd.jitdriver.greens): color = 'green' else: color = 'red' - attrname = '%s_%s' % (color, history.getkind(ARG)) + attrname = '%s_%s' % (color, kind) count = nums.get(attrname, 0) nums[attrname] = count + 1 portalfunc_ARGS.append((ARG, attrname, count)) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -124,7 +124,7 @@ # Hash of lltype or ootype object. # Only supports strings, unicodes and regular instances, # as well as primitives that can meaningfully be cast to Signed. - if isinstance(TYPE, lltype.Ptr): + if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc': if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE: return rstr.LLHelpers.ll_strhash(x) # assumed not null else: @@ -140,7 +140,7 @@ else: return 0 else: - return lltype.cast_primitive(lltype.Signed, x) + return rffi.cast(lltype.Signed, x) @specialize.ll_and_arg(3) def set_future_value(cpu, j, value, typecode): diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -446,7 +446,9 @@ result = self.buffer[pos:pos + n] self.pos += n else: - result = self.buffer + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:] self.pos = 0 self.buffer = "" self.readlength += len(result) diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -89,6 +89,9 @@ assert not self.not_forced(r) r.sort() assert r == range(1, 100) + [999] + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_pop(self): r = range(10) 
diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -402,12 +402,19 @@ closureHeap = ClosureHeap() -FUNCFLAG_STDCALL = 0 -FUNCFLAG_CDECL = 1 # for WINAPI calls +FUNCFLAG_STDCALL = 0 # on Windows: for WINAPI calls +FUNCFLAG_CDECL = 1 # on Windows: for __cdecl calls FUNCFLAG_PYTHONAPI = 4 FUNCFLAG_USE_ERRNO = 8 FUNCFLAG_USE_LASTERROR = 16 +def get_call_conv(flags): + if _WIN32 and (flags & FUNCFLAG_CDECL == 0): + return FFI_STDCALL + else: + return FFI_DEFAULT_ABI + + class AbstractFuncPtr(object): ll_cif = lltype.nullptr(FFI_CIFP.TO) ll_argtypes = lltype.nullptr(FFI_TYPE_PP.TO) @@ -427,11 +434,6 @@ self.ll_cif = lltype.malloc(FFI_CIFP.TO, flavor='raw', track_allocation=False) # freed by the __del__ - if _WIN32 and (flags & FUNCFLAG_CDECL == 0): - cc = FFI_STDCALL - else: - cc = FFI_DEFAULT_ABI - if _MSVC: # This little trick works correctly with MSVC. # It returns small structures in registers @@ -441,7 +443,7 @@ elif restype.c_size <= 8: restype = ffi_type_sint64 - res = c_ffi_prep_cif(self.ll_cif, cc, + res = c_ffi_prep_cif(self.ll_cif, get_call_conv(flags), rffi.cast(rffi.UINT, argnum), restype, self.ll_argtypes) if not res == FFI_OK: diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -113,7 +113,7 @@ rffi.LONGLONG: ctypes.c_longlong, rffi.ULONGLONG: ctypes.c_ulonglong, rffi.SIZE_T: ctypes.c_size_t, - lltype.Bool: ctypes.c_bool, + lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_long), llmemory.Address: ctypes.c_void_p, llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX @@ -1153,7 +1153,11 @@ # an OverflowError on the following line. 
cvalue = ctypes.cast(ctypes.c_void_p(cvalue), cresulttype) else: - cvalue = cresulttype(cvalue).value # mask high bits off if needed + try: + cvalue = cresulttype(cvalue).value # mask high bits off if needed + except TypeError: + cvalue = int(cvalue) # float -> int + cvalue = cresulttype(cvalue).value # try again return ctypes2lltype(RESTYPE, cvalue) class ForceCastEntry(ExtRegistryEntry): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -1283,6 +1283,8 @@ try: return p._obj._hash_cache_ except AttributeError: + assert self._T._gckind == 'gc' + assert self # not for NULL result = hash(p._obj) if cache: try: diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -699,7 +699,10 @@ def test_cast(self): res = cast(SIZE_T, -1) assert type(res) is r_size_t - assert res == r_size_t(-1) + assert res == r_size_t(-1) + # + res = cast(lltype.Signed, 42.5) + assert res == 42 def test_rffi_sizeof(self): try: From noreply at buildbot.pypy.org Thu Sep 1 05:25:40 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Sep 2011 05:25:40 +0200 (CEST) Subject: [pypy-commit] pypy default: style nits + code duplication removal Message-ID: <20110901032540.85DC68204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r46968:a54cf959dccb Date: 2011-08-31 23:25 -0400 http://bitbucket.org/pypy/pypy/changeset/a54cf959dccb/ Log: style nits + code duplication removal diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -53,7 +53,9 @@ VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) -def 
create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype): +def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, + expected_size=None): + class Box(BaseBox): def __init__(self, val): self.val = val @@ -113,6 +115,8 @@ W_LowLevelDtype.aliases = aliases W_LowLevelDtype.applevel_types = applevel_types W_LowLevelDtype.num_bytes = rffi.sizeof(T) + if expected_size is not None: + assert W_LowLevelDtype.num_bytes == expected_size return W_LowLevelDtype @@ -282,11 +286,10 @@ applevel_types = [], T = rffi.SIGNEDCHAR, valtype = rffi.SIGNEDCHAR._type, + expected_size = 1, ) class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) -assert W_Int8Dtype.num_bytes == 1 + pass W_Int16Dtype = create_low_level_dtype( num = 3, kind = SIGNEDLTR, name = "int16", @@ -294,11 +297,10 @@ applevel_types = [], T = rffi.SHORT, valtype = rffi.SHORT._type, + expected_size = 2, ) class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) -assert W_Int16Dtype.num_bytes == 2 + pass W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", @@ -306,6 +308,7 @@ applevel_types = [], T = rffi.INT, valtype = rffi.INT._type, + expected_size = 4, ) class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype): pass @@ -316,6 +319,7 @@ applevel_types = ["long"], T = rffi.LONGLONG, valtype = rffi.LONGLONG._type, + expected_size = 8, ) class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype): pass @@ -326,6 +330,7 @@ applevel_types = ["float"], T = lltype.Float, valtype = float, + expected_size = 8, ) class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): def unwrap(self, space, w_item): From noreply at buildbot.pypy.org Thu Sep 1 06:16:05 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 06:16:05 +0200 (CEST) Subject: [pypy-commit] pypy 
unsigned-dtypes: Add UInt32 dtype Message-ID: <20110901041605.1BBA78204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r46969:779c6bc0669f Date: 2011-08-31 22:15 -0600 http://bitbucket.org/pypy/pypy/changeset/779c6bc0669f/ Log: Add UInt32 dtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -14,6 +14,7 @@ from pypy.rpython.lltypesystem import lltype, rffi +UNSIGNEDLTR = "u" SIGNEDLTR = "i" BOOLLTR = "b" FLOATINGLTR = "f" @@ -313,6 +314,17 @@ class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype): pass +W_UInt32Dtype = create_low_level_dtype( + num = 6, kind = UNSIGNEDLTR, name = "uint32", + aliases = ["I"], + applevel_types = [], + T = rffi.UINT, + valtype = rffi.UINT._type, + expected_size = 4, +) +class W_UInt32Dtype(IntegerArithmeticDtype, W_UInt32Dtype): + pass + W_Int64Dtype = create_low_level_dtype( num = 9, kind = SIGNEDLTR, name = "int64", aliases = [], @@ -341,7 +353,8 @@ ALL_DTYPES = [ W_BoolDtype, - W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, + W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_UInt32Dtype, + W_Int64Dtype, W_Float64Dtype ] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -99,6 +99,15 @@ for i in range(5): assert b[i] == i * 2 + def test_add_uint32(self): + from numpy import array, dtype + + a = array(range(5), dtype="I") + b = a + a + assert b.dtype is dtype("I") + for i in range(5): + assert b[i] == i * 2 + def test_shape(self): from numpy import dtype From noreply at buildbot.pypy.org Thu Sep 1 06:52:41 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 06:52:41 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Add UInt8 dtype Message-ID: 
<20110901045241.68D3882212@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r46971:1f12ec631b17 Date: 2011-08-31 22:41 -0600 http://bitbucket.org/pypy/pypy/changeset/1f12ec631b17/ Log: Add UInt8 dtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -292,6 +292,17 @@ class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): pass +W_UInt8Dtype = create_low_level_dtype( + num = 1, kind = SIGNEDLTR, name = "uint8", + aliases = ["uint8"], + applevel_types = [], + T = rffi.UCHAR, + valtype = rffi.UCHAR._type, + expected_size = 1, +) +class W_UInt8Dtype(IntegerArithmeticDtype, W_UInt8Dtype): + pass + W_Int16Dtype = create_low_level_dtype( num = 3, kind = SIGNEDLTR, name = "int16", aliases = ["int16"], From noreply at buildbot.pypy.org Thu Sep 1 06:52:40 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 06:52:40 +0200 (CEST) Subject: [pypy-commit] pypy numpy-dtype: unary functions actually aren't working. Simplified the Call1 class. Message-ID: <20110901045240.2DAC88204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: numpy-dtype Changeset: r46970:2062025cf412 Date: 2011-08-24 00:17 -0600 http://bitbucket.org/pypy/pypy/changeset/2062025cf412/ Log: unary functions actually aren't working. Simplified the Call1 class. 
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -24,7 +24,7 @@ #'maximum': 'interp_ufuncs.maximum', #'minimum': 'interp_ufuncs.minimum', #'multiply': 'interp_ufuncs.multiply', - #'negative': 'interp_ufuncs.negative', + 'negative': 'interp_ufuncs.negative', #'reciprocal': 'interp_ufuncs.reciprocal', #'sign': 'interp_ufuncs.sign', #'subtract': 'interp_ufuncs.subtract', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -59,14 +59,14 @@ return self descr_neg = _unaryop_impl(interp_ufuncs.negative) - descr_abs = _unaryop_impl(interp_ufuncs.absolute) + #descr_abs = _unaryop_impl(interp_ufuncs.absolute) def _binop_impl(w_ufunc): def impl(self, space, w_other): return w_ufunc(space, self, w_other) return func_with_new_name(impl, "binop_%s_impl" % w_ufunc.__name__) - descr_add = _binop_impl(interp_ufuncs.add) + #descr_add = _binop_impl(interp_ufuncs.add) #descr_sub = _binop_impl(interp_ufuncs.subtract) #descr_mul = _binop_impl(interp_ufuncs.multiply) #descr_div = _binop_impl(interp_ufuncs.divide) @@ -273,7 +273,7 @@ # w_value = new_numarray(space, w_value, self.dtype) #else: w_value = convert_to_array(space, w_value) - concrete.setslice(space, start, stop, step, + concrete.setslice(start, stop, step, slice_length, w_value) def descr_mean(self, space): @@ -328,10 +328,8 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, signature): + def __init__(self): BaseArray.__init__(self) - self.forced_result = None - self.signature = signature def _del_sources(self): # Function for deleting references to source arrays, to allow garbage-collecting them @@ -352,38 +350,17 @@ # i += 1 # return result - def force_if_needed(self): - if self.forced_result is None: - 
self.forced_result = self.compute() - self._del_sources() - - def get_concrete(self): - self.force_if_needed() - return self.forced_result - - def eval(self, i): - if self.forced_result is not None: - return self.forced_result.eval(i) - return self._eval(i) - - def find_size(self): - if self.forced_result is not None: - # The result has been computed and sources may be unavailable - return self.forced_result.find_size() - return self._find_size() - - def find_dtype(self): - return self.dtype - def make_call1(_dtype): class Call1(VirtualArray): _immutable_fields_ = ["function", "values"] dtype = _dtype def __init__(self, function, values, signature): - VirtualArray.__init__(self, signature) + VirtualArray.__init__(self) self.function = function self.values = values + self.forced_result = None + self.signature = signature def _del_sources(self): self.values = None @@ -393,17 +370,41 @@ signature = self.signature result_size = self.find_size() result = create_sdarray(result_size, _dtype) - while i < result_size: - #numpy_driver.jit_merge_point(signature=signature, - # result_size=result_size, i=i, - # self=self, result=result) - result.setitem(i, self.eval(i)) - i += 1 + result.setslice(0, result_size, 1, result_size, self) + #while i < result_size: + # #numpy_driver.jit_merge_point(signature=signature, + # # result_size=result_size, i=i, + # # self=self, result=result) + # result.setitem(i, self.eval(i)) + # i += 1 return result def _find_size(self): return self.values.find_size() + def force_if_needed(self): + if self.forced_result is None: + self.forced_result = self.compute() + self._del_sources() + + def get_concrete(self): + self.force_if_needed() + return self.forced_result + + def eval(self, i): + if self.forced_result is not None: + return self.forced_result.eval(i) + return self._eval(i) + + def find_size(self): + if self.forced_result is not None: + # The result has been computed and sources may be unavailable + return self.forced_result.find_size() + 
return self._find_size() + + def find_dtype(self): + return self.dtype + def _eval(self, i): return self.function(_dtype.convval(self.values.eval(i))) Call1.__name__ = "Call1_" + Call1.dtype.name @@ -423,12 +424,14 @@ dtype = _dtype def __init__(self, function, left, right, signature): - VirtualArray.__init__(self, signature) + VirtualArray.__init__(self) self.left = left self.right = right dtype1 = self.left.find_dtype() dtype2 = self.right.find_dtype() self.function = function + self.forced_result = None + self.signature = signature #if dtype1.num != _dtype.num: # self.cast1 = _dtype.convval #else: @@ -466,12 +469,38 @@ result.setitem(i, self.eval(i)) i += 1 return result + def _find_size(self): try: return self.left.find_size() except: return self.right.find_size() + def force_if_needed(self): + if self.forced_result is None: + self.forced_result = self.compute() + self._del_sources() + + def get_concrete(self): + self.force_if_needed() + return self.forced_result + + def eval(self, i): + if self.forced_result is not None: + return self.forced_result.eval(i) + return self._eval(i) + + def find_size(self): + if self.forced_result is not None: + # The result has been computed and sources may be unavailable + return self.forced_result.find_size() + return self._find_size() + + def find_dtype(self): + return self.dtype + + + def _eval(self, i): lhs, rhs = _dtype.convval(self.left.eval(i)), _dtype.convval(self.right.eval(i)) return self.function(lhs, rhs) @@ -550,12 +579,12 @@ def find_size(self): return self.size - def setslice(self, space, start, stop, step, slice_length, arr): + def setslice(self, start, stop, step, slice_length, arr): start = self.calc_index(start) if stop != -1: stop = self.calc_index(stop) step = self.step * step - self.parent.setslice(space, start, stop, step, slice_length, arr) + self.parent.setslice(start, stop, step, slice_length, arr) def calc_index(self, item): return (self.start + item * self.step) @@ -653,7 +682,7 @@ j += 1 i += step - 
def setslice(self, space, start, stop, step, slice_length, arr): + def setslice(self, start, stop, step, slice_length, arr): if step > 0: self._sliceloop1(start, stop, step, arr) else: @@ -732,8 +761,8 @@ __pos__ = interp2app(BaseArray.descr_pos), __neg__ = interp2app(BaseArray.descr_neg), - __abs__ = interp2app(BaseArray.descr_abs), - __add__ = interp2app(BaseArray.descr_add), + #__abs__ = interp2app(BaseArray.descr_abs), + #__add__ = interp2app(BaseArray.descr_add), #__sub__ = interp2app(BaseArray.descr_sub), #__mul__ = interp2app(BaseArray.descr_mul), #__div__ = interp2app(BaseArray.descr_div), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -19,31 +19,31 @@ return space.wrap(func(space.float_w(w_obj))) return func_with_new_name(impl, "%s_dispatcher" % func.__name__) -def ufunc2(func): - signature = Signature() - def impl(space, w_lhs, w_rhs): - from pypy.module.micronumpy.interp_numarray import pick_call2, convert_to_array - if space.issequence_w(w_lhs) or space.issequence_w(w_rhs): - w_lhs_arr = convert_to_array(space, w_lhs) - w_rhs_arr = convert_to_array(space, w_rhs) - new_sig = w_lhs_arr.signature.transition(signature).transition(w_rhs_arr.signature) - w_res = pick_call2(w_lhs_arr.dtype, w_rhs_arr.dtype)(func, w_lhs_arr, w_rhs_arr, new_sig) - w_lhs_arr.invalidates.append(w_res) - w_rhs_arr.invalidates.append(w_res) - return w_res - else: - return space.wrap(func(space.float_w(w_lhs), space.float_w(w_rhs))) - return func_with_new_name(impl, "%s_dispatcher" % func.__name__) +#def ufunc2(func): +# signature = Signature() +# def impl(space, w_lhs, w_rhs): +# from pypy.module.micronumpy.interp_numarray import pick_call2, convert_to_array +# if space.issequence_w(w_lhs) or space.issequence_w(w_rhs): +# w_lhs_arr = convert_to_array(space, w_lhs) +# w_rhs_arr = convert_to_array(space, w_rhs) +# new_sig = 
w_lhs_arr.signature.transition(signature).transition(w_rhs_arr.signature) +# w_res = pick_call2(w_lhs_arr.dtype, w_rhs_arr.dtype)(func, w_lhs_arr, w_rhs_arr, new_sig) +# w_lhs_arr.invalidates.append(w_res) +# w_rhs_arr.invalidates.append(w_res) +# return w_res +# else: +# return space.wrap(func(space.float_w(w_lhs), space.float_w(w_rhs))) +# return func_with_new_name(impl, "%s_dispatcher" % func.__name__) - at ufunc - at specialize.argtype(0) -def absolute(value): - return abs(value) +#@ufunc +#@specialize.argtype(1) +#def absolute(value): +# return abs(value) - at ufunc2 - at specialize.argtype(0,1) -def add(lvalue, rvalue): - return lvalue + rvalue +#@specialize.argtype(1,2) +#@ufunc2 +#def add(lvalue, rvalue): +# return lvalue + rvalue #@ufunc2 #def copysign(lvalue, rvalue): From noreply at buildbot.pypy.org Thu Sep 1 06:52:42 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 06:52:42 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Add UInt16 dtype Message-ID: <20110901045242.A41D18204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r46972:178ebefab2a6 Date: 2011-08-31 22:42 -0600 http://bitbucket.org/pypy/pypy/changeset/178ebefab2a6/ Log: Add UInt16 dtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -314,6 +314,17 @@ class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): pass +W_UInt16Dtype = create_low_level_dtype( + num = 3, kind = SIGNEDLTR, name = "uint16", + aliases = ["uint16"], + applevel_types = [], + T = rffi.USHORT, + valtype = rffi.USHORT._type, + expected_size = 2, +) +class W_UInt16Dtype(IntegerArithmeticDtype, W_UInt16Dtype): + pass + W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", aliases = ["i"], From noreply at buildbot.pypy.org Thu Sep 1 06:52:44 2011 From: noreply at buildbot.pypy.org 
(justinpeel) Date: Thu, 1 Sep 2011 06:52:44 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Add UInt64 dtype and make some fixes on the others Message-ID: <20110901045244.05FE98204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r46973:11460491de24 Date: 2011-08-31 22:52 -0600 http://bitbucket.org/pypy/pypy/changeset/11460491de24/ Log: Add UInt64 dtype and make some fixes on the others diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -293,7 +293,7 @@ pass W_UInt8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "uint8", + num = 2, kind = UNSIGNEDLTR, name = "uint8", aliases = ["uint8"], applevel_types = [], T = rffi.UCHAR, @@ -315,7 +315,7 @@ pass W_UInt16Dtype = create_low_level_dtype( - num = 3, kind = SIGNEDLTR, name = "uint16", + num = 4, kind = UNSIGNEDLTR, name = "uint16", aliases = ["uint16"], applevel_types = [], T = rffi.USHORT, @@ -358,6 +358,19 @@ class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype): pass +W_UInt64Dtype = create_low_level_dtype( + num = 10, kind = UNSIGNEDLTR, name = "uint64", + aliases = [], + applevel_types = [], + T = rffi.ULONGLONG, + valtype = rffi.ULONGLONG._type, + expected_size = 8, +) +class W_UInt64Dtype(IntegerArithmeticDtype, W_UInt64Dtype): + pass + + + W_Float64Dtype = create_low_level_dtype( num = 12, kind = FLOATINGLTR, name = "float64", aliases = [], @@ -375,8 +388,8 @@ ALL_DTYPES = [ W_BoolDtype, - W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_UInt32Dtype, - W_Int64Dtype, + W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, + W_Int32Dtype, W_UInt32Dtype, W_Int64Dtype, W_UInt64Dtype, W_Float64Dtype ] From notifications-noreply at bitbucket.org Thu Sep 1 07:17:39 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 01 Sep 2011 05:17:39 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: 
<20110901051739.17350.67917@bitbucket01.managed.contegix.com> You have received a notification from Justin Peel. Hi, I forked pypy. My fork is at https://bitbucket.org/justinpeel/pypy. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Thu Sep 1 10:33:58 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Sep 2011 10:33:58 +0200 (CEST) Subject: [pypy-commit] pypy default: use pytest's monkeypatching instead of manually doing it Message-ID: <20110901083358.128E38204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r46974:94aca11cba52 Date: 2011-09-01 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/94aca11cba52/ Log: use pytest's monkeypatching instead of manually doing it diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -433,7 +433,7 @@ ops_offset[operations[2]] <= ops_offset[None]) - def test_calling_convention(self): + def test_calling_convention(self, monkeypatch): if WORD != 4: py.test.skip("32-bit only test") from pypy.jit.backend.x86.regloc import eax, edx @@ -442,7 +442,7 @@ from pypy.rlib.libffi import types, clibffi had_stdcall = hasattr(clibffi, 'FFI_STDCALL') if not had_stdcall: # not running on Windows, but we can still test - clibffi.FFI_STDCALL = 12345 + monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) # for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: cpu = self.cpu @@ -514,9 +514,6 @@ assert self.cpu.get_latest_value_int(2) == 42 assert self.cpu.get_latest_value_int(3) == 42 - if not had_stdcall: - del clibffi.FFI_STDCALL - class TestDebuggingAssembler(object): def setup_method(self, meth): From noreply at buildbot.pypy.org Thu Sep 1 11:13:25 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 11:13:25 +0200 (CEST) Subject: [pypy-commit] pyrepl default: Apply 
414bb2d98b0c from pypy. Message-ID: <20110901091325.C63B68204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r126:4e8b8d42e92c Date: 2011-09-01 11:13 +0200 http://bitbucket.org/pypy/pyrepl/changeset/4e8b8d42e92c/ Log: Apply 414bb2d98b0c from pypy. diff --git a/pyrepl/reader.py b/pyrepl/reader.py --- a/pyrepl/reader.py +++ b/pyrepl/reader.py @@ -401,13 +401,19 @@ return "(arg: %s) "%self.arg if "\n" in self.buffer: if lineno == 0: - return self._ps2 + res = self.ps2 elif lineno == self.buffer.count("\n"): - return self._ps4 + res = self.ps4 else: - return self._ps3 + res = self.ps3 else: - return self._ps1 + res = self.ps1 + # Lazily call str() on self.psN, and cache the results using as key + # the object on which str() was called. This ensures that even if the + # same object is used e.g. for ps1 and ps2, str() is called only once. + if res not in self._pscache: + self._pscache[res] = str(res) + return self._pscache[res] def push_input_trans(self, itrans): self.input_trans_stack.append(self.input_trans) @@ -473,8 +479,7 @@ self.pos = 0 self.dirty = 1 self.last_command = None - self._ps1, self._ps2, self._ps3, self._ps4 = \ - map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + self._pscache = {} except: self.restore() raise From noreply at buildbot.pypy.org Thu Sep 1 11:13:56 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 11:13:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix: call str() only once per prompt, not twice, even though the Message-ID: <20110901091356.9E2B28204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46975:414bb2d98b0c Date: 2011-09-01 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/414bb2d98b0c/ Log: Fix: call str() only once per prompt, not twice, even though the same object is in reader.ps1 and reader.ps2. 
diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -401,13 +401,19 @@ return "(arg: %s) "%self.arg if "\n" in self.buffer: if lineno == 0: - return self._ps2 + res = self.ps2 elif lineno == self.buffer.count("\n"): - return self._ps4 + res = self.ps4 else: - return self._ps3 + res = self.ps3 else: - return self._ps1 + res = self.ps1 + # Lazily call str() on self.psN, and cache the results using as key + # the object on which str() was called. This ensures that even if the + # same object is used e.g. for ps1 and ps2, str() is called only once. + if res not in self._pscache: + self._pscache[res] = str(res) + return self._pscache[res] def push_input_trans(self, itrans): self.input_trans_stack.append(self.input_trans) @@ -473,8 +479,7 @@ self.pos = 0 self.dirty = 1 self.last_command = None - self._ps1, self._ps2, self._ps3, self._ps4 = \ - map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + self._pscache = {} except: self.restore() raise From noreply at buildbot.pypy.org Thu Sep 1 11:13:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 11:13:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110901091357.F35118204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46976:3ffe8b85f014 Date: 2011-09-01 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/3ffe8b85f014/ Log: merge heads diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -433,7 +433,7 @@ ops_offset[operations[2]] <= ops_offset[None]) - def test_calling_convention(self): + def test_calling_convention(self, monkeypatch): if WORD != 4: py.test.skip("32-bit only test") from pypy.jit.backend.x86.regloc import eax, edx @@ -442,7 +442,7 @@ from pypy.rlib.libffi import types, clibffi had_stdcall = hasattr(clibffi, 
'FFI_STDCALL') if not had_stdcall: # not running on Windows, but we can still test - clibffi.FFI_STDCALL = 12345 + monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) # for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: cpu = self.cpu @@ -514,9 +514,6 @@ assert self.cpu.get_latest_value_int(2) == 42 assert self.cpu.get_latest_value_int(3) == 42 - if not had_stdcall: - del clibffi.FFI_STDCALL - class TestDebuggingAssembler(object): def setup_method(self, meth): diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -53,7 +53,9 @@ VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype): +def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, + expected_size=None): + class Box(BaseBox): def __init__(self, val): self.val = val @@ -113,6 +115,8 @@ W_LowLevelDtype.aliases = aliases W_LowLevelDtype.applevel_types = applevel_types W_LowLevelDtype.num_bytes = rffi.sizeof(T) + if expected_size is not None: + assert W_LowLevelDtype.num_bytes == expected_size return W_LowLevelDtype @@ -282,10 +286,21 @@ applevel_types = [], T = rffi.SIGNEDCHAR, valtype = rffi.SIGNEDCHAR._type, + expected_size = 1, ) class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) + pass + +W_Int16Dtype = create_low_level_dtype( + num = 3, kind = SIGNEDLTR, name = "int16", + aliases = ["int16"], + applevel_types = [], + T = rffi.SHORT, + valtype = rffi.SHORT._type, + expected_size = 2, +) +class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): + pass W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", @@ -293,6 +308,7 @@ applevel_types = [], T = rffi.INT, valtype = rffi.INT._type, 
+ expected_size = 4, ) class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype): pass @@ -303,6 +319,7 @@ applevel_types = ["long"], T = rffi.LONGLONG, valtype = rffi.LONGLONG._type, + expected_size = 8, ) class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype): pass @@ -313,6 +330,7 @@ applevel_types = ["float"], T = lltype.Float, valtype = float, + expected_size = 8, ) class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): def unwrap(self, space, w_item): @@ -323,7 +341,7 @@ ALL_DTYPES = [ W_BoolDtype, - W_Int8Dtype, W_Int32Dtype, W_Int64Dtype, + W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, W_Float64Dtype ] @@ -353,4 +371,4 @@ kind = interp_attrproperty("kind", cls=W_Dtype), shape = GetSetProperty(W_Dtype.descr_get_shape), ) -W_Dtype.typedef.acceptable_as_base_class = False \ No newline at end of file +W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -217,7 +217,15 @@ return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") def descr_getitem(self, space, w_idx): - # TODO: indexing by tuples + # TODO: indexing by arrays and lists + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length == 0: + return space.wrap(self) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: # Single index @@ -231,8 +239,19 @@ return space.wrap(res) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by tuples and lists + # TODO: indexing by arrays and lists self.invalidated() + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length > 1: # only one dimension for now. 
+ raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if length == 0: + w_idx = space.newslice(space.wrap(0), + space.wrap(self.find_size()), + space.wrap(1)) + else: + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -82,10 +82,20 @@ assert a[1] == 1 def test_add_int8(self): - from numpy import array + from numpy import array, dtype a = array(range(5), dtype="int8") b = a + a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == i * 2 + + def test_add_int16(self): + from numpy import array, dtype + + a = array(range(5), dtype="int16") + b = a + a + assert b.dtype is dtype("int16") for i in range(5): assert b[i] == i * 2 @@ -98,4 +108,4 @@ from numpy import dtype # You can't subclass dtype - raises(TypeError, type, "Foo", (dtype,), {}) \ No newline at end of file + raises(TypeError, type, "Foo", (dtype,), {}) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -84,6 +84,9 @@ a = array(range(5), dtype="int8") assert str(a) == "[0 1 2 3 4]" + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + def test_str_slice(self): from numpy import array, zeros a = array(range(5), float) @@ -102,6 +105,16 @@ assert a[-1] == 8 raises(IndexError, "a[-6]") + def test_getitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)]") + for i in xrange(5): + assert a[(i,)] == i + b = a[()] + for i in xrange(5): + assert a[i] == b[i] + def test_setitem(self): from numpy import array a = array(range(5)) @@ -110,6 +123,17 @@ raises(IndexError, "a[5] = 
0.0") raises(IndexError, "a[-6] = 3.0") + def test_setitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)] = [0,1]") + for i in xrange(5): + a[(i,)] = i+1 + assert a[i] == i+1 + a[()] = range(5) + for i in xrange(5): + assert a[i] == i + def test_setslice_array(self): from numpy import array a = array(range(5)) @@ -541,4 +565,4 @@ a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") \ No newline at end of file + raises(ValueError, fromstring, "abc") From noreply at buildbot.pypy.org Thu Sep 1 11:39:30 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 11:39:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix for jit/backend/x86: give these global constants Message-ID: <20110901093930.4D8F68204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46977:f42981403e3e Date: 2011-09-01 11:39 +0200 http://bitbucket.org/pypy/pypy/changeset/f42981403e3e/ Log: Translation fix for jit/backend/x86: give these global constants the type 'int', and only cast them as needed to USHORT. Avoids surprizes. diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -286,10 +286,10 @@ FFI_OK = cConfig.FFI_OK FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF -FFI_DEFAULT_ABI = rffi.cast(rffi.USHORT, cConfig.FFI_DEFAULT_ABI) +FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI if _WIN32: - FFI_STDCALL = rffi.cast(rffi.USHORT, cConfig.FFI_STDCALL) -FFI_TYPE_STRUCT = rffi.cast(rffi.USHORT, cConfig.FFI_TYPE_STRUCT) + FFI_STDCALL = cConfig.FFI_STDCALL +FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT FFI_CIFP = rffi.COpaquePtr('ffi_cif', compilation_info=eci) FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure) @@ -319,7 +319,7 @@ which the 'ffistruct' member is a regular FFI_TYPE. 
""" tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types)+1, flavor='raw') - tpe.ffistruct.c_type = FFI_TYPE_STRUCT + tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT) tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size) tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment) tpe.ffistruct.c_elements = rffi.cast(FFI_TYPE_PP, @@ -438,13 +438,14 @@ if _MSVC: # This little trick works correctly with MSVC. # It returns small structures in registers - if r_uint(restype.c_type) == FFI_TYPE_STRUCT: + if intmask(restype.c_type) == FFI_TYPE_STRUCT: if restype.c_size <= 4: restype = ffi_type_sint32 elif restype.c_size <= 8: restype = ffi_type_sint64 - res = c_ffi_prep_cif(self.ll_cif, get_call_conv(flags, False), + res = c_ffi_prep_cif(self.ll_cif, + rffi.cast(rffi.USHORT, get_call_conv(flags,False)), rffi.cast(rffi.UINT, argnum), restype, self.ll_argtypes) if not res == FFI_OK: diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -75,7 +75,7 @@ @staticmethod @jit.elidable def is_struct(ffi_type): - return intmask(ffi_type.c_type) == intmask(FFI_TYPE_STRUCT) + return intmask(ffi_type.c_type) == FFI_TYPE_STRUCT types._import() From noreply at buildbot.pypy.org Thu Sep 1 13:36:49 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 1 Sep 2011 13:36:49 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: local max Message-ID: <20110901113649.93BA38204C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: extradoc Changeset: r3892:ecee1e7f08e1 Date: 2011-08-26 09:01 +0200 http://bitbucket.org/pypy/extradoc/changeset/ecee1e7f08e1/ Log: local max diff --git a/talk/iwtc11/benchmarks/image/numpy_compare.py b/talk/iwtc11/benchmarks/image/numpy_compare.py --- a/talk/iwtc11/benchmarks/image/numpy_compare.py +++ b/talk/iwtc11/benchmarks/image/numpy_compare.py @@ -212,6 +212,39 @@ ###################################################################### +def range2d(a, b): + for y in range(a, b): + for 
x in range(a, b): + yield x, y + +def local_max(img): # pypy: 4.72 fps, cpython: 0.31 fps + s = 3 + out = img.new() + for x, y in img.pixels(border=s): + for dx, dy in range2d(-s, s+1): + if img[x+dx, y+dy] > img[x, y]: + break + else: + out[x, y] = 255 + return out + +def local_edge(img): # pypy: 10.25 fps, cpython: 0.25 fps (including sobel_magnitude) + out = img.new() + for x, y in img.pixels(border=1): + n = len([1 for dx, dy in range2d(-1, 2) if img[x+dx, y+dy] > img[x, y]]) + if n <= 3 and img[x,y] > 20: + out[x, y] = 255 + return out + +# Haar detector +# Tracing linesegments +# Subpixel edge-detection +# Distance-transform +# Subpixel correlation/edge-detection + +###################################################################### + + def mplayer(Image, fn='tv://', options=''): f = os.popen('mplayer -really-quiet -noframedrop ' + options + ' ' '-vo yuv4mpeg:file=/dev/stdout 2>/dev/null Author: Hakan Ardo Branch: extradoc Changeset: r3893:0fabc59e39ad Date: 2011-09-01 13:36 +0200 http://bitbucket.org/pypy/extradoc/changeset/0fabc59e39ad/ Log: benchmark results diff --git a/talk/iwtc11/benchmarks/image/numpy_compare.py b/talk/iwtc11/benchmarks/image/numpy_compare.py --- a/talk/iwtc11/benchmarks/image/numpy_compare.py +++ b/talk/iwtc11/benchmarks/image/numpy_compare.py @@ -146,7 +146,7 @@ return numpy.minimum(numpy.sqrt(dx*dx + dy*dy) / 4.0, 255).astype('B') @wrap_numpy -def sobel_magnitude_numpy2(a): # 89 fps +def sobel_magnitude_numpy2(a): # 89 fps (Image), 81 fps (NNImage), 80 fps (BilinImage) dx = -1.0 * a[0:-3, 0:-3] + 1.0 * a[0:-3, 2:-1] + \ -2.0 * a[1:-2, 0:-3] + 2.0 * a[1:-2, 2:-1] + \ -1.0 * a[2:-1, 0:-3] + 1.0 * a[2:-1, 2:-1] @@ -157,7 +157,7 @@ return res.astype('B') @wrap_numpy -def sobel_magnitude_numpy3(a): +def sobel_magnitude_numpy3(a): # 106 fps dx = numpy.zeros(a.shape) scipy.ndimage.filters.sobel(a, 1, dx) dy = numpy.zeros(a.shape) @@ -165,7 +165,7 @@ return numpy.minimum(numpy.sqrt(dx*dx + dy*dy) / 4.0, 255).astype('B') @wrap_numpy -def 
sobel_magnitude_numpy4(a): +def sobel_magnitude_numpy4(a): # 105 fps dx = numpy.zeros(a.shape) scipy.ndimage.filters.convolve(a, numpy.array([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], @@ -295,12 +295,12 @@ start = start0 = time() for fcnt, img in enumerate(mplayer(BilinImage, 'test.avi', '-benchmark')): #view(img) - view(sobel_magnitude(img)) - #view(sobel_magnitude_numpy(img)) + #view(sobel_magnitude(img)) + #view(sobel_magnitude_numpy2(img)) #view(magnify(img)) - #view(magnify_numpy(img)) + view(magnify_numpy(img)) #view(local_max(img)) - view(local_edge(sobel_magnitude(img))) + #view(local_edge(sobel_magnitude(img))) print 1.0 / (time() - start), 'fps, ', (fcnt-2) / (time() - start0), 'average fps' start = time() if fcnt==2: From noreply at buildbot.pypy.org Thu Sep 1 13:36:51 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 1 Sep 2011 13:36:51 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: hg merge Message-ID: <20110901113651.F1D788204C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: extradoc Changeset: r3894:2b35bb7878bc Date: 2011-09-01 13:36 +0200 http://bitbucket.org/pypy/extradoc/changeset/2b35bb7878bc/ Log: hg merge diff --git a/planning/micronumpy.txt b/planning/micronumpy.txt new file mode 100644 --- /dev/null +++ b/planning/micronumpy.txt @@ -0,0 +1,25 @@ +NEW TASKS +--------- + +- add in all base dtypes (bools, unsigned and signed ints, floats) + +- get the correct dtype results for binops + +- add more ways to select dtypes (aliases or more advanced code to choose the + types) + +- add in numpy.generic and the various subclasses, use them in returning + instances from subscripting (and possibly internally), also make them valid + for the dtype arguments + +- astype + +- a good sort function + +- indexing by tuples and lists + +- add multi-dim arrays + + - will need to refactor some functions + + - do we keep single-dim arrays separate? 
From noreply at buildbot.pypy.org Thu Sep 1 14:03:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 14:03:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Update. Message-ID: <20110901120321.BC4DD8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46978:da3d8c44a9c2 Date: 2011-09-01 14:02 +0200 http://bitbucket.org/pypy/pypy/changeset/da3d8c44a9c2/ Log: Update. diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -209,6 +209,8 @@ * Automatic unlimited stack (must be emulated__ so far) +* Support for other CPUs than x86 and x86-64 + .. __: `recursion depth limit`_ (*) Pickling, as well as changing threads, could be implemented by using @@ -217,9 +219,8 @@ "hard" switch (like now) when the C stack contains non-trivial C frames to save, and a "soft" switch (like previously) when it contains only simple calls from Python to Python. Soft-switched continulets would -also consume a bit less RAM, at the possible expense of making the -switch a bit slower (unsure about that; what is the Stackless Python -experience?). +also consume a bit less RAM, and the switch might be a bit faster too +(unsure about that; what is the Stackless Python experience?). 
Recursion depth limit From noreply at buildbot.pypy.org Thu Sep 1 15:14:22 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Sep 2011 15:14:22 +0200 (CEST) Subject: [pypy-commit] pypy faster-nested-scopes: two closure tests Message-ID: <20110901131422.89C0B8204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-nested-scopes Changeset: r46979:4263a5b46190 Date: 2011-09-01 12:02 +0200 http://bitbucket.org/pypy/pypy/changeset/4263a5b46190/ Log: two closure tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -396,3 +396,70 @@ --TICK-- jump(..., descr=) """) + + def test_global_closure_has_constant_cells(self): + log = self.run(""" + def make_adder(n): + def add(x): + return x + n + return add + add5 = make_adder(5) + def main(): + i = 0 + while i < 5000: + i = add5(i) # ID: call + """, []) + loop, = log.loops_by_id('call', is_entry_bridge=True) + assert loop.match(""" + guard_value(i6, 1, descr=...) + guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) + guard_value(i4, 0, descr=...) + guard_value(p3, ConstPtr(ptr14), descr=...) + i15 = getfield_gc_pure(p8, descr=) + i17 = int_lt(i15, 5000) + guard_true(i17, descr=...) + p18 = getfield_gc(p0, descr=) + guard_value(p18, ConstPtr(ptr19), descr=...) + p20 = getfield_gc(p18, descr=) + guard_value(p20, ConstPtr(ptr21), descr=...) + guard_not_invalidated(descr=...) + # most importantly, there is no getarrayitem_gc here + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) + i25 = force_token() + p26 = getfield_gc(p23, descr=) + guard_isnull(p26, descr=...) + i27 = getfield_gc(p23, descr=) + i28 = int_is_zero(i27) + guard_true(i28, descr=...) + p30 = getfield_gc(ConstPtr(ptr29), descr=) + guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) 
+ i32 = getfield_gc_pure(p30, descr=) + i33 = int_add_ovf(i15, i32) + guard_no_overflow(descr=...) + --TICK-- + jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + """) + + def test_local_closure_is_virtual(self): + log = self.run(""" + def main(): + i = 0 + while i < 5000: + def add(): + return i + 1 + i = add() # ID: call + """, []) + loop, = log.loops_by_id('call') + assert loop.match(""" + i8 = getfield_gc_pure(p6, descr=) + i10 = int_lt(i8, 5000) + guard_true(i10, descr=...) + i11 = force_token() + i13 = int_add(i8, 1) + --TICK-- + p22 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + """) From noreply at buildbot.pypy.org Thu Sep 1 15:14:26 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Sep 2011 15:14:26 +0200 (CEST) Subject: [pypy-commit] pypy faster-nested-scopes: merge default Message-ID: <20110901131426.851948204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-nested-scopes Changeset: r46980:a2c02ebec53c Date: 2011-09-01 12:03 +0200 http://bitbucket.org/pypy/pypy/changeset/a2c02ebec53c/ Log: merge default diff --git a/lib-python/modified-2.7/ctypes/util.py b/lib-python/modified-2.7/ctypes/util.py --- a/lib-python/modified-2.7/ctypes/util.py +++ b/lib-python/modified-2.7/ctypes/util.py @@ -72,8 +72,8 @@ return name if os.name == "posix" and sys.platform == "darwin": - from ctypes.macholib.dyld import dyld_find as _dyld_find def find_library(name): + from ctypes.macholib.dyld import dyld_find as _dyld_find possible = ['lib%s.dylib' % name, '%s.dylib' % name, '%s.framework/%s' % (name, name)] diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/gzip.py @@ -0,0 +1,514 @@ +"""Functions that read and write gzipped files. 
+ +The user of the file doesn't have to worry about the compression, +but random access is not allowed.""" + +# based on Andrew Kuchling's minigzip.py distributed with the zlib module + +import struct, sys, time, os +import zlib +import io +import __builtin__ + +__all__ = ["GzipFile","open"] + +FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16 + +READ, WRITE = 1, 2 + +def write32u(output, value): + # The L format writes the bit pattern correctly whether signed + # or unsigned. + output.write(struct.pack("' + + def _check_closed(self): + """Raises a ValueError if the underlying file object has been closed. + + """ + if self.closed: + raise ValueError('I/O operation on closed file.') + + def _init_write(self, filename): + self.name = filename + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + self.writebuf = [] + self.bufsize = 0 + + def _write_gzip_header(self): + self.fileobj.write('\037\213') # magic header + self.fileobj.write('\010') # compression method + fname = os.path.basename(self.name) + if fname.endswith(".gz"): + fname = fname[:-3] + flags = 0 + if fname: + flags = FNAME + self.fileobj.write(chr(flags)) + mtime = self.mtime + if mtime is None: + mtime = time.time() + write32u(self.fileobj, long(mtime)) + self.fileobj.write('\002') + self.fileobj.write('\377') + if fname: + self.fileobj.write(fname + '\000') + + def _init_read(self): + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + + def _read_gzip_header(self): + magic = self.fileobj.read(2) + if magic != '\037\213': + raise IOError, 'Not a gzipped file' + method = ord( self.fileobj.read(1) ) + if method != 8: + raise IOError, 'Unknown compression method' + flag = ord( self.fileobj.read(1) ) + self.mtime = read32(self.fileobj) + # extraflag = self.fileobj.read(1) + # os = self.fileobj.read(1) + self.fileobj.read(2) + + if flag & FEXTRA: + # Read & discard the extra field, if present + xlen = ord(self.fileobj.read(1)) + xlen = xlen + 256*ord(self.fileobj.read(1)) + 
self.fileobj.read(xlen) + if flag & FNAME: + # Read and discard a null-terminated string containing the filename + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FCOMMENT: + # Read and discard a null-terminated string containing a comment + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FHCRC: + self.fileobj.read(2) # Read & discard the 16-bit header CRC + + def write(self,data): + self._check_closed() + if self.mode != WRITE: + import errno + raise IOError(errno.EBADF, "write() on read-only GzipFile object") + + if self.fileobj is None: + raise ValueError, "write() on closed GzipFile object" + + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + if len(data) > 0: + self.size = self.size + len(data) + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + self.fileobj.write( self.compress.compress(data) ) + self.offset += len(data) + + return len(data) + + def read(self, size=-1): + self._check_closed() + if self.mode != READ: + import errno + raise IOError(errno.EBADF, "read() on write-only GzipFile object") + + if self.extrasize <= 0 and self.fileobj is None: + return '' + + readsize = 1024 + if size < 0: # get the whole thing + try: + while True: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + size = self.extrasize + elif size == 0: + return "" + else: # just get some more of it + try: + while size > self.extrasize: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + if size > self.extrasize: + size = self.extrasize + + offset = self.offset - self.extrastart + chunk = self.extrabuf[offset: offset + size] + self.extrasize = self.extrasize - size + + self.offset += size + return chunk + + def _unread(self, buf): + self.extrasize = len(buf) + self.extrasize + self.offset -= len(buf) + + def _read(self, size=1024): + if self.fileobj is None: 
+ raise EOFError, "Reached EOF" + + if self._new_member: + # If the _new_member flag is set, we have to + # jump to the next member, if there is one. + # + # First, check if we're at the end of the file; + # if so, it's time to stop; no more members to read. + pos = self.fileobj.tell() # Save current position + self.fileobj.seek(0, 2) # Seek to end of file + if pos == self.fileobj.tell(): + raise EOFError, "Reached EOF" + else: + self.fileobj.seek( pos ) # Return to original position + + self._init_read() + self._read_gzip_header() + self.decompress = zlib.decompressobj(-zlib.MAX_WBITS) + self._new_member = False + + # Read a chunk of data from the file + buf = self.fileobj.read(size) + + # If the EOF has been reached, flush the decompression object + # and mark this object as finished. + + if buf == "": + uncompress = self.decompress.flush() + self._read_eof() + self._add_read_data( uncompress ) + raise EOFError, 'Reached EOF' + + uncompress = self.decompress.decompress(buf) + self._add_read_data( uncompress ) + + if self.decompress.unused_data != "": + # Ending case: we've come to the end of a member in the file, + # so seek back to the start of the unused data, finish up + # this member, and read a new gzip header. 
+ # (The number of bytes to seek back is the length of the unused + # data, minus 8 because _read_eof() will rewind a further 8 bytes) + self.fileobj.seek( -len(self.decompress.unused_data)+8, 1) + + # Check the CRC and file size, and set the flag so we read + # a new member on the next call + self._read_eof() + self._new_member = True + + def _add_read_data(self, data): + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + offset = self.offset - self.extrastart + self.extrabuf = self.extrabuf[offset:] + data + self.extrasize = self.extrasize + len(data) + self.extrastart = self.offset + self.size = self.size + len(data) + + def _read_eof(self): + # We've read to the end of the file, so we have to rewind in order + # to reread the 8 bytes containing the CRC and the file size. + # We check the that the computed CRC and size of the + # uncompressed data matches the stored values. Note that the size + # stored is the true file size mod 2**32. + self.fileobj.seek(-8, 1) + crc32 = read32(self.fileobj) + isize = read32(self.fileobj) # may exceed 2GB + if crc32 != self.crc: + raise IOError("CRC check failed %s != %s" % (hex(crc32), + hex(self.crc))) + elif isize != (self.size & 0xffffffffL): + raise IOError, "Incorrect length of data produced" + + # Gzip files can be padded with zeroes and still have archives. + # Consume all zero bytes and set the file position to the first + # non-zero byte. 
See http://www.gzip.org/#faq8 + c = "\x00" + while c == "\x00": + c = self.fileobj.read(1) + if c: + self.fileobj.seek(-1, 1) + + @property + def closed(self): + return self.fileobj is None + + def close(self): + if self.fileobj is None: + return + if self.mode == WRITE: + self.fileobj.write(self.compress.flush()) + write32u(self.fileobj, self.crc) + # self.size may exceed 2GB, or even 4GB + write32u(self.fileobj, self.size & 0xffffffffL) + self.fileobj = None + elif self.mode == READ: + self.fileobj = None + if self.myfileobj: + self.myfileobj.close() + self.myfileobj = None + + def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH): + self._check_closed() + if self.mode == WRITE: + # Ensure the compressor's buffer is flushed + self.fileobj.write(self.compress.flush(zlib_mode)) + self.fileobj.flush() + + def fileno(self): + """Invoke the underlying file object's fileno() method. + + This will raise AttributeError if the underlying file object + doesn't support fileno(). + """ + return self.fileobj.fileno() + + def rewind(self): + '''Return the uncompressed stream file position indicator to the + beginning of the file''' + if self.mode != READ: + raise IOError("Can't rewind in write mode") + self.fileobj.seek(0) + self._new_member = True + self.extrabuf = "" + self.extrasize = 0 + self.extrastart = 0 + self.offset = 0 + + def readable(self): + return self.mode == READ + + def writable(self): + return self.mode == WRITE + + def seekable(self): + return True + + def seek(self, offset, whence=0): + if whence: + if whence == 1: + offset = self.offset + offset + else: + raise ValueError('Seek from end not supported') + if self.mode == WRITE: + if offset < self.offset: + raise IOError('Negative seek in write mode') + count = offset - self.offset + for i in range(count // 1024): + self.write(1024 * '\0') + self.write((count % 1024) * '\0') + elif self.mode == READ: + if offset == self.offset: + self.read(0) # to make sure that this file is open + return self.offset + if offset < 
self.offset: + # for negative seek, rewind and do positive seek + self.rewind() + count = offset - self.offset + for i in range(count // 1024): + self.read(1024) + self.read(count % 1024) + + return self.offset + + def readline(self, size=-1): + if size < 0: + # Shortcut common case - newline found in buffer. + offset = self.offset - self.extrastart + i = self.extrabuf.find('\n', offset) + 1 + if i > 0: + self.extrasize -= i - offset + self.offset += i - offset + return self.extrabuf[offset: i] + + size = sys.maxint + readsize = self.min_readsize + else: + readsize = size + bufs = [] + while size != 0: + c = self.read(readsize) + i = c.find('\n') + + # We set i=size to break out of the loop under two + # conditions: 1) there's no newline, and the chunk is + # larger than size, or 2) there is a newline, but the + # resulting line would be longer than 'size'. + if (size <= i) or (i == -1 and len(c) > size): + i = size - 1 + + if i >= 0 or c == '': + bufs.append(c[:i + 1]) # Add portion of last chunk + self._unread(c[i + 1:]) # Push back rest of chunk + break + + # Append chunk to list, decrease 'size', + bufs.append(c) + size = size - len(c) + readsize = min(size, readsize * 2) + if readsize > self.min_readsize: + self.min_readsize = min(readsize, self.min_readsize * 2, 512) + return ''.join(bufs) # Return resulting line + + +def _test(): + # Act like gzip; with -d, act like gunzip. + # The input file is not deleted, however, nor are any other gzip + # options or features supported. 
+ args = sys.argv[1:] + decompress = args and args[0] == "-d" + if decompress: + args = args[1:] + if not args: + args = ["-"] + for arg in args: + if decompress: + if arg == "-": + f = GzipFile(filename="", mode="rb", fileobj=sys.stdin) + g = sys.stdout + else: + if arg[-3:] != ".gz": + print "filename doesn't end in .gz:", repr(arg) + continue + f = open(arg, "rb") + g = __builtin__.open(arg[:-3], "wb") + else: + if arg == "-": + f = sys.stdin + g = GzipFile(filename="", mode="wb", fileobj=sys.stdout) + else: + f = __builtin__.open(arg, "rb") + g = open(arg + ".gz", "wb") + while True: + chunk = f.read(1024) + if not chunk: + break + g.write(chunk) + if g is not sys.stdout: + g.close() + if f is not sys.stdin: + f.close() + +if __name__ == '__main__': + _test() diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py --- a/lib-python/modified-2.7/tarfile.py +++ b/lib-python/modified-2.7/tarfile.py @@ -252,8 +252,8 @@ the high bit set. So we calculate two checksums, unsigned and signed. 
""" - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512])) + signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): @@ -265,7 +265,6 @@ if length is None: shutil.copyfileobj(src, dst) return - BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in xrange(blocks): @@ -802,19 +801,19 @@ if self.closed: raise ValueError("I/O operation on closed file") - buf = "" if self.buffer: if size is None: - buf = self.buffer + buf = self.buffer + self.fileobj.read() self.buffer = "" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() + buf += self.fileobj.read(size - len(buf)) else: - buf += self.fileobj.read(size - len(buf)) + if size is None: + buf = self.fileobj.read() + else: + buf = self.fileobj.read(size) self.position += len(buf) return buf diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -469,7 +469,8 @@ newargs = [] for argtype, arg in zip(argtypes, args): param = argtype.from_param(arg) - if argtype._type_ == 'P': # special-case for c_void_p + _type_ = getattr(argtype, '_type_', None) + if _type_ == 'P': # special-case for c_void_p param = param._get_buffer_value() elif self._is_primitive(argtype): param = param.value diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -169,6 +169,8 @@ def from_address(self, address): instance = StructOrUnion.__new__(self) + if isinstance(address, _rawffi.StructureInstance): + address = address.buffer instance.__dict__['_buffer'] = 
self._ffistruct.fromaddress(address) return instance diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -401,13 +401,19 @@ return "(arg: %s) "%self.arg if "\n" in self.buffer: if lineno == 0: - return self._ps2 + res = self.ps2 elif lineno == self.buffer.count("\n"): - return self._ps4 + res = self.ps4 else: - return self._ps3 + res = self.ps3 else: - return self._ps1 + res = self.ps1 + # Lazily call str() on self.psN, and cache the results using as key + # the object on which str() was called. This ensures that even if the + # same object is used e.g. for ps1 and ps2, str() is called only once. + if res not in self._pscache: + self._pscache[res] = str(res) + return self._pscache[res] def push_input_trans(self, itrans): self.input_trans_stack.append(self.input_trans) @@ -473,8 +479,7 @@ self.pos = 0 self.dirty = 1 self.last_command = None - self._ps1, self._ps2, self._ps3, self._ps4 = \ - map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + self._pscache = {} except: self.restore() raise diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -399,9 +399,7 @@ if b1 is object: continue if b1.__dict__.get('_mixin_', False): - assert b1.__bases__ == () or b1.__bases__ == (object,), ( - "mixin class %r should have no base" % (b1,)) - self.add_sources_for_class(b1, mixin=True) + self.add_mixin(b1) else: assert base is object, ("multiple inheritance only supported " "with _mixin_: %r" % (cls,)) @@ -469,6 +467,15 @@ return self.classdict[name] = Constant(value) + def add_mixin(self, base): + for subbase in base.__bases__: + if subbase is object: + continue + assert subbase.__dict__.get("_mixin_", False), ("Mixin class %r has non" + "mixin base class %r" % (base, subbase)) + self.add_mixin(subbase) + self.add_sources_for_class(base, mixin=True) + def add_sources_for_class(self, cls, 
mixin=False): for name, value in cls.__dict__.items(): self.add_source_attribute(name, value, mixin) diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,11 +1,10 @@ .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ -.. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ +.. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ -.. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/annotation`: .. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ @@ -55,7 +54,6 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py .. _`pypy/objspace`: .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ .. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py @@ -117,6 +115,7 @@ .. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ .. 
_`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ .. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +.. _`pypy/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/src/stacklet/ .. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/ .. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/ .. _`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/ diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -153,7 +153,7 @@ * Optionally, `various transformations`_ can then be applied which, for example, perform optimizations such as inlining, add capabilities - such as stackless_-style concurrency, or insert code for the + such as stackless-style concurrency (deprecated), or insert code for the `garbage collector`_. * Then, the graphs are converted to source code for the target platform @@ -255,7 +255,6 @@ .. _Python: http://docs.python.org/reference/ .. _Psyco: http://psyco.sourceforge.net -.. _stackless: stackless.html .. _`generate Just-In-Time Compilers`: jit/index.html .. _`JIT Generation in PyPy`: jit/index.html .. _`implement your own interpreter`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -24,6 +24,7 @@ _bisect _codecs _collections + `_continuation`_ `_ffi`_ _hashlib _io @@ -84,10 +85,6 @@ _winreg - Extra module with Stackless_ only: - - _stackless - Note that only some of these modules are built-in in a typical CPython installation, and the rest is from non built-in extension modules. This means that e.g. 
``import parser`` will, on CPython, @@ -108,11 +105,11 @@ .. the nonstandard modules are listed below... .. _`__pypy__`: __pypy__-module.html +.. _`_continuation`: stackless.html .. _`_ffi`: ctypes-implementation.html .. _`_rawffi`: ctypes-implementation.html .. _`_minimal_curses`: config/objspace.usemodules._minimal_curses.html .. _`cpyext`: http://morepypy.blogspot.com/2010/04/using-cpython-extension-modules-with.html -.. _Stackless: stackless.html Differences related to garbage collection strategies diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -67,7 +67,6 @@ * ``libssl-dev`` (for the optional ``_ssl`` module) * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) - * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) 3. Translation is time-consuming -- 45 minutes on a very fast machine -- @@ -120,19 +119,8 @@ Installation_ below. The ``translate.py`` script takes a very large number of options controlling -what to translate and how. See ``translate.py -h``. Some of the more -interesting options (but for now incompatible with the JIT) are: - - * ``--stackless``: this produces a pypy-c that includes features - inspired by `Stackless Python `__. - - * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid|minimark``: - choose between using - the `Boehm-Demers-Weiser garbage collector`_, our reference - counting implementation or one of own collector implementations - (the default depends on the optimization level but is usually - ``minimark``). - +what to translate and how. See ``translate.py -h``. The default options +should be suitable for mostly everybody by now. Find a more detailed description of the various options in our `configuration sections`_. 
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -42,7 +42,6 @@ JIT: windows, linux, os/x no JIT: windows, linux, os/x sandbox: linux, os/x - stackless: windows, linux, os/x * write release announcement pypy/doc/release-x.y(.z).txt the release announcement should contain a direct link to the download page diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -35,7 +35,7 @@ * `Differences between PyPy and CPython`_ * `What PyPy can do for your objects`_ - * `Stackless and coroutines`_ + * `Continulets and greenlets`_ * `JIT Generation in PyPy`_ * `Sandboxing Python code`_ @@ -292,8 +292,6 @@ `pypy/translator/jvm/`_ the Java backend -`pypy/translator/stackless/`_ the `Stackless Transform`_ - `pypy/translator/tool/`_ helper tools for translation, including the Pygame `graph viewer`_ @@ -318,7 +316,7 @@ .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html -.. _`Stackless and coroutines`: stackless.html +.. _`Continulets and greenlets`: stackless.html .. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`rpython`: coding-guide.html#rpython @@ -337,7 +335,6 @@ .. _`low-level type system`: rtyper.html#low-level-type .. _`object-oriented type system`: rtyper.html#oo-type .. _`garbage collector`: garbage_collection.html -.. _`Stackless Transform`: translation.html#the-stackless-transform .. _`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter .. _`.NET`: http://www.microsoft.com/net/ .. 
_Mono: http://www.mono-project.com/ diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -103,7 +103,7 @@ The meta-interpreter starts interpreting the JIT bytecode. Each operation is executed and then recorded in a list of operations, called the trace. -Operations can have a list of boxes that operate on, arguments. Some operations +Operations can have a list of boxes they operate on, arguments. Some operations (like GETFIELD and GETARRAYITEM) also have special objects that describe how their arguments are laid out in memory. All possible operations generated by tracing are listed in metainterp/resoperation.py. When a (interpreter-level) diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -134,69 +134,6 @@ a hierarchy of Address classes, in a typical static-OO-programming style. -``rstack`` -========== - -The `pypy/rlib/rstack.py`_ module allows an RPython program to control its own execution stack. -This is only useful if the program is translated using stackless. An old -description of the exposed functions is below. - -We introduce an RPython type ``frame_stack_top`` and a built-in function -``yield_current_frame_to_caller()`` that work as follows (see example below): - -* The built-in function ``yield_current_frame_to_caller()`` causes the current - function's state to be captured in a new ``frame_stack_top`` object that is - returned to the parent. Only one frame, the current one, is captured this - way. The current frame is suspended and the caller continues to run. Note - that the caller is only resumed once: when - ``yield_current_frame_to_caller()`` is called. See below. - -* A ``frame_stack_top`` object can be jumped to by calling its ``switch()`` - method with no argument. 
- -* ``yield_current_frame_to_caller()`` and ``switch()`` themselves return a new - ``frame_stack_top`` object: the freshly captured state of the caller of the - source ``switch()`` that was just executed, or None in the case described - below. - -* the function that called ``yield_current_frame_to_caller()`` also has a - normal return statement, like all functions. This statement must return - another ``frame_stack_top`` object. The latter is *not* returned to the - original caller; there is no way to return several times to the caller. - Instead, it designates the place to which the execution must jump, as if by - a ``switch()``. The place to which we jump this way will see a None as the - source frame stack top. - -* every frame stack top must be resumed once and only once. Not resuming - it at all causes a leak. Resuming it several times causes a crash. - -* a function that called ``yield_current_frame_to_caller()`` should not raise. - It would have no implicit parent frame to propagate the exception to. That - would be a crashingly bad idea. - -The following example would print the numbers from 1 to 7 in order:: - - def g(): - print 2 - frametop_before_5 = yield_current_frame_to_caller() - print 4 - frametop_before_7 = frametop_before_5.switch() - print 6 - return frametop_before_7 - - def f(): - print 1 - frametop_before_4 = g() - print 3 - frametop_before_6 = frametop_before_4.switch() - print 5 - frametop_after_return = frametop_before_6.switch() - print 7 - assert frametop_after_return is None - - f() - - ``streamio`` ============ diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -8,446 +8,289 @@ ================ PyPy can expose to its user language features similar to the ones -present in `Stackless Python`_: **no recursion depth limit**, and the -ability to write code in a **massively concurrent style**. 
It actually -exposes three different paradigms to choose from: +present in `Stackless Python`_: the ability to write code in a +**massively concurrent style**. (It does not (any more) offer the +ability to run with no `recursion depth limit`_, but the same effect +can be achieved indirectly.) -* `Tasklets and channels`_; +This feature is based on a custom primitive called a continulet_. +Continulets can be directly used by application code, or it is possible +to write (entirely at app-level) more user-friendly interfaces. -* Greenlets_; +Currently PyPy implements greenlets_ on top of continulets. It would be +easy to implement tasklets and channels as well, emulating the model +of `Stackless Python`_. -* Plain coroutines_. +Continulets are extremely light-weight, which means that PyPy should be +able to handle programs containing large amounts of them. However, due +to an implementation restriction, a PyPy compiled with +``--gcrootfinder=shadowstack`` consumes at least one page of physical +memory (4KB) per live continulet, and half a megabyte of virtual memory +on 32-bit or a complete megabyte on 64-bit. Moreover, the feature is +only available (so far) on x86 and x86-64 CPUs; for other CPUs you need +to add a short page of custom assembler to +`pypy/translator/c/src/stacklet/`_. -All of them are extremely light-weight, which means that PyPy should be -able to handle programs containing large amounts of coroutines, tasklets -and greenlets. +Theory +====== -Requirements -++++++++++++++++ +The fundamental idea is that, at any point in time, the program happens +to run one stack of frames (or one per thread, in case of +multi-threading). To see the stack, start at the top frame and follow +the chain of ``f_back`` until you reach the bottom frame. From the +point of view of one of these frames, it has a ``f_back`` pointing to +another frame (unless it is the bottom frame), and it is itself being +pointed to by another frame (unless it is the top frame). 
-If you are running py.py on top of CPython, then you need to enable -the _stackless module by running it as follows:: +The theory behind continulets is to literally take the previous sentence +as definition of "an O.K. situation". The trick is that there are +O.K. situations that are more complex than just one stack: you will +always have one stack, but you can also have in addition one or more +detached *cycles* of frames, such that by following the ``f_back`` chain +you run in a circle. But note that these cycles are indeed completely +detached: the top frame (the currently running one) is always the one +which is not the ``f_back`` of anybody else, and it is always the top of +a stack that ends with the bottom frame, never a part of these extra +cycles. - py.py --withmod-_stackless +How do you create such cycles? The fundamental operation to do so is to +take two frames and *permute* their ``f_back`` --- i.e. exchange them. +You can permute any two ``f_back`` without breaking the rule of "an O.K. +situation". Say for example that ``f`` is some frame halfway down the +stack, and you permute its ``f_back`` with the ``f_back`` of the top +frame. Then you have removed from the normal stack all intermediate +frames, and turned them into one stand-alone cycle. By doing the same +permutation again you restore the original situation. -This is implemented internally using greenlets, so it only works on a -platform where `greenlets`_ are supported. A few features do -not work this way, though, and really require a translated -``pypy-c``. +In practice, in PyPy, you cannot change the ``f_back`` of an abitrary +frame, but only of frames stored in ``continulets``. -To obtain a translated version of ``pypy-c`` that includes Stackless -support, run translate.py as follows:: - - cd pypy/translator/goal - python translate.py --stackless +Continulets are internally implemented using stacklets. 
Stacklets are a +bit more primitive (they are really one-shot continuations), but that +idea only works in C, not in Python. The basic idea of continulets is +to have at any point in time a complete valid stack; this is important +e.g. to correctly propagate exceptions (and it seems to give meaningful +tracebacks too). Application level interface ============================= -A stackless PyPy contains a module called ``stackless``. The interface -exposed by this module have not been refined much, so it should be -considered in-flux (as of 2007). -So far, PyPy does not provide support for ``stackless`` in a threaded -environment. This limitation is not fundamental, as previous experience -has shown, so supporting this would probably be reasonably easy. +.. _continulet: -An interesting point is that the same ``stackless`` module can provide -a number of different concurrency paradigms at the same time. From a -theoretical point of view, none of above-mentioned existing three -paradigms considered on its own is new: two of them are from previous -Python work, and the third one is a variant of the classical coroutine. -The new part is that the PyPy implementation manages to provide all of -them and let the user implement more. Moreover - and this might be an -important theoretical contribution of this work - we manage to provide -these concurrency concepts in a "composable" way. In other words, it -is possible to naturally mix in a single application multiple -concurrency paradigms, and multiple unrelated usages of the same -paradigm. This is discussed in the Composability_ section below. +Continulets ++++++++++++ +A translated PyPy contains by default a module called ``_continuation`` +exporting the type ``continulet``. A ``continulet`` object from this +module is a container that stores a "one-shot continuation". It plays +the role of an extra frame you can insert in the stack, and whose +``f_back`` can be changed. 
-Infinite recursion -++++++++++++++++++ +To make a continulet object, call ``continulet()`` with a callable and +optional extra arguments. -Any stackless PyPy executable natively supports recursion that is only -limited by the available memory. As in normal Python, though, there is -an initial recursion limit (which is 5000 in all pypy-c's, and 1000 in -CPython). It can be changed with ``sys.setrecursionlimit()``. With a -stackless PyPy, any value is acceptable - use ``sys.maxint`` for -unlimited. +Later, the first time you ``switch()`` to the continulet, the callable +is invoked with the same continulet object as the extra first argument. +At that point, the one-shot continuation stored in the continulet points +to the caller of ``switch()``. In other words you have a perfectly +normal-looking stack of frames. But when ``switch()`` is called again, +this stored one-shot continuation is exchanged with the current one; it +means that the caller of ``switch()`` is suspended with its continuation +stored in the container, and the old continuation from the continulet +object is resumed. -In some cases, you can write Python code that causes interpreter-level -infinite recursion -- i.e. infinite recursion without going via -application-level function calls. It is possible to limit that too, -with ``_stackless.set_stack_depth_limit()``, or to unlimit it completely -by setting it to ``sys.maxint``. +The most primitive API is actually 'permute()', which just permutes the +one-shot continuation stored in two (or more) continulets. +In more details: -Coroutines -++++++++++ +* ``continulet(callable, *args, **kwds)``: make a new continulet. + Like a generator, this only creates it; the ``callable`` is only + actually called the first time it is switched to. It will be + called as follows:: -A Coroutine is similar to a very small thread, with no preemptive scheduling. 
-Within a family of coroutines, the flow of execution is explicitly -transferred from one to another by the programmer. When execution is -transferred to a coroutine, it begins to execute some Python code. When -it transfers execution away from itself it is temporarily suspended, and -when execution returns to it it resumes its execution from the -point where it was suspended. Conceptually, only one coroutine is -actively running at any given time (but see Composability_ below). + callable(cont, *args, **kwds) -The ``stackless.coroutine`` class is instantiated with no argument. -It provides the following methods and attributes: + where ``cont`` is the same continulet object. -* ``stackless.coroutine.getcurrent()`` + Note that it is actually ``cont.__init__()`` that binds + the continulet. It is also possible to create a not-bound-yet + continulet by calling explicitly ``continulet.__new__()``, and + only bind it later by calling explicitly ``cont.__init__()``. - Static method returning the currently running coroutine. There is a - so-called "main" coroutine object that represents the "outer" - execution context, where your main program started and where it runs - as long as it does not switch to another coroutine. +* ``cont.switch(value=None, to=None)``: start the continulet if + it was not started yet. Otherwise, store the current continuation + in ``cont``, and activate the target continuation, which is the + one that was previously stored in ``cont``. Note that the target + continuation was itself previously suspended by another call to + ``switch()``; this older ``switch()`` will now appear to return. + The ``value`` argument is any object that is carried to the target + and returned by the target's ``switch()``. -* ``coro.bind(callable, *args, **kwds)`` + If ``to`` is given, it must be another continulet object. In + that case, performs a "double switch": it switches as described + above to ``cont``, and then immediately switches again to ``to``. 
+ This is different from switching directly to ``to``: the current + continuation gets stored in ``cont``, the old continuation from + ``cont`` gets stored in ``to``, and only then we resume the + execution from the old continuation out of ``to``. - Bind the coroutine so that it will execute ``callable(*args, - **kwds)``. The call is not performed immediately, but only the - first time we call the ``coro.switch()`` method. A coroutine must - be bound before it is switched to. When the coroutine finishes - (because the call to the callable returns), the coroutine exits and - implicitly switches back to another coroutine (its "parent"); after - this point, it is possible to bind it again and switch to it again. - (Which coroutine is the parent of which is not documented, as it is - likely to change when the interface is refined.) +* ``cont.throw(type, value=None, tb=None, to=None)``: similar to + ``switch()``, except that immediately after the switch is done, raise + the given exception in the target. -* ``coro.switch()`` +* ``cont.is_pending()``: return True if the continulet is pending. + This is False when it is not initialized (because we called + ``__new__`` and not ``__init__``) or when it is finished (because + the ``callable()`` returned). When it is False, the continulet + object is empty and cannot be ``switch()``-ed to. - Suspend the current (caller) coroutine, and resume execution in the - target coroutine ``coro``. +* ``permute(*continulets)``: a global function that permutes the + continuations stored in the given continulets arguments. Mostly + theoretical. In practice, using ``cont.switch()`` is easier and + more efficient than using ``permute()``; the latter does not on + its own change the currently running frame. -* ``coro.kill()`` - Kill ``coro`` by sending a CoroutineExit exception and switching - execution immediately to it. This exception can be caught in the - coroutine itself and can be raised from any call to ``coro.switch()``. 
- This exception isn't propagated to the parent coroutine. +Genlets ++++++++ -* ``coro.throw(type, value)`` +The ``_continuation`` module also exposes the ``generator`` decorator:: - Insert an exception in ``coro`` an resume switches execution - immediately to it. In the coroutine itself, this exception - will come from any call to ``coro.switch()`` and can be caught. If the - exception isn't caught, it will be propagated to the parent coroutine. + @generator + def f(cont, a, b): + cont.switch(a + b) + cont.switch(a + b + 1) -When a coroutine is garbage-collected, it gets the ``.kill()`` method sent to -it. This happens at the point the next ``.switch`` method is called, so the -target coroutine of this call will be executed only after the ``.kill`` has -finished. + for i in f(10, 20): + print i -Example -~~~~~~~ +This example prints 30 and 31. The only advantage over using regular +generators is that the generator itself is not limited to ``yield`` +statements that must all occur syntactically in the same function. +Instead, we can pass around ``cont``, e.g. to nested sub-functions, and +call ``cont.switch(x)`` from there. -Here is a classical producer/consumer example: an algorithm computes a -sequence of values, while another consumes them. For our purposes we -assume that the producer can generate several values at once, and the -consumer can process up to 3 values in a batch - it can also process -batches with fewer than 3 values without waiting for the producer (which -would be messy to express with a classical Python generator). :: +The ``generator`` decorator can also be applied to methods:: - def producer(lst): - while True: - ...compute some more values... - lst.extend(new_values) - coro_consumer.switch() - - def consumer(lst): - while True: - # First ask the producer for more values if needed - while len(lst) == 0: - coro_producer.switch() - # Process the available values in a batch, but at most 3 - batch = lst[:3] - del lst[:3] - ...process batch... 
- - # Initialize two coroutines with a shared list as argument - exchangelst = [] - coro_producer = coroutine() - coro_producer.bind(producer, exchangelst) - coro_consumer = coroutine() - coro_consumer.bind(consumer, exchangelst) - - # Start running the consumer coroutine - coro_consumer.switch() - - -Tasklets and channels -+++++++++++++++++++++ - -The ``stackless`` module also provides an interface that is roughly -compatible with the interface of the ``stackless`` module in `Stackless -Python`_: it contains ``stackless.tasklet`` and ``stackless.channel`` -classes. Tasklets are also similar to microthreads, but (like coroutines) -they don't actually run in parallel with other microthreads; instead, -they synchronize and exchange data with each other over Channels, and -these exchanges determine which Tasklet runs next. - -For usage reference, see the documentation on the `Stackless Python`_ -website. - -Note that Tasklets and Channels are implemented at application-level in -`lib_pypy/stackless.py`_ on top of coroutines_. You can refer to this -module for more details and API documentation. - -The stackless.py code tries to resemble the stackless C code as much -as possible. This makes the code somewhat unpythonic. - -Bird's eye view of tasklets and channels -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Tasklets are a bit like threads: they encapsulate a function in such a way that -they can be suspended/restarted any time. Unlike threads, they won't -run concurrently, but must be cooperative. When using stackless -features, it is vitally important that no action is performed that blocks -everything else. In particular, blocking input/output should be centralized -to a single tasklet. - -Communication between tasklets is done via channels. -There are three ways for a tasklet to give up control: - -1. call ``stackless.schedule()`` -2. send something over a channel -3. 
receive something from a channel - -A (live) tasklet can either be running, waiting to get scheduled, or be -blocked by a channel. - -Scheduling is done in strictly round-robin manner. A blocked tasklet -is removed from the scheduling queue and will be reinserted when it -becomes unblocked. - -Example -~~~~~~~ - -Here is a many-producers many-consumers example, where any consumer can -process the result of any producer. For this situation we set up a -single channel where all producer send, and on which all consumers -wait:: - - def producer(chan): - while True: - chan.send(...next value...) - - def consumer(chan): - while True: - x = chan.receive() - ...do something with x... - - # Set up the N producer and M consumer tasklets - common_channel = stackless.channel() - for i in range(N): - stackless.tasklet(producer, common_channel)() - for i in range(M): - stackless.tasklet(consumer, common_channel)() - - # Run it all - stackless.run() - -Each item sent over the channel is received by one of the waiting -consumers; which one is not specified. The producers block until their -item is consumed: the channel is not a queue, but rather a meeting point -which causes tasklets to block until both a consumer and a producer are -ready. In practice, the reason for having several consumers receiving -on a single channel is that some of the consumers can be busy in other -ways part of the time. For example, each consumer might receive a -database request, process it, and send the result to a further channel -before it asks for the next request. In this situation, further -requests can still be received by other consumers. + class X: + @generator + def f(self, cont, a, b): + ... Greenlets +++++++++ -A Greenlet is a kind of primitive Tasklet with a lower-level interface -and with exact control over the execution order. Greenlets are similar -to Coroutines, with a slightly different interface: greenlets put more -emphasis on a tree structure. 
The various greenlets of a program form a -precise tree, which fully determines their order of execution. +Greenlets are implemented on top of continulets in `lib_pypy/greenlet.py`_. +See the official `documentation of the greenlets`_. -For usage reference, see the `documentation of the greenlets`_. -The PyPy interface is identical. You should use ``greenlet.greenlet`` -instead of ``stackless.greenlet`` directly, because the greenlet library -can give you the latter when you ask for the former on top of PyPy. +Note that unlike the CPython greenlets, this version does not suffer +from GC issues: if the program "forgets" an unfinished greenlet, it will +always be collected at the next garbage collection. -PyPy's greenlets do not suffer from the cyclic GC limitation that the -CPython greenlets have: greenlets referencing each other via local -variables tend to leak on top of CPython (where it is mostly impossible -to do the right thing). It works correctly on top of PyPy. +Unimplemented features +++++++++++++++++++++++ -Coroutine Pickling -++++++++++++++++++ +The following features (present in some past Stackless version of PyPy) +are for the time being not supported any more: -Coroutines and tasklets can be pickled and unpickled, i.e. serialized to -a string of bytes for the purpose of storage or transmission. This -allows "live" coroutines or tasklets to be made persistent, moved to -other machines, or cloned in any way. The standard ``pickle`` module -works with coroutines and tasklets (at least in a translated ``pypy-c``; -unpickling live coroutines or tasklets cannot be easily implemented on -top of CPython). +* Tasklets and channels (needs to be rewritten at app-level) -To be able to achieve this result, we have to consider many objects that -are not normally pickleable in CPython. 
Here again, the `Stackless -Python`_ implementation has paved the way, and we follow the same -general design decisions: simple internal objects like bound method -objects and various kinds of iterators are supported; frame objects can -be fully pickled and unpickled -(by serializing a reference to the bytecode they are -running in addition to all the local variables). References to globals -and modules are pickled by name, similarly to references to functions -and classes in the traditional CPython ``pickle``. +* Coroutines (could be rewritten at app-level) -The "magic" part of this process is the implementation of the unpickling -of a chain of frames. The Python interpreter of PyPy uses -interpreter-level recursion to represent application-level calls. The -reason for this is that it tremendously simplifies the implementation of -the interpreter itself. Indeed, in Python, almost any operation can -potentially result in a non-tail-recursive call to another Python -function. This makes writing a non-recursive interpreter extremely -tedious; instead, we rely on lower-level transformations during the -translation process to control this recursion. This is the `Stackless -Transform`_, which is at the heart of PyPy's support for stackless-style -concurrency. +* Pickling and unpickling continulets (*) -At any point in time, a chain of Python-level frames corresponds to a -chain of interpreter-level frames (e.g. C frames in pypy-c), where each -single Python-level frame corresponds to one or a few interpreter-level -frames - depending on the length of the interpreter-level call chain -from one bytecode evaluation loop to the next (recursively invoked) one. +* Continuing execution of a continulet in a different thread (*) -This means that it is not sufficient to simply create a chain of Python -frame objects in the heap of a process before we can resume execution of -these newly built frames. We must recreate a corresponding chain of -interpreter-level frames. 
To this end, we have inserted a few *named -resume points* (see 3.2.4, in `D07.1 Massive Parallelism and Translation Aspects`_) in the Python interpreter of PyPy. This is the -motivation for implementing the interpreter-level primitives -``resume_state_create()`` and ``resume_state_invoke()``, the powerful -interface that allows an RPython program to artificially rebuild a chain -of calls in a reflective way, completely from scratch, and jump to it. +* Automatic unlimited stack (must be emulated__ so far) -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. __: `recursion depth limit`_ -Example -~~~~~~~ +(*) Pickling, as well as changing threads, could be implemented by using +a "soft" stack switching mode again. We would get either "hard" or +"soft" switches, similarly to Stackless Python 3rd version: you get a +"hard" switch (like now) when the C stack contains non-trivial C frames +to save, and a "soft" switch (like previously) when it contains only +simple calls from Python to Python. Soft-switched continulets would +also consume a bit less RAM, at the possible expense of making the +switch a bit slower (unsure about that; what is the Stackless Python +experience?). -(See `demo/pickle_coroutine.py`_ for the complete source of this demo.) -Consider a program which contains a part performing a long-running -computation:: +Recursion depth limit ++++++++++++++++++++++ - def ackermann(x, y): - if x == 0: - return y + 1 - if y == 0: - return ackermann(x - 1, 1) - return ackermann(x - 1, ackermann(x, y - 1)) +You can use continulets to emulate the infinite recursion depth present +in Stackless Python and in stackless-enabled older versions of PyPy. -By using pickling, we can save the state of the computation while it is -running, for the purpose of restoring it later and continuing the -computation at another time or on a different machine. 
However, -pickling does not produce a whole-program dump: it can only pickle -individual coroutines. This means that the computation should be -started in its own coroutine:: +The trick is to start a continulet "early", i.e. when the recursion +depth is very low, and switch to it "later", i.e. when the recursion +depth is high. Example:: - # Make a coroutine that will run 'ackermann(3, 8)' - coro = coroutine() - coro.bind(ackermann, 3, 8) + from _continuation import continulet - # Now start running the coroutine - result = coro.switch() + def invoke(_, callable, arg): + return callable(arg) -The coroutine itself must switch back to the main program when it needs -to be interrupted (we can only pickle suspended coroutines). Due to -current limitations this requires an explicit check in the -``ackermann()`` function:: + def bootstrap(c): + # this loop runs forever, at a very low recursion depth + callable, arg = c.switch() + while True: + # start a new continulet from here, and switch to + # it using an "exchange", i.e. a switch with to=. + to = continulet(invoke, callable, arg) + callable, arg = c.switch(to=to) - def ackermann(x, y): - if interrupt_flag: # test a global flag - main.switch() # and switch back to 'main' if it is set - if x == 0: - return y + 1 - if y == 0: - return ackermann(x - 1, 1) - return ackermann(x - 1, ackermann(x, y - 1)) + c = continulet(bootstrap) + c.switch() -The global ``interrupt_flag`` would be set for example by a timeout, or -by a signal handler reacting to Ctrl-C, etc. It causes the coroutine to -transfer control back to the main program. 
The execution comes back -just after the line ``coro.switch()``, where we can pickle the coroutine -if necessary:: - if not coro.is_alive: - print "finished; the result is:", result - else: - # save the state of the suspended coroutine - f = open('demo.pickle', 'w') - pickle.dump(coro, f) - f.close() + def recursive(n): + if n == 0: + return ("ok", n) + if n % 200 == 0: + prev = c.switch((recursive, n - 1)) + else: + prev = recursive(n - 1) + return (prev[0], prev[1] + 1) -The process can then stop. At any later time, or on another machine, -we can reload the file and restart the coroutine with:: + print recursive(999999) # prints ('ok', 999999) - f = open('demo.pickle', 'r') - coro = pickle.load(f) - f.close() - result = coro.switch() +Note that if you press Ctrl-C while running this example, the traceback +will be built with *all* recursive() calls so far, even if this is more +than the number that can possibly fit in the C stack. These frames are +"overlapping" each other in the sense of the C stack; more precisely, +they are copied out of and into the C stack as needed. -Limitations -~~~~~~~~~~~ +(The example above also makes use of the following general "guideline" +to help newcomers write continulets: in ``bootstrap(c)``, only call +methods on ``c``, not on another continulet object. That's why we wrote +``c.switch(to=to)`` and not ``to.switch()``, which would mess up the +state. This is however just a guideline; in general we would recommend +to use other interfaces like genlets and greenlets.) -Coroutine pickling is subject to some limitations. First of all, it is -not a whole-program "memory dump". It means that only the "local" state -of a coroutine is saved. The local state is defined to include the -chain of calls and the local variables, but not for example the value of -any global variable. -As in normal Python, the pickle will not include any function object's -code, any class definition, etc., but only references to functions and -classes. 
Unlike normal Python, the pickle contains frames. A pickled -frame stores a bytecode index, representing the current execution -position. This means that the user program cannot be modified *at all* -between pickling and unpickling! - -On the other hand, the pickled data is fairly independent from the -platform and from the PyPy version. - -Pickling/unpickling fails if the coroutine is suspended in a state that -involves Python frames which were *indirectly* called. To define this -more precisely, a Python function can issue a regular function or method -call to invoke another Python function - this is a *direct* call and can -be pickled and unpickled. But there are many ways to invoke a Python -function indirectly. For example, most operators can invoke a special -method ``__xyz__()`` on a class, various built-in functions can call -back Python functions, signals can invoke signal handlers, and so on. -These cases are not supported yet. - - -Composability -+++++++++++++ +Theory of composability ++++++++++++++++++++++++ Although the concept of coroutines is far from new, they have not been generally integrated into mainstream languages, or only in limited form (like generators in Python and iterators in C#). We can argue that a possible reason for that is that they do not scale well when a program's complexity increases: they look attractive in small examples, but the -models that require explicit switching, by naming the target coroutine, -do not compose naturally. This means that a program that uses -coroutines for two unrelated purposes may run into conflicts caused by -unexpected interactions. +models that require explicit switching, for example by naming the target +coroutine, do not compose naturally. This means that a program that +uses coroutines for two unrelated purposes may run into conflicts caused +by unexpected interactions. 
To illustrate the problem, consider the following example (simplified -code; see the full source in -`pypy/module/_stackless/test/test_composable_coroutine.py`_). First, a -simple usage of coroutine:: +code using a theorical ``coroutine`` class). First, a simple usage of +coroutine:: main_coro = coroutine.getcurrent() # the main (outer) coroutine data = [] @@ -530,74 +373,35 @@ main coroutine, which confuses the ``generator_iterator.next()`` method (it gets resumed, but not as a result of a call to ``Yield()``). -As part of trying to combine multiple different paradigms into a single -application-level module, we have built a way to solve this problem. -The idea is to avoid the notion of a single, global "main" coroutine (or -a single main greenlet, or a single main tasklet). Instead, each -conceptually separated user of one of these concurrency interfaces can -create its own "view" on what the main coroutine/greenlet/tasklet is, -which other coroutine/greenlet/tasklets there are, and which of these is -the currently running one. Each "view" is orthogonal to the others. In -particular, each view has one (and exactly one) "current" -coroutine/greenlet/tasklet at any point in time. When the user switches -to a coroutine/greenlet/tasklet, it implicitly means that he wants to -switch away from the current coroutine/greenlet/tasklet *that belongs to -the same view as the target*. +Thus the notion of coroutine is *not composable*. By opposition, the +primitive notion of continulets is composable: if you build two +different interfaces on top of it, or have a program that uses twice the +same interface in two parts, then assuming that both parts independently +work, the composition of the two parts still works. -The precise application-level interface has not been fixed yet; so far, -"views" in the above sense are objects of the type -``stackless.usercostate``. 
The above two examples can be rewritten in -the following way:: +A full proof of that claim would require careful definitions, but let us +just claim that this fact is true because of the following observation: +the API of continulets is such that, when doing a ``switch()``, it +requires the program to have some continulet to explicitly operate on. +It shuffles the current continuation with the continuation stored in +that continulet, but has no effect outside. So if a part of a program +has a continulet object, and does not expose it as a global, then the +rest of the program cannot accidentally influence the continuation +stored in that continulet object. - producer_view = stackless.usercostate() # a local view - main_coro = producer_view.getcurrent() # the main (outer) coroutine - ... - producer_coro = producer_view.newcoroutine() - ... - -and:: - - generators_view = stackless.usercostate() - - def generator(f): - def wrappedfunc(*args, **kwds): - g = generators_view.newcoroutine(generator_iterator) - ... - - ...generators_view.getcurrent()... - -Then the composition ``grab_values()`` works as expected, because the -two views are independent. The coroutine captured as ``self.caller`` in -the ``generator_iterator.next()`` method is the main coroutine of the -``generators_view``. It is no longer the same object as the main -coroutine of the ``producer_view``, so when ``data_producer()`` issues -the following command:: - - main_coro.switch() - -the control flow cannot accidentally jump back to -``generator_iterator.next()``. In other words, from the point of view -of ``producer_view``, the function ``grab_next_value()`` always runs in -its main coroutine ``main_coro`` and the function ``data_producer`` in -its coroutine ``producer_coro``. This is the case independently of -which ``generators_view``-based coroutine is the current one when -``grab_next_value()`` is called. 
- -Only code that has explicit access to the ``producer_view`` or its -coroutine objects can perform switches that are relevant for the -generator code. If the view object and the coroutine objects that share -this view are all properly encapsulated inside the generator logic, no -external code can accidentally temper with the expected control flow any -longer. - -In conclusion: we will probably change the app-level interface of PyPy's -stackless module in the future to not expose coroutines and greenlets at -all, but only views. They are not much more difficult to use, and they -scale automatically to larger programs. +In other words, if we regard the continulet object as being essentially +a modifiable ``f_back``, then it is just a link between the frame of +``callable()`` and the parent frame --- and it cannot be arbitrarily +changed by unrelated code, as long as they don't explicitly manipulate +the continulet object. Typically, both the frame of ``callable()`` +(commonly a local function) and its parent frame (which is the frame +that switched to it) belong to the same class or module; so from that +point of view the continulet is a purely local link between two local +frames. It doesn't make sense to have a concept that allows this link +to be manipulated from outside. .. _`Stackless Python`: http://www.stackless.com .. _`documentation of the greenlets`: http://packages.python.org/greenlet/ -.. _`Stackless Transform`: translation.html#the-stackless-transform .. include:: _ref.txt diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -552,14 +552,15 @@ The stackless transform converts functions into a form that knows how to save the execution point and active variables into a heap structure -and resume execution at that point. This is used to implement +and resume execution at that point. 
This was used to implement coroutines as an RPython-level feature, which in turn are used to -implement `coroutines, greenlets and tasklets`_ as an application +implement coroutines, greenlets and tasklets as an application level feature for the Standard Interpreter. -Enable the stackless transformation with :config:`translation.stackless`. +The stackless transformation has been deprecated and is no longer +available in trunk. It has been replaced with continulets_. -.. _`coroutines, greenlets and tasklets`: stackless.html +.. _continulets: stackless.html .. _`preparing the graphs for source generation`: diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -57,6 +57,12 @@ else: return LLSupport.from_rstr(s) +FLOAT_ARRAY_TP = lltype.Ptr(lltype.Array(lltype.Float, hints={"nolength": True})) +def maybe_uncast(TP, array): + if array._TYPE.TO._hints.get("uncast_on_llgraph"): + array = rffi.cast(TP, array) + return array + # a list of argtypes of all operations - couldn't find any and it's # very useful. 
Note however that the table is half-broken here and # there, in ways that are sometimes a bit hard to fix; that's why @@ -1079,7 +1085,7 @@ if isinstance(TYPE, lltype.Ptr): if isinstance(x, (int, long, llmemory.AddressAsInt)): x = llmemory.cast_int_to_adr(x) - if TYPE is rffi.VOIDP: + if TYPE is rffi.VOIDP or TYPE.TO._hints.get("uncast_on_llgraph"): # assume that we want a "C-style" cast, without typechecking the value return rffi.cast(TYPE, x) return llmemory.cast_adr_to_ptr(x, TYPE) @@ -1329,8 +1335,8 @@ return cast_to_floatstorage(array.getitem(index)) def do_getarrayitem_raw_float(array, index): - array = array.adr.ptr._obj - return cast_to_floatstorage(array.getitem(index)) + array = maybe_uncast(FLOAT_ARRAY_TP, array.adr.ptr) + return cast_to_floatstorage(array._obj.getitem(index)) def do_getarrayitem_gc_ptr(array, index): array = array._obj.container @@ -1392,8 +1398,9 @@ newvalue = cast_from_floatstorage(ITEMTYPE, newvalue) array.setitem(index, newvalue) + def do_setarrayitem_raw_float(array, index, newvalue): - array = array.adr.ptr + array = maybe_uncast(FLOAT_ARRAY_TP, array.adr.ptr) ITEMTYPE = lltype.typeOf(array).TO.OF newvalue = cast_from_floatstorage(ITEMTYPE, newvalue) array._obj.setitem(index, newvalue) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,13 +25,14 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types self.count_fields_if_immut = count_fields_if_immut + self.ffi_flags = ffi_flags def get_arg_types(self): return self.arg_types @@ -67,6 +68,9 @@ def count_fields_if_immutable(self): return self.count_fields_if_immut + def get_ffi_flags(self): + return 
self.ffi_flags + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -114,14 +118,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): key = (ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) try: return self._descrs[key] except KeyError: descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) self._descrs[key] = descr return descr @@ -326,7 +330,7 @@ return self.getdescr(0, token[0], extrainfo=extrainfo, arg_types=''.join(arg_types)) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind arg_types = [] @@ -339,7 +343,8 @@ except UnsupportedKind: return None return self.getdescr(0, reskind, extrainfo=extrainfo, - arg_types=''.join(arg_types)) + arg_types=''.join(arg_types), + ffi_flags=ffi_flags) def grab_exc_value(self): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -260,10 +260,12 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack + ffi_flags = 0 - def __init__(self, arg_classes, extrainfo=None): + def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo + self.ffi_flags = ffi_flags def __repr__(self): res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) @@ -284,6 +286,13 @@ def get_extra_info(self): return self.extrainfo + def get_ffi_flags(self): + return self.ffi_flags + + def 
get_call_conv(self): + from pypy.rlib.clibffi import get_call_conv + return get_call_conv(self.ffi_flags, True) + def get_arg_types(self): return self.arg_classes @@ -391,8 +400,8 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo) + def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) self._result_sign = result_sign diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -8,7 +8,7 @@ class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: @@ -20,18 +20,24 @@ if reskind == history.INT: size = intmask(ffi_result.c_size) signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo) + return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo) + return NonGcPtrCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo) + return FloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo) + return VoidCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo) + return LongLongCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif 
reskind == 'S': SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo) + return SingleFloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) assert False def get_ffi_type_kind(cpu, ffi_type): diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -257,10 +257,10 @@ def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport import ffisupport return ffisupport.get_call_descr_dynamic(self, ffi_args, ffi_result, - extrainfo) + extrainfo, ffi_flags) def get_overflow_error(self): ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -13,17 +13,19 @@ def test_call_descr_dynamic(): args = [types.sint, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.sint) + descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42) assert isinstance(descr, DynamicIntCallDescr) assert descr.arg_classes == 'ii' + assert descr.get_ffi_flags() == 42 args = [types.sint, types.double, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.void) assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), - args, types.void) + args, types.void, ffi_flags=43) assert isinstance(descr, VoidCallDescr) assert descr.arg_classes == 'ifi' + assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8) assert isinstance(descr, 
DynamicIntCallDescr) @@ -39,14 +41,16 @@ descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong) assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), - [], types.slonglong) + [], types.slonglong, ffi_flags=43) assert isinstance(descr, LongLongCallDescr) + assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong descr = get_call_descr_dynamic(FakeCPU(), [], types.float) assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), - [], types.float) + [], types.float, ffi_flags=44) SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) assert isinstance(descr, SingleFloatCallDescr) + assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -468,7 +468,7 @@ assert longlong.getrealfloat(x) == 3.5 - 42 def test_call(self): - from pypy.rlib.libffi import types + from pypy.rlib.libffi import types, FUNCFLAG_CDECL def func_int(a, b): return a + b @@ -497,7 +497,8 @@ assert res.value == 2 * num # then, try it with the dynamic calldescr dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(num), BoxInt(num)], 'int', descr=dyn_calldescr) @@ -1944,7 +1945,7 @@ assert values == [1, 10] def test_call_to_c_function(self): - from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.libffi import CDLL, types, ArgChain, FUNCFLAG_CDECL from pypy.rpython.lltypesystem.ll2ctypes import libc_name libc = CDLL(libc_name) c_tolower = libc.getpointer('tolower', [types.uchar], types.sint) @@ -1955,7 +1956,8 @@ func_adr = llmemory.cast_ptr_to_adr(c_tolower.funcsym) funcbox = ConstInt(heaptracker.adr2int(func_adr)) calldescr = 
cpu.calldescrof_dynamic([types.uchar], types.sint, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) i1 = BoxInt() i2 = BoxInt() tok = BoxInt() @@ -2012,7 +2014,8 @@ calldescr = cpu.calldescrof_dynamic([types.pointer, types_size_t, types_size_t, types.pointer], types.void, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=clibffi.FUNCFLAG_CDECL) i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() @@ -2038,6 +2041,62 @@ assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') + def test_call_to_winapi_function(self): + from pypy.rlib.clibffi import _WIN32, FUNCFLAG_STDCALL + if not _WIN32: + py.test.skip("Windows test only") + from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.rwin32 import DWORD + libc = CDLL('KERNEL32') + c_GetCurrentDir = libc.getpointer('GetCurrentDirectoryA', + [types.ulong, types.pointer], + types.ulong) + + cwd = os.getcwd() + buflen = len(cwd) + 10 + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + argchain = ArgChain().arg(rffi.cast(DWORD, buflen)).arg(buffer) + res = c_GetCurrentDir.call(argchain, DWORD) + assert rffi.cast(lltype.Signed, res) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + + cpu = self.cpu + func_adr = llmemory.cast_ptr_to_adr(c_GetCurrentDir.funcsym) + funcbox = ConstInt(heaptracker.adr2int(func_adr)) + calldescr = cpu.calldescrof_dynamic([types.ulong, types.pointer], + types.ulong, + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_STDCALL) + i1 = BoxInt() + i2 = BoxInt() + faildescr = BasicFailDescr(1) + # if the stdcall convention is ignored, then ESP is wrong after the + # call: 8 bytes too much. If we repeat the call often enough, crash. 
+ ops = [] + for i in range(50): + i3 = BoxInt() + ops += [ + ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ] + ops[-1].setfailargs([]) + ops += [ + ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) + ] + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + self.cpu.set_future_value_int(0, buflen) + self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + def test_guard_not_invalidated(self): cpu = self.cpu i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -34,6 +34,7 @@ from pypy.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) from pypy.rlib import rgc +from pypy.rlib.clibffi import FFI_DEFAULT_ABI from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.metainterp.history import ConstInt, BoxInt from pypy.jit.codewriter.effectinfo import EffectInfo @@ -1120,7 +1121,7 @@ return genop_cmp_guard_float def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax, - argtypes=None): + argtypes=None, callconv=FFI_DEFAULT_ABI): if IS_X86_64: return self._emit_call_64(force_index, x, arglocs, start, argtypes) @@ -1149,6 +1150,16 @@ # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) + # + if callconv != FFI_DEFAULT_ABI: + self._fix_stdcall(callconv, p) + + def _fix_stdcall(self, callconv, p): + from pypy.rlib.clibffi import FFI_STDCALL + assert callconv == FFI_STDCALL + # it's a bit stupid, but we're just going to cancel the fact that + # the called function just added 'p' 
to ESP, by subtracting it again. + self.mc.SUB_ri(esp.value, p) def _emit_call_64(self, force_index, x, arglocs, start, argtypes): src_locs = [] @@ -2127,7 +2138,8 @@ tmp = eax self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types()) + argtypes=op.getdescr().get_arg_types(), + callconv=op.getdescr().get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -527,6 +527,7 @@ NOP = insn('\x90') RET = insn('\xC3') + RET16_i = insn('\xC2', immediate(1, 'h')) PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -433,6 +433,88 @@ ops_offset[operations[2]] <= ops_offset[None]) + def test_calling_convention(self, monkeypatch): + if WORD != 4: + py.test.skip("32-bit only test") + from pypy.jit.backend.x86.regloc import eax, edx + from pypy.jit.backend.x86 import codebuf + from pypy.jit.codewriter.effectinfo import EffectInfo + from pypy.rlib.libffi import types, clibffi + had_stdcall = hasattr(clibffi, 'FFI_STDCALL') + if not had_stdcall: # not running on Windows, but we can still test + monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) + # + for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: + cpu = self.cpu + mc = codebuf.MachineCodeBlockWrapper() + mc.MOV_rs(eax.value, 4) # argument 1 + mc.MOV_rs(edx.value, 40) # argument 10 + mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10 + if ffi == clibffi.FFI_DEFAULT_ABI: + mc.RET() + else: + mc.RET16_i(40) + rawstart = mc.materialize(cpu.asmmemmgr, []) + # + calldescr = cpu.calldescrof_dynamic([types.slong] * 10, + types.slong, 
+ EffectInfo.MOST_GENERAL, + ffi_flags=-1) + calldescr.get_call_conv = lambda: ffi # <==== hack + funcbox = ConstInt(rawstart) + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + i4 = BoxInt() + i5 = BoxInt() + i6 = BoxInt() + c = ConstInt(-1) + faildescr = BasicFailDescr(1) + # we must call it repeatedly: if the stack pointer gets increased + # by 40 bytes by the STDCALL call, and if we don't expect it, + # then we are going to get our stack emptied unexpectedly by + # several repeated calls + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i3, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i4, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i5, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i6, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.FINISH, [i3, i4, i5, i6], None, + descr=BasicFailDescr(0)) + ] + ops[1].setfailargs([]) + ops[3].setfailargs([]) + ops[5].setfailargs([]) + ops[7].setfailargs([]) + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + self.cpu.set_future_value_int(0, 123450) + self.cpu.set_future_value_int(1, 123408) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == 42 + assert self.cpu.get_latest_value_int(1) == 42 + assert self.cpu.get_latest_value_int(2) == 42 + assert self.cpu.get_latest_value_int(3) == 42 + + class TestDebuggingAssembler(object): def setup_method(self, meth): self.cpu = CPU(rtyper=None, stats=FakeStats()) diff --git a/pypy/jit/codewriter/jtransform.py 
b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1,4 +1,5 @@ import py + from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets @@ -22,6 +23,11 @@ t = Transformer(cpu, callcontrol, portal_jd) t.transform(graph) +def integer_bounds(size, unsigned): + if unsigned: + return 0, 1 << (8 * size) + else: + return -(1 << (8 * size - 1)), 1 << (8 * size - 1) class Transformer(object): vable_array_vars = None @@ -780,81 +786,127 @@ raise NotImplementedError("cast_ptr_to_int") def rewrite_op_force_cast(self, op): - assert not self._is_gc(op.args[0]) - fromll = longlong.is_longlong(op.args[0].concretetype) - toll = longlong.is_longlong(op.result.concretetype) - if fromll and toll: + v_arg = op.args[0] + v_result = op.result + assert not self._is_gc(v_arg) + + if v_arg.concretetype == v_result.concretetype: return - if fromll: - args = op.args - opname = 'truncate_longlong_to_int' - RESULT = lltype.Signed - v = varoftype(RESULT) - op1 = SpaceOperation(opname, args, v) - op2 = self.rewrite_operation(op1) - oplist = self.force_cast_without_longlong(op2.result, op.result) + + float_arg = v_arg.concretetype in [lltype.Float, lltype.SingleFloat] + float_res = v_result.concretetype in [lltype.Float, lltype.SingleFloat] + if not float_arg and not float_res: + # some int -> some int cast + return self._int_to_int_cast(v_arg, v_result) + elif float_arg and float_res: + # some float -> some float cast + return self._float_to_float_cast(v_arg, v_result) + elif not float_arg and float_res: + # some int -> some float + ops = [] + v1 = varoftype(lltype.Signed) + oplist = self.rewrite_operation( + SpaceOperation('force_cast', [v_arg], v1) + ) if oplist: - return [op2] + oplist - # - # force a renaming to put the correct result in place, even though - # it might be slightly mistyped (e.g. 
Signed versus Unsigned) - assert op2.result is v - op2.result = op.result - return op2 - elif toll: - size, unsigned = rffi.size_and_sign(op.args[0].concretetype) - if unsigned: + ops.extend(oplist) + else: + v1 = v_arg + v2 = varoftype(lltype.Float) + op = self.rewrite_operation( + SpaceOperation('cast_int_to_float', [v1], v2) + ) + ops.append(op) + op2 = self.rewrite_operation( + SpaceOperation('force_cast', [v2], v_result) + ) + if op2: + ops.append(op2) + else: + op.result = v_result + return ops + elif float_arg and not float_res: + # some float -> some int + ops = [] + v1 = varoftype(lltype.Float) + op1 = self.rewrite_operation( + SpaceOperation('force_cast', [v_arg], v1) + ) + if op1: + ops.append(op1) + else: + v1 = v_arg + v2 = varoftype(lltype.Signed) + op = self.rewrite_operation( + SpaceOperation('cast_float_to_int', [v1], v2) + ) + ops.append(op) + oplist = self.rewrite_operation( + SpaceOperation('force_cast', [v2], v_result) + ) + if oplist: + ops.extend(oplist) + else: + op.result = v_result + return ops + else: + assert False + + def _int_to_int_cast(self, v_arg, v_result): + longlong_arg = longlong.is_longlong(v_arg.concretetype) + longlong_res = longlong.is_longlong(v_result.concretetype) + size1, unsigned1 = rffi.size_and_sign(v_arg.concretetype) + size2, unsigned2 = rffi.size_and_sign(v_result.concretetype) + + if longlong_arg and longlong_res: + return + elif longlong_arg: + v = varoftype(lltype.Signed) + op1 = self.rewrite_operation( + SpaceOperation('truncate_longlong_to_int', [v_arg], v) + ) + op2 = SpaceOperation('force_cast', [v], v_result) + oplist = self.rewrite_operation(op2) + if not oplist: + op1.result = v_result + oplist = [] + return [op1] + oplist + elif longlong_res: + if unsigned1: INTERMEDIATE = lltype.Unsigned else: INTERMEDIATE = lltype.Signed v = varoftype(INTERMEDIATE) - oplist = self.force_cast_without_longlong(op.args[0], v) + op1 = SpaceOperation('force_cast', [v_arg], v) + oplist = self.rewrite_operation(op1) if not 
oplist: - v = op.args[0] + v = v_arg oplist = [] - if unsigned: + if unsigned1: opname = 'cast_uint_to_longlong' else: opname = 'cast_int_to_longlong' - op1 = SpaceOperation(opname, [v], op.result) - op2 = self.rewrite_operation(op1) + op2 = self.rewrite_operation( + SpaceOperation(opname, [v], v_result) + ) return oplist + [op2] - else: - return self.force_cast_without_longlong(op.args[0], op.result) - def force_cast_without_longlong(self, v_arg, v_result): - if v_result.concretetype == v_arg.concretetype: + # We've now, ostensibly, dealt with the longlongs, everything should be + # a Signed or smaller + assert size1 <= rffi.sizeof(lltype.Signed) + assert size2 <= rffi.sizeof(lltype.Signed) + + # the target type is LONG or ULONG + if size2 == rffi.sizeof(lltype.Signed): return - if v_arg.concretetype == rffi.FLOAT: - assert v_result.concretetype == lltype.Float, "cast %s -> %s" % ( - v_arg.concretetype, v_result.concretetype) - return SpaceOperation('cast_singlefloat_to_float', [v_arg], - v_result) - if v_result.concretetype == rffi.FLOAT: - assert v_arg.concretetype == lltype.Float, "cast %s -> %s" % ( - v_arg.concretetype, v_result.concretetype) - return SpaceOperation('cast_float_to_singlefloat', [v_arg], - v_result) - return self.force_cast_without_singlefloat(v_arg, v_result) - def force_cast_without_singlefloat(self, v_arg, v_result): - size2, unsigned2 = rffi.size_and_sign(v_result.concretetype) - assert size2 <= rffi.sizeof(lltype.Signed) - if size2 == rffi.sizeof(lltype.Signed): - return # the target type is LONG or ULONG - size1, unsigned1 = rffi.size_and_sign(v_arg.concretetype) - assert size1 <= rffi.sizeof(lltype.Signed) - # - def bounds(size, unsigned): - if unsigned: - return 0, 1<<(8*size) - else: - return -(1<<(8*size-1)), 1<<(8*size-1) - min1, max1 = bounds(size1, unsigned1) - min2, max2 = bounds(size2, unsigned2) + min1, max1 = integer_bounds(size1, unsigned1) + min2, max2 = integer_bounds(size2, unsigned2) + + # the target type includes the 
source range if min2 <= min1 <= max1 <= max2: - return # the target type includes the source range - # + return + result = [] if min2: c_min2 = Constant(min2, lltype.Signed) @@ -862,15 +914,28 @@ result.append(SpaceOperation('int_sub', [v_arg, c_min2], v2)) else: v2 = v_arg - c_mask = Constant(int((1<<(8*size2))-1), lltype.Signed) - v3 = varoftype(lltype.Signed) + c_mask = Constant(int((1 << (8 * size2)) - 1), lltype.Signed) + if min2: + v3 = varoftype(lltype.Signed) + else: + v3 = v_result result.append(SpaceOperation('int_and', [v2, c_mask], v3)) if min2: result.append(SpaceOperation('int_add', [v3, c_min2], v_result)) - else: - result[-1].result = v_result return result + def _float_to_float_cast(self, v_arg, v_result): + if v_arg.concretetype == lltype.SingleFloat: + assert v_result.concretetype == lltype.Float, "cast %s -> %s" % ( + v_arg.concretetype, v_result.concretetype) + return SpaceOperation('cast_singlefloat_to_float', [v_arg], + v_result) + if v_result.concretetype == lltype.SingleFloat: + assert v_arg.concretetype == lltype.Float, "cast %s -> %s" % ( + v_arg.concretetype, v_result.concretetype) + return SpaceOperation('cast_float_to_singlefloat', [v_arg], + v_result) + def rewrite_op_direct_ptradd(self, op): # xxx otherwise, not implemented: assert op.args[0].concretetype == rffi.CCHARP diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -91,9 +91,12 @@ reds_v = op.args[2+numgreens:] assert len(reds_v) == numreds # - def _sort(args_v): + def _sort(args_v, is_green): from pypy.jit.metainterp.history import getkind lst = [v for v in args_v if v.concretetype is not lltype.Void] + if is_green: + assert len(lst) == len(args_v), ( + "not supported so far: 'greens' variables contain Void") _kind2count = {'int': 1, 'ref': 2, 'float': 3} lst2 = sorted(lst, key=lambda v: _kind2count[getkind(v.concretetype)]) # a crash here means that you have to reorder the 
variable named in @@ -102,7 +105,7 @@ assert lst == lst2 return lst # - return (_sort(greens_v), _sort(reds_v)) + return (_sort(greens_v, True), _sort(reds_v, False)) def maybe_on_top_of_llinterp(rtyper, fnptr): # Run a generated graph on top of the llinterp for testing. diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -324,7 +324,7 @@ def test_exc_exitswitch(self): def g(i): pass - + def f(i): try: g(i) @@ -854,13 +854,51 @@ int_return %i0 """, transform=True) - def test_force_cast_float(self): + def test_force_cast_floats(self): from pypy.rpython.lltypesystem import rffi + # Caststs to lltype.Float def f(n): return rffi.cast(lltype.Float, n) self.encoding_test(f, [12.456], """ float_return %f0 """, transform=True) + self.encoding_test(f, [rffi.cast(rffi.SIGNEDCHAR, 42)], """ + cast_int_to_float %i0 -> %f0 + float_return %f0 + """, transform=True) + + # Casts to lltype.SingleFloat + def g(n): + return rffi.cast(lltype.SingleFloat, n) + self.encoding_test(g, [12.456], """ + cast_float_to_singlefloat %f0 -> %i0 + int_return %i0 + """, transform=True) + self.encoding_test(g, [rffi.cast(rffi.SIGNEDCHAR, 42)], """ + cast_int_to_float %i0 -> %f0 + cast_float_to_singlefloat %f0 -> %i1 + int_return %i1 + """, transform=True) + + # Casts from floats + def f(n): + return rffi.cast(rffi.SIGNEDCHAR, n) + self.encoding_test(f, [12.456], """ + cast_float_to_int %f0 -> %i0 + int_sub %i0, $-128 -> %i1 + int_and %i1, $255 -> %i2 + int_add %i2, $-128 -> %i3 + int_return %i3 + """, transform=True) + self.encoding_test(f, [rffi.cast(lltype.SingleFloat, 12.456)], """ + cast_singlefloat_to_float %i0 -> %f0 + cast_float_to_int %f0 -> %i1 + int_sub %i1, $-128 -> %i2 + int_and %i2, $255 -> %i3 + int_add %i3, $-128 -> %i4 + int_return %i4 + """, transform=True) + def test_direct_ptradd(self): from pypy.rpython.lltypesystem import rffi diff --git 
a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -18,26 +18,27 @@ def __init__(self, funcval, cpu, prepare_op): self.funcval = funcval self.opargs = [] - argtypes, restype = self._get_signature(funcval) + argtypes, restype, flags = self._get_signature(funcval) self.descr = cpu.calldescrof_dynamic(argtypes, restype, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=flags) # ^^^ may be None if unsupported self.prepare_op = prepare_op self.delayed_ops = [] def _get_signature(self, funcval): """ - given the funcval, return a tuple (argtypes, restype), where the - actuall types are libffi.types.* + given the funcval, return a tuple (argtypes, restype, flags), where + the actuall types are libffi.types.* The implementation is tricky because we have three possible cases: - translated: the easiest case, we can just cast back the pointer to - the original Func instance and read .argtypes and .restype + the original Func instance and read .argtypes, .restype and .flags - completely untranslated: this is what we get from test_optimizeopt tests. 
funcval contains a FakeLLObject whose _fake_class is Func, - and we can just get .argtypes and .restype + and we can just get .argtypes, .restype and .flags - partially translated: this happens when running metainterp tests: funcval contains the low-level equivalent of a Func, and thus we @@ -49,10 +50,10 @@ llfunc = funcval.box.getref_base() if we_are_translated(): func = cast_base_ptr_to_instance(Func, llfunc) - return func.argtypes, func.restype + return func.argtypes, func.restype, func.flags elif getattr(llfunc, '_fake_class', None) is Func: # untranslated - return llfunc.argtypes, llfunc.restype + return llfunc.argtypes, llfunc.restype, llfunc.flags else: # partially translated # llfunc contains an opaque pointer to something like the following: @@ -63,7 +64,7 @@ # because we don't have the exact TYPE to cast to. Instead, we # just fish it manually :-( f = llfunc._obj.container - return f.inst_argtypes, f.inst_restype + return f.inst_argtypes, f.inst_restype, f.inst_flags class OptFfiCall(Optimization): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -14,12 +14,15 @@ can check that the signature of a call is really what you want. 
""" - def __init__(self, arg_types, typeinfo): + def __init__(self, arg_types, typeinfo, flags): self.arg_types = arg_types self.typeinfo = typeinfo # return type + self.flags = flags def __eq__(self, other): - return self.arg_types == other.arg_types and self.typeinfo == other.typeinfo + return (self.arg_types == other.arg_types and + self.typeinfo == other.typeinfo and + self.flags == other.get_ffi_flags()) class FakeLLObject(object): @@ -41,14 +44,17 @@ vable_token_descr = LLtypeMixin.valuedescr valuedescr = LLtypeMixin.valuedescr - int_float__int = MyCallDescr('if', 'i') + int_float__int_42 = MyCallDescr('if', 'i', 42) + int_float__int_43 = MyCallDescr('if', 'i', 43) funcptr = FakeLLObject() func = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=42) func2 = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=43) # def calldescr(cpu, FUNC, oopspecindex, extraeffect=None): if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: @@ -83,7 +89,7 @@ """ expected = """ [i0, f1] - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1) @@ -123,7 +129,7 @@ [i0, f1, p2] i4 = force_token() setfield_gc(p2, i4, descr=vable_token_descr) - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [p2] guard_no_exception() [p2] jump(i3, f1, p2) @@ -220,7 +226,7 @@ call(0, ConstPtr(func), descr=libffi_prepare) # # this "nested" call is nicely optimized - i4 = call_release_gil(67890, i0, f1, descr=int_float__int) + i4 = call_release_gil(67890, i0, f1, descr=int_float__int_43) guard_not_forced() [] guard_no_exception() [] # @@ -265,7 +271,7 @@ expected = """ [i0, f1, p2] setfield_gc(p2, i0, descr=valuedescr) - i3 = call_release_gil(12345, i0, 
f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1, p2) diff --git a/pypy/jit/metainterp/test/test_rawmem.py b/pypy/jit/metainterp/test/test_rawmem.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_rawmem.py @@ -0,0 +1,22 @@ +from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.rpython.lltypesystem import lltype, rffi + + +class TestJITRawMem(LLJitMixin): + def test_cast_void_ptr(self): + TP = lltype.Array(lltype.Float, hints={"nolength": True}) + VOID_TP = lltype.Array(lltype.Void, hints={"nolength": True, "uncast_on_llgraph": True}) + class A(object): + def __init__(self, x): + self.storage = rffi.cast(lltype.Ptr(VOID_TP), x)\ + + def f(n): + x = lltype.malloc(TP, n, flavor="raw", zero=True) + a = A(x) + s = 0.0 + rffi.cast(lltype.Ptr(TP), a.storage)[0] = 1.0 + s += rffi.cast(lltype.Ptr(TP), a.storage)[0] + lltype.free(x, flavor="raw") + return s + res = self.interp_operations(f, [10]) + assert res == 1.0 \ No newline at end of file diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -252,6 +252,41 @@ self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, 'jump': 1}) + def test_void_red_variable(self): + mydriver = JitDriver(greens=[], reds=['a', 'm']) + def f1(m): + a = None + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass # other case + self.meta_interp(f1, [18]) + + def test_bug_constant_rawptrs(self): + py.test.skip("crashes because a is a constant") + from pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.nullptr(rffi.VOIDP.TO) + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + self.meta_interp(f1, [18]) + + def test_bug_rawptrs(self): + from 
pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.malloc(rffi.VOIDP.TO, 5, flavor='raw') + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass + lltype.free(a, flavor='raw') + self.meta_interp(f1, [18]) + class TestLLWarmspot(WarmspotTests, LLJitMixin): CPUClass = runner.LLtypeCPU diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -245,7 +245,8 @@ graph.startblock = support.split_before_jit_merge_point(*jmpp) graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot - # to list some variable in greens=[] or reds=[] in JitDriver. + # to list some variable in greens=[] or reds=[] in JitDriver, + # or that a jit_merge_point() takes a constant as an argument. checkgraph(graph) for v in graph.getargs(): assert isinstance(v, Variable) @@ -655,11 +656,13 @@ portalfunc_ARGS = [] nums = {} for i, ARG in enumerate(PORTALFUNC.ARGS): + kind = history.getkind(ARG) + assert kind != 'void' if i < len(jd.jitdriver.greens): color = 'green' else: color = 'red' - attrname = '%s_%s' % (color, history.getkind(ARG)) + attrname = '%s_%s' % (color, kind) count = nums.get(attrname, 0) nums[attrname] = count + 1 portalfunc_ARGS.append((ARG, attrname, count)) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -124,7 +124,7 @@ # Hash of lltype or ootype object. # Only supports strings, unicodes and regular instances, # as well as primitives that can meaningfully be cast to Signed. 
- if isinstance(TYPE, lltype.Ptr): + if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc': if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE: return rstr.LLHelpers.ll_strhash(x) # assumed not null else: @@ -140,7 +140,7 @@ else: return 0 else: - return lltype.cast_primitive(lltype.Signed, x) + return rffi.cast(lltype.Signed, x) @specialize.ll_and_arg(3) def set_future_value(cpu, j, value, typecode): diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -19,6 +19,7 @@ 'sorted' : 'app_functional.sorted', 'any' : 'app_functional.any', 'all' : 'app_functional.all', + 'sum' : 'app_functional.sum', 'vars' : 'app_inspect.vars', 'dir' : 'app_inspect.dir', @@ -85,7 +86,6 @@ 'enumerate' : 'functional.W_Enumerate', 'min' : 'functional.min', 'max' : 'functional.max', - 'sum' : 'functional.sum', 'map' : 'functional.map', 'zip' : 'functional.zip', 'reduce' : 'functional.reduce', @@ -118,7 +118,7 @@ return module.Module(space, None, w_builtin) builtin = space.interpclass_w(w_builtin) if isinstance(builtin, module.Module): - return builtin + return builtin # no builtin! make a default one. Given them None, at least. builtin = module.Module(space, None) space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -34,3 +34,18 @@ if not x: return False return True + +def sum(sequence, start=0): + """sum(sequence[, start]) -> value + +Returns the sum of a sequence of numbers (NOT strings) plus the value +of parameter 'start' (which defaults to 0). 
When the sequence is +empty, returns start.""" + if isinstance(start, basestring): + raise TypeError("sum() can't sum strings") + last = start + for x in sequence: + # Very intentionally *not* +=, that would have different semantics if + # start was a mutable type, such as a list + last = last + x + return last \ No newline at end of file diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -325,27 +325,6 @@ result_w.append(w_res) return result_w -def sum(space, w_sequence, w_start=0): - """sum(sequence[, start]) -> value - -Returns the sum of a sequence of numbers (NOT strings) plus the value -of parameter 'start' (which defaults to 0). When the sequence is -empty, returns start.""" - if space.is_true(space.isinstance(w_start, space.w_basestring)): - msg = "sum() can't sum strings" - raise OperationError(space.w_TypeError, space.wrap(msg)) - w_iter = space.iter(w_sequence) - w_last = w_start - while True: - try: - w_next = space.next(w_iter) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - w_last = space.add(w_last, w_next) - return w_last - @unwrap_spec(sequences_w="args_w") def zip(space, sequences_w): """Return a list of tuples, where the nth tuple contains every nth item of diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -687,11 +687,15 @@ # support for the "string escape" codec # This is a bytes-to bytes transformation - at unwrap_spec(errors='str_or_None') -def escape_encode(space, w_string, errors='strict'): - w_repr = space.repr(w_string) - w_result = space.getslice(w_repr, space.wrap(1), space.wrap(-1)) - return space.newtuple([w_result, space.len(w_string)]) + at unwrap_spec(data=str, errors='str_or_None') +def escape_encode(space, data, errors='strict'): 
+ from pypy.objspace.std.stringobject import string_escape_encode + result = string_escape_encode(data, quote="'") + start = 1 + end = len(result) - 1 + assert end >= 0 + w_result = space.wrap(result[start:end]) + return space.newtuple([w_result, space.wrap(len(data))]) @unwrap_spec(data=str, errors='str_or_None') def escape_decode(space, data, errors='strict'): diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -102,7 +102,6 @@ def test_indexerror(self): test = "\\" # trailing backslash - raises (ValueError, test.decode,'string-escape') def test_charmap_decode(self): @@ -292,6 +291,10 @@ assert '\\0f'.decode('string_escape') == chr(0) + 'f' assert '\\08'.decode('string_escape') == chr(0) + '8' + def test_escape_encode(self): + assert '"'.encode('string_escape') == '"' + assert "'".encode('string_escape') == "\\'" + def test_decode_utf8_different_case(self): constant = u"a" assert constant.encode("utf-8") == constant.encode("UTF-8") diff --git a/pypy/module/_continuation/__init__.py b/pypy/module/_continuation/__init__.py --- a/pypy/module/_continuation/__init__.py +++ b/pypy/module/_continuation/__init__.py @@ -12,7 +12,7 @@ To make a continulet object, call 'continulet' with a callable and optional extra arguments. Later, the first time you switch() to the -continulet, the callable is invoked wih the same continulet object as +continulet, the callable is invoked with the same continulet object as the extra first argument. 
At this point, the one-shot continuation stored in the continulet points diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -351,6 +351,7 @@ self.decompressor = W_BZ2Decompressor(space) self.readlength = r_longlong(0) self.buffer = "" + self.pos = 0 self.finished = False if buffering < 1024: buffering = 1024 # minimum amount of compressed data read at once @@ -385,6 +386,7 @@ self.stream.seek(0, 0) self.decompressor = W_BZ2Decompressor(self.space) self.readlength = r_longlong(0) + self.pos = 0 self.buffer = "" self.finished = False else: @@ -410,15 +412,19 @@ self.space.wrap("compressed file ended before the logical end-of-the-stream was detected")) result = self.space.str_w(w_result) self.readlength += len(result) - result = self.buffer + result + if len(self.buffer) != self.pos: + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:] + result self.buffer = '' + self.pos = 0 return result def read(self, n): # XXX not nice if n <= 0: return '' - while not self.buffer: + while self.pos == len(self.buffer): if self.finished: return "" moredata = self.stream.read(max(self.buffering, n)) @@ -433,17 +439,25 @@ return "" raise self.buffer = self.space.str_w(w_read) - if len(self.buffer) >= n: - result = self.buffer[:n] - self.buffer = self.buffer[n:] + self.pos = 0 + if len(self.buffer) - self.pos >= n: + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:pos + n] + self.pos += n else: - result = self.buffer + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:] + self.pos = 0 self.buffer = "" self.readlength += len(result) return result def peek(self): - return self.buffer + pos = self.pos + assert pos >= 0 + return self.buffer[pos:] def try_to_find_file_descriptor(self): return self.stream.try_to_find_file_descriptor() diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- 
a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -274,14 +274,14 @@ pass del bz2f # delete from this frame, which is captured in the traceback - def test_read_chunk10(self): + def test_read_chunk9(self): from bz2 import BZ2File self.create_temp_file() bz2f = BZ2File(self.temppath) text_read = "" while True: - data = bz2f.read(10) + data = bz2f.read(9) # 9 doesn't divide evenly into data length if not data: break text_read = "%s%s" % (text_read, data) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,40 +1,45 @@ - from pypy.interpreter.mixedmodule import MixedModule + class Module(MixedModule): - applevel_name = 'numpy' interpleveldefs = { 'array': 'interp_numarray.SingleDimArray', + 'dtype': 'interp_dtype.W_Dtype', + 'ufunc': 'interp_ufuncs.W_Ufunc', + 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', 'fromstring': 'interp_support.fromstring', + } - # ufuncs - 'abs': 'interp_ufuncs.absolute', - 'absolute': 'interp_ufuncs.absolute', - 'add': 'interp_ufuncs.add', - 'copysign': 'interp_ufuncs.copysign', - 'divide': 'interp_ufuncs.divide', - 'exp': 'interp_ufuncs.exp', - 'fabs': 'interp_ufuncs.fabs', - 'floor': 'interp_ufuncs.floor', - 'maximum': 'interp_ufuncs.maximum', - 'minimum': 'interp_ufuncs.minimum', - 'multiply': 'interp_ufuncs.multiply', - 'negative': 'interp_ufuncs.negative', - 'reciprocal': 'interp_ufuncs.reciprocal', - 'sign': 'interp_ufuncs.sign', - 'subtract': 'interp_ufuncs.subtract', - 'sin': 'interp_ufuncs.sin', - 'cos': 'interp_ufuncs.cos', - 'tan': 'interp_ufuncs.tan', - 'arcsin': 'interp_ufuncs.arcsin', - 'arccos': 'interp_ufuncs.arccos', - 'arctan': 'interp_ufuncs.arctan', - } + # ufuncs + for exposed, impl in [ + ("abs", "absolute"), + ("absolute", "absolute"), + ("add", "add"), + ("arccos", "arccos"), + ("arcsin", "arcsin"), + ("arctan", 
"arctan"), + ("copysign", "copysign"), + ("cos", "cos"), + ("divide", "divide"), + ("exp", "exp"), + ("fabs", "fabs"), + ("floor", "floor"), + ("maximum", "maximum"), + ("minimum", "minimum"), + ("multiply", "multiply"), + ("negative", "negative"), + ("reciprocal", "reciprocal"), + ("sign", "sign"), + ("sin", "sin"), + ("subtract", "subtract"), + ("tan", "tan"), + ]: + interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl appleveldefs = { 'average': 'app_numpy.average', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,56 +3,104 @@ It should not be imported by the module itself """ -from pypy.module.micronumpy.interp_numarray import FloatWrapper, SingleDimArray, BaseArray +from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root +from pypy.module.micronumpy.interp_dtype import W_Float64Dtype +from pypy.module.micronumpy.interp_numarray import Scalar, SingleDimArray, BaseArray +from pypy.rlib.objectmodel import specialize + class BogusBytecode(Exception): pass -def create_array(size): - a = SingleDimArray(size) +def create_array(dtype, size): + a = SingleDimArray(size, dtype=dtype) for i in range(size): - a.storage[i] = float(i % 10) + dtype.setitem(a.storage, i, dtype.box(float(i % 10))) return a -class TrivialSpace(object): - def wrap(self, x): - return x +class FakeSpace(object): + w_ValueError = None + w_TypeError = None + + def __init__(self): + """NOT_RPYTHON""" + self.fromcache = InternalSpaceCache(self).getorbuild def issequence_w(self, w_obj): - # Completley wrong in the general case, but good enough for this. 
- return isinstance(w_obj, BaseArray) + return True + + @specialize.argtype(1) + def wrap(self, obj): + if isinstance(obj, float): + return FloatObject(obj) + elif isinstance(obj, bool): + return BoolObject(obj) + elif isinstance(obj, int): + return IntObject(obj) + raise Exception + + def float(self, w_obj): + assert isinstance(w_obj, FloatObject) + return w_obj def float_w(self, w_obj): - assert isinstance(w_obj, float) - return w_obj + return w_obj.floatval + + +class FloatObject(W_Root): + def __init__(self, floatval): + self.floatval = floatval + +class BoolObject(W_Root): + def __init__(self, boolval): + self.boolval = boolval + +class IntObject(W_Root): + def __init__(self, intval): + self.intval = intval + + +space = FakeSpace() def numpy_compile(bytecode, array_size): - space = TrivialSpace() stack = [] i = 0 + dtype = space.fromcache(W_Float64Dtype) for b in bytecode: if b == 'a': - stack.append(create_array(array_size)) + stack.append(create_array(dtype, array_size)) i += 1 elif b == 'f': - stack.append(FloatWrapper(1.2)) + stack.append(Scalar(dtype, dtype.box(1.2))) elif b == '+': right = stack.pop() - stack.append(stack.pop().descr_add(space, right)) + res = stack.pop().descr_add(space, right) + assert isinstance(res, BaseArray) + stack.append(res) elif b == '-': right = stack.pop() - stack.append(stack.pop().descr_sub(space, right)) + res = stack.pop().descr_sub(space, right) + assert isinstance(res, BaseArray) + stack.append(res) elif b == '*': right = stack.pop() - stack.append(stack.pop().descr_mul(space, right)) + res = stack.pop().descr_mul(space, right) + assert isinstance(res, BaseArray) + stack.append(res) elif b == '/': right = stack.pop() - stack.append(stack.pop().descr_div(space, right)) + res = stack.pop().descr_div(space, right) + assert isinstance(res, BaseArray) + stack.append(res) elif b == '%': right = stack.pop() - stack.append(stack.pop().descr_mod(space, right)) + res = stack.pop().descr_mod(space, right) + assert isinstance(res, 
BaseArray) + stack.append(res) elif b == '|': - stack.append(stack.pop().descr_abs(space)) + res = stack.pop().descr_abs(space) + assert isinstance(res, BaseArray) + stack.append(res) else: print "Unknown opcode: %s" % b raise BogusBytecode() diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_dtype.py @@ -0,0 +1,374 @@ +import functools +import math + +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty +from pypy.module.micronumpy import signature +from pypy.objspace.std.floatobject import float2string +from pypy.rlib import rfloat +from pypy.rlib.rarithmetic import widen +from pypy.rlib.objectmodel import specialize, enforceargs +from pypy.rlib.unroll import unrolling_iterable +from pypy.rpython.lltypesystem import lltype, rffi + + +SIGNEDLTR = "i" +BOOLLTR = "b" +FLOATINGLTR = "f" + +class W_Dtype(Wrappable): + def __init__(self, space): + pass + + def descr__new__(space, w_subtype, w_dtype): + if space.is_w(w_dtype, space.w_None): + return space.fromcache(W_Float64Dtype) + elif space.isinstance_w(w_dtype, space.w_str): + dtype = space.str_w(w_dtype) + for alias, dtype_class in dtypes_by_alias: + if alias == dtype: + return space.fromcache(dtype_class) + elif isinstance(space.interpclass_w(w_dtype), W_Dtype): + return w_dtype + elif space.isinstance_w(w_dtype, space.w_type): + for typename, dtype_class in dtypes_by_apptype: + if space.is_w(getattr(space, "w_%s" % typename), w_dtype): + return space.fromcache(dtype_class) + raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + + def descr_repr(self, space): + return space.wrap("dtype('%s')" % self.name) + + def descr_str(self, space): + return space.wrap(self.name) + + def descr_get_shape(self, 
space): + return space.newtuple([]) + + +class BaseBox(object): + pass + +VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) + +def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, + expected_size=None): + + class Box(BaseBox): + def __init__(self, val): + self.val = val + + def wrap(self, space): + return space.wrap(self.val) + + def convert_to(self, dtype): + return dtype.adapt_val(self.val) + Box.__name__ = "%sBox" % T._name + + TP = lltype.Ptr(lltype.Array(T, hints={'nolength': True})) + class W_LowLevelDtype(W_Dtype): + signature = signature.BaseSignature() + + def erase(self, storage): + return rffi.cast(VOID_TP, storage) + + def unerase(self, storage): + return rffi.cast(TP, storage) + + @enforceargs(None, valtype) + def box(self, value): + return Box(value) + + def unbox(self, box): + assert isinstance(box, Box) + return box.val + + def unwrap(self, space, w_item): + raise NotImplementedError + + def malloc(self, size): + # XXX find out why test_zjit explodes with tracking of allocations + return self.erase(lltype.malloc(TP.TO, size, + zero=True, flavor="raw", + track_allocation=False, add_memory_pressure=True + )) + + def getitem(self, storage, i): + return Box(self.unerase(storage)[i]) + + def setitem(self, storage, i, item): + self.unerase(storage)[i] = self.unbox(item) + + def setitem_w(self, space, storage, i, w_item): + self.setitem(storage, i, self.unwrap(space, w_item)) + + @specialize.argtype(1) + def adapt_val(self, val): + return self.box(rffi.cast(TP.TO.OF, val)) + + W_LowLevelDtype.__name__ = "W_%sDtype" % name.capitalize() + W_LowLevelDtype.num = num + W_LowLevelDtype.kind = kind + W_LowLevelDtype.name = name + W_LowLevelDtype.aliases = aliases + W_LowLevelDtype.applevel_types = applevel_types + W_LowLevelDtype.num_bytes = rffi.sizeof(T) + if expected_size is not None: + assert W_LowLevelDtype.num_bytes == expected_size + return W_LowLevelDtype + + +def binop(func): 
+ @functools.wraps(func) + def impl(self, v1, v2): + return self.adapt_val(func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + )) + return impl + +def unaryop(func): + @functools.wraps(func) + def impl(self, v): + return self.adapt_val(func(self, self.for_computation(self.unbox(v)))) + return impl + +class ArithmaticTypeMixin(object): + _mixin_ = True + + @binop + def add(self, v1, v2): + return v1 + v2 + @binop + def sub(self, v1, v2): + return v1 - v2 + @binop + def mul(self, v1, v2): + return v1 * v2 + @binop + def div(self, v1, v2): + return v1 / v2 + + @unaryop + def pos(self, v): + return +v + @unaryop + def neg(self, v): + return -v + @unaryop + def abs(self, v): + return abs(v) + + @binop + def max(self, v1, v2): + return max(v1, v2) + @binop + def min(self, v1, v2): + return min(v1, v2) + + def bool(self, v): + return bool(self.for_computation(self.unbox(v))) + def ne(self, v1, v2): + return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2)) + + +class FloatArithmeticDtype(ArithmaticTypeMixin): + _mixin_ = True + + def for_computation(self, v): + return v + + @binop + def mod(self, v1, v2): + return math.fmod(v1, v2) + @binop + def pow(self, v1, v2): + return math.pow(v1, v2) + + @unaryop + def sign(self, v): + if v == 0.0: + return 0.0 + return rfloat.copysign(1.0, v) + @unaryop + def reciprocal(self, v): + if v == 0.0: + return rfloat.copysign(rfloat.INFINITY, v) + return 1.0 / v + @unaryop + def fabs(self, v): + return math.fabs(v) + @unaryop + def floor(self, v): + return math.floor(v) + + @binop + def copysign(self, v1, v2): + return math.copysign(v1, v2) + @unaryop + def exp(self, v): + try: + return math.exp(v) + except OverflowError: + return rfloat.INFINITY + @unaryop + def sin(self, v): + return math.sin(v) + @unaryop + def cos(self, v): + return math.cos(v) + @unaryop + def tan(self, v): + return math.tan(v) + @unaryop + def arcsin(self, v): + if v < -1.0 or v > 1.0: + return 
rfloat.NAN + return math.asin(v) + @unaryop + def arccos(self, v): + if v < -1.0 or v > 1.0: + return rfloat.NAN + return math.acos(v) + @unaryop + def arctan(self, v): + return math.atan(v) + +class IntegerArithmeticDtype(ArithmaticTypeMixin): + _mixin_ = True + + def unwrap(self, space, w_item): + return self.adapt_val(space.int_w(space.int(w_item))) + + def for_computation(self, v): + return widen(v) + + @binop + def mod(self, v1, v2): + return v1 % v2 + + @unaryop + def sign(self, v): + if v > 0: + return 1 + elif v < 0: + return -1 + else: + assert v == 0 + return 0 + + def str_format(self, item): + return str(widen(self.unbox(item))) + +W_BoolDtype = create_low_level_dtype( + num = 0, kind = BOOLLTR, name = "bool", + aliases = ["?"], + applevel_types = ["bool"], + T = lltype.Bool, + valtype = bool, +) +class W_BoolDtype(IntegerArithmeticDtype, W_BoolDtype): + def unwrap(self, space, w_item): + return self.adapt_val(space.is_true(w_item)) + + def str_format(self, item): + v = self.unbox(item) + return "True" if v else "False" + + def for_computation(self, v): + return int(v) + +W_Int8Dtype = create_low_level_dtype( + num = 1, kind = SIGNEDLTR, name = "int8", + aliases = ["int8"], + applevel_types = [], + T = rffi.SIGNEDCHAR, + valtype = rffi.SIGNEDCHAR._type, + expected_size = 1, +) +class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): + pass + +W_Int16Dtype = create_low_level_dtype( + num = 3, kind = SIGNEDLTR, name = "int16", + aliases = ["int16"], + applevel_types = [], + T = rffi.SHORT, + valtype = rffi.SHORT._type, + expected_size = 2, +) +class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): + pass + +W_Int32Dtype = create_low_level_dtype( + num = 5, kind = SIGNEDLTR, name = "int32", + aliases = ["i"], + applevel_types = [], + T = rffi.INT, + valtype = rffi.INT._type, + expected_size = 4, +) +class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype): + pass + +W_Int64Dtype = create_low_level_dtype( + num = 9, kind = SIGNEDLTR, name = "int64", + 
aliases = [], + applevel_types = ["long"], + T = rffi.LONGLONG, + valtype = rffi.LONGLONG._type, + expected_size = 8, +) +class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype): + pass + +W_Float64Dtype = create_low_level_dtype( + num = 12, kind = FLOATINGLTR, name = "float64", + aliases = [], + applevel_types = ["float"], + T = lltype.Float, + valtype = float, + expected_size = 8, +) +class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): + def unwrap(self, space, w_item): + return self.adapt_val(space.float_w(space.float(w_item))) + + def str_format(self, item): + return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) + +ALL_DTYPES = [ + W_BoolDtype, + W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, + W_Float64Dtype +] + +dtypes_by_alias = unrolling_iterable([ + (alias, dtype) + for dtype in ALL_DTYPES + for alias in dtype.aliases +]) +dtypes_by_apptype = unrolling_iterable([ + (apptype, dtype) + for dtype in ALL_DTYPES + for apptype in dtype.applevel_types +]) +dtypes_by_num_bytes = unrolling_iterable(sorted([ + (dtype.num_bytes, dtype) + for dtype in ALL_DTYPES +])) + +W_Dtype.typedef = TypeDef("dtype", + __module__ = "numpy", + __new__ = interp2app(W_Dtype.descr__new__.im_func), + + __repr__ = interp2app(W_Dtype.descr_repr), + __str__ = interp2app(W_Dtype.descr_str), + + num = interp_attrproperty("num", cls=W_Dtype), + kind = interp_attrproperty("kind", cls=W_Dtype), + shape = GetSetProperty(W_Dtype.descr_get_shape), +) +W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,38 +1,22 @@ from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from 
pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.module.micronumpy.interp_support import Signature -from pypy.module.micronumpy import interp_ufuncs -from pypy.objspace.std.floatobject import float2string as float2string_orig +from pypy.module.micronumpy import interp_ufuncs, interp_dtype, signature from pypy.rlib import jit -from pypy.rlib.rfloat import DTSF_STR_PRECISION from pypy.rpython.lltypesystem import lltype from pypy.tool.sourcetools import func_with_new_name -import math -TP = lltype.Array(lltype.Float, hints={'nolength': True}) numpy_driver = jit.JitDriver(greens = ['signature'], reds = ['result_size', 'i', 'self', 'result']) -all_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self']) -any_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self']) -slice_driver1 = jit.JitDriver(greens=['signature'], reds=['i', 'j', 'step', 'stop', 'source', 'dest']) -slice_driver2 = jit.JitDriver(greens=['signature'], reds=['i', 'j', 'step', 'stop', 'source', 'dest']) - -def add(v1, v2): - return v1 + v2 -def mul(v1, v2): - return v1 * v2 -def maximum(v1, v2): - return max(v1, v2) -def minimum(v1, v2): - return min(v1, v2) - -def float2string(x): - return float2string_orig(x, 'g', DTSF_STR_PRECISION) +all_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self', 'dtype']) +any_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self', 'dtype']) +slice_driver = jit.JitDriver(greens=['signature'], reds=['i', 'j', 'step', 'stop', 'source', 'dest']) class BaseArray(Wrappable): + _attrs_ = ["invalidates", "signature"] + def __init__(self): self.invalidates = [] @@ -45,93 +29,92 @@ arr.force_if_needed() del self.invalidates[:] - def _unaryop_impl(w_ufunc): + def add_invalidates(self, other): + self.invalidates.append(other) + + def descr__new__(space, w_subtype, w_size_or_iterable, w_dtype=None): + l = space.listview(w_size_or_iterable) + if space.is_w(w_dtype, space.w_None): + w_dtype = None + for 
w_item in l: + w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_item, w_dtype) + if w_dtype is space.fromcache(interp_dtype.W_Float64Dtype): + break + if w_dtype is None: + w_dtype = space.w_None + + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + arr = SingleDimArray(len(l), dtype=dtype) + i = 0 + for w_elem in l: + dtype.setitem_w(space, arr.storage, i, w_elem) + i += 1 + return arr + + def _unaryop_impl(ufunc_name): def impl(self, space): - return w_ufunc(space, self) - return func_with_new_name(impl, "unaryop_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) - descr_pos = _unaryop_impl(interp_ufuncs.positive) - descr_neg = _unaryop_impl(interp_ufuncs.negative) - descr_abs = _unaryop_impl(interp_ufuncs.absolute) + descr_pos = _unaryop_impl("positive") + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") - def _binop_impl(w_ufunc): + def _binop_impl(ufunc_name): def impl(self, space, w_other): - return w_ufunc(space, self, w_other) - return func_with_new_name(impl, "binop_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) - descr_add = _binop_impl(interp_ufuncs.add) - descr_sub = _binop_impl(interp_ufuncs.subtract) - descr_mul = _binop_impl(interp_ufuncs.multiply) - descr_div = _binop_impl(interp_ufuncs.divide) - descr_pow = _binop_impl(interp_ufuncs.power) - descr_mod = _binop_impl(interp_ufuncs.mod) + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_pow = _binop_impl("power") + descr_mod = _binop_impl("mod") - def _binop_right_impl(w_ufunc): + def _binop_right_impl(ufunc_name): def impl(self, space, w_other): - 
w_other = FloatWrapper(space.float_w(w_other)) - return w_ufunc(space, w_other, self) - return func_with_new_name(impl, "binop_right_%s_impl" % w_ufunc.__name__) + w_other = scalar_w(space, + interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), + w_other + ) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) - descr_radd = _binop_right_impl(interp_ufuncs.add) - descr_rsub = _binop_right_impl(interp_ufuncs.subtract) - descr_rmul = _binop_right_impl(interp_ufuncs.multiply) - descr_rdiv = _binop_right_impl(interp_ufuncs.divide) - descr_rpow = _binop_right_impl(interp_ufuncs.power) - descr_rmod = _binop_right_impl(interp_ufuncs.mod) + descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") + descr_rmul = _binop_right_impl("multiply") + descr_rdiv = _binop_right_impl("divide") + descr_rpow = _binop_right_impl("power") + descr_rmod = _binop_right_impl("mod") - def _reduce_sum_prod_impl(function, init): + def _reduce_ufunc_impl(ufunc_name): + def impl(self, space): + return getattr(interp_ufuncs.get(space), ufunc_name).descr_reduce(space, self) + return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) + + descr_sum = _reduce_ufunc_impl("add") + descr_prod = _reduce_ufunc_impl("multiply") + descr_max = _reduce_ufunc_impl("maximum") + descr_min = _reduce_ufunc_impl("minimum") + + def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'self', 'result']) - - def loop(self, result, size): - i = 0 - while i < size: - reduce_driver.jit_merge_point(signature=self.signature, - self=self, size=size, i=i, - result=result) - result = function(result, self.eval(i)) - i += 1 - return result - - def impl(self, space): - return space.wrap(loop(self, init, self.find_size())) - return func_with_new_name(impl, "reduce_%s_impl" % function.__name__) - - def 
_reduce_max_min_impl(function): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'self', 'result']) - def loop(self, result, size): - i = 1 - while i < size: - reduce_driver.jit_merge_point(signature=self.signature, - self=self, size=size, i=i, - result=result) - result = function(result, self.eval(i)) - i += 1 - return result - - def impl(self, space): - size = self.find_size() - if size == 0: - raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % function.__name__)) - return space.wrap(loop(self, self.eval(0), size)) - return func_with_new_name(impl, "reduce_%s_impl" % function.__name__) - - def _reduce_argmax_argmin_impl(function): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'result', 'self', 'cur_best']) + reds = ['i', 'size', 'result', 'self', 'cur_best', 'dtype']) def loop(self, size): result = 0 cur_best = self.eval(0) i = 1 + dtype = self.find_dtype() while i < size: reduce_driver.jit_merge_point(signature=self.signature, - self=self, size=size, i=i, - result=result, cur_best=cur_best) - new_best = function(cur_best, self.eval(i)) - if new_best != cur_best: + self=self, dtype=dtype, + size=size, i=i, result=result, + cur_best=cur_best) + new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) + if dtype.ne(new_best, cur_best): result = i cur_best = new_best i += 1 @@ -141,16 +124,17 @@ if size == 0: raise OperationError(space.w_ValueError, space.wrap("Can't call %s on zero-size arrays" \ - % function.__name__)) + % op_name)) return space.wrap(loop(self, size)) - return func_with_new_name(impl, "reduce_arg%s_impl" % function.__name__) + return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) def _all(self): size = self.find_size() + dtype = self.find_dtype() i = 0 while i < size: - all_driver.jit_merge_point(signature=self.signature, self=self, size=size, i=i) - if not self.eval(i): + all_driver.jit_merge_point(signature=self.signature, 
self=self, dtype=dtype, size=size, i=i) + if not dtype.bool(self.eval(i)): return False i += 1 return True @@ -159,77 +143,44 @@ def _any(self): size = self.find_size() + dtype = self.find_dtype() i = 0 while i < size: - any_driver.jit_merge_point(signature=self.signature, self=self, size=size, i=i) - if self.eval(i): + any_driver.jit_merge_point(signature=self.signature, self=self, size=size, dtype=dtype, i=i) + if dtype.bool(self.eval(i)): return True i += 1 return False def descr_any(self, space): return space.wrap(self._any()) - descr_sum = _reduce_sum_prod_impl(add, 0.0) - descr_prod = _reduce_sum_prod_impl(mul, 1.0) - descr_max = _reduce_max_min_impl(maximum) - descr_min = _reduce_max_min_impl(minimum) - descr_argmax = _reduce_argmax_argmin_impl(maximum) - descr_argmin = _reduce_argmax_argmin_impl(minimum) - - def descr_sort(self, space): - size = self.find_size() - stack = [(0,size-1)] - first=0; last=size-1; splitpoint=first; - while (len(stack) > 0): - first, last = stack.pop() - while last>first: - #splitpoint = split(first,last) - x = self.eval(first) - splitpoint = first - unknown = first+1 - while (unknown<=last): - if (self.eval(unknown) 1000: nums = [ - float2string(self.eval(index)) + dtype.str_format(self.eval(index)) for index in range(3) ] nums.append("..." 
+ "," * comma) nums.extend([ - float2string(self.eval(index)) + dtype.str_format(self.eval(index)) for index in range(self.find_size() - 3, self.find_size()) ]) else: nums = [ - float2string(self.eval(index)) + dtype.str_format(self.eval(index)) for index in range(self.find_size()) ] return nums @@ -237,19 +188,28 @@ def get_concrete(self): raise NotImplementedError - def descr_copy(self, space): - return new_numarray(space, self) + def descr_get_dtype(self, space): + return space.wrap(self.find_dtype()) def descr_get_shape(self, space): return space.newtuple([self.descr_len(space)]) + def descr_copy(self, space): + return space.call_function(space.gettypefor(BaseArray), self, self.find_dtype()) + def descr_len(self, space): return self.get_concrete().descr_len(space) def descr_repr(self, space): # Simple implementation so that we can see the array. Needs work. concrete = self.get_concrete() - return space.wrap("array([" + ", ".join(concrete._getnums(False)) + "])") + res = "array([" + ", ".join(concrete._getnums(False)) + "]" + dtype = concrete.find_dtype() + if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and + dtype is not space.fromcache(interp_dtype.W_Int64Dtype)): + res += ", dtype=" + dtype.name + res += ")" + return space.wrap(res) def descr_str(self, space): # Simple implementation so that we can see the array. Needs work. @@ -257,96 +217,121 @@ return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") def descr_getitem(self, space, w_idx): - # TODO: indexing by tuples + # TODO: indexing by arrays and lists + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length == 0: + return space.wrap(self) + if length > 1: # only one dimension for now. 
+ raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: # Single index - return space.wrap(self.get_concrete().eval(start)) + return self.get_concrete().eval(start).wrap(space) else: # Slice - res = SingleDimSlice(start, stop, step, slice_length, self, self.signature.transition(SingleDimSlice.static_signature)) + new_sig = signature.Signature.find_sig([ + SingleDimSlice.signature, self.signature + ]) + res = SingleDimSlice(start, stop, step, slice_length, self, new_sig) return space.wrap(res) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by tuples and lists + # TODO: indexing by arrays and lists self.invalidated() + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if length == 0: + w_idx = space.newslice(space.wrap(0), + space.wrap(self.find_size()), + space.wrap(1)) + else: + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: # Single index - self.get_concrete().setitem(start, space.float_w(w_value)) + self.get_concrete().setitem_w(space, start, w_value) else: concrete = self.get_concrete() if isinstance(w_value, BaseArray): - # for now we just copy if setting part of an array from + # for now we just copy if setting part of an array from # part of itself. can be improved. 
if (concrete.get_root_storage() == w_value.get_concrete().get_root_storage()): - w_value = new_numarray(space, w_value) + w_value = space.call_function(space.gettypefor(BaseArray), w_value) + assert isinstance(w_value, BaseArray) else: w_value = convert_to_array(space, w_value) - concrete.setslice(space, start, stop, step, + concrete.setslice(space, start, stop, step, slice_length, w_value) def descr_mean(self, space): return space.wrap(space.float_w(self.descr_sum(space))/self.find_size()) - def _sliceloop1(self, start, stop, step, source, dest): + def _sliceloop(self, start, stop, step, source, dest): i = start j = 0 - while i < stop: - slice_driver1.jit_merge_point(signature=source.signature, - step=step, stop=stop, i=i, j=j, source=source, - dest=dest) - dest.storage[i] = source.eval(j) + while (step > 0 and i < stop) or (step < 0 and i > stop): + slice_driver.jit_merge_point(signature=source.signature, step=step, + stop=stop, i=i, j=j, source=source, + dest=dest) + dest.setitem(i, source.eval(j).convert_to(dest.find_dtype())) j += 1 i += step - def _sliceloop2(self, start, stop, step, source, dest): - i = start - j = 0 - while i > stop: - slice_driver2.jit_merge_point(signature=source.signature, - step=step, stop=stop, i=i, j=j, source=source, - dest=dest) - dest.storage[i] = source.eval(j) - j += 1 - i += step - -def convert_to_array (space, w_obj): +def convert_to_array(space, w_obj): if isinstance(w_obj, BaseArray): return w_obj elif space.issequence_w(w_obj): # Convert to array. 
- return new_numarray(space, w_obj) + w_obj = space.call_function(space.gettypefor(BaseArray), w_obj) + assert isinstance(w_obj, BaseArray) + return w_obj else: # If it's a scalar - return FloatWrapper(space.float_w(w_obj)) + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) + return scalar_w(space, dtype, w_obj) -class FloatWrapper(BaseArray): +def scalar_w(space, dtype, w_obj): + return Scalar(dtype, dtype.unwrap(space, w_obj)) + +class Scalar(BaseArray): """ Intermediate class representing a float literal. """ - signature = Signature() + signature = signature.BaseSignature() - def __init__(self, float_value): + _attrs_ = ["dtype", "value"] + + def __init__(self, dtype, value): BaseArray.__init__(self) - self.float_value = float_value + self.dtype = dtype + self.value = value def find_size(self): raise ValueError + def find_dtype(self): + return self.dtype + def eval(self, i): - return self.float_value + return self.value class VirtualArray(BaseArray): """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, signature): + def __init__(self, signature, res_dtype): BaseArray.__init__(self) self.forced_result = None self.signature = signature + self.res_dtype = res_dtype def _del_sources(self): # Function for deleting references to source arrays, to allow garbage-collecting them @@ -356,12 +341,12 @@ i = 0 signature = self.signature result_size = self.find_size() - result = SingleDimArray(result_size) + result = SingleDimArray(result_size, self.find_dtype()) while i < result_size: numpy_driver.jit_merge_point(signature=signature, result_size=result_size, i=i, self=self, result=result) - result.storage[i] = self.eval(i) + result.dtype.setitem(result.storage, i, self.eval(i)) i += 1 return result @@ -379,17 +364,22 @@ return self.forced_result.eval(i) return self._eval(i) + def setitem(self, item, value): + return self.get_concrete().setitem(item, value) + def find_size(self): if self.forced_result is not None: # The 
result has been computed and sources may be unavailable return self.forced_result.find_size() return self._find_size() + def find_dtype(self): + return self.res_dtype + class Call1(VirtualArray): - def __init__(self, function, values, signature): - VirtualArray.__init__(self, signature) - self.function = function + def __init__(self, signature, res_dtype, values): + VirtualArray.__init__(self, signature, res_dtype) self.values = values def _del_sources(self): @@ -398,16 +388,24 @@ def _find_size(self): return self.values.find_size() + def _find_dtype(self): + return self.res_dtype + def _eval(self, i): - return self.function(self.values.eval(i)) + val = self.values.eval(i).convert_to(self.res_dtype) + + sig = jit.promote(self.signature) + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call1) + return call_sig.func(self.res_dtype, val) class Call2(VirtualArray): """ Intermediate class for performing binary operations. """ - def __init__(self, function, left, right, signature): - VirtualArray.__init__(self, signature) - self.function = function + def __init__(self, signature, res_dtype, left, right): + VirtualArray.__init__(self, signature, res_dtype) self.left = left self.right = right @@ -423,8 +421,14 @@ return self.right.find_size() def _eval(self, i): - lhs, rhs = self.left.eval(i), self.right.eval(i) - return self.function(lhs, rhs) + lhs = self.left.eval(i).convert_to(self.res_dtype) + rhs = self.right.eval(i).convert_to(self.res_dtype) + + sig = jit.promote(self.signature) + assert isinstance(sig, signature.Signature) + call_sig = sig.components[0] + assert isinstance(call_sig, signature.Call2) + return call_sig.func(self.res_dtype, lhs, rhs) class ViewArray(BaseArray): """ @@ -447,9 +451,13 @@ def eval(self, i): return self.parent.eval(self.calc_index(i)) - @unwrap_spec(item=int, value=float) + @unwrap_spec(item=int) + def setitem_w(self, space, item, w_value): + return 
self.parent.setitem_w(space, self.calc_index(item), w_value) + def setitem(self, item, value): - return self.parent.setitem(self.calc_index(item), value) + # This is currently not possible to be called from anywhere. + raise NotImplementedError def descr_len(self, space): return space.wrap(self.find_size()) @@ -458,7 +466,7 @@ raise NotImplementedError class SingleDimSlice(ViewArray): - static_signature = Signature() + signature = signature.BaseSignature() def __init__(self, start, stop, step, slice_length, parent, signature): ViewArray.__init__(self, parent, signature) @@ -475,35 +483,32 @@ self.size = slice_length def get_root_storage(self): - return self.parent.storage + return self.parent.get_concrete().get_root_storage() def find_size(self): return self.size + def find_dtype(self): + return self.parent.find_dtype() + def setslice(self, space, start, stop, step, slice_length, arr): start = self.calc_index(start) if stop != -1: stop = self.calc_index(stop) step = self.step * step - if step > 0: - self._sliceloop1(start, stop, step, arr, self.parent) - else: - self._sliceloop2(start, stop, step, arr, self.parent) + self._sliceloop(start, stop, step, arr, self.parent) def calc_index(self, item): return (self.start + item * self.step) class SingleDimArray(BaseArray): - signature = Signature() - - def __init__(self, size): + def __init__(self, size, dtype): BaseArray.__init__(self) self.size = size - self.storage = lltype.malloc(TP, size, zero=True, - flavor='raw', track_allocation=False, - add_memory_pressure=True) - # XXX find out why test_zjit explodes with trackign of allocations + self.dtype = dtype + self.storage = dtype.malloc(size) + self.signature = dtype.signature def get_concrete(self): return self @@ -514,54 +519,52 @@ def find_size(self): return self.size + def find_dtype(self): + return self.dtype + def eval(self, i): - return self.storage[i] + return self.dtype.getitem(self.storage, i) def descr_len(self, space): return space.wrap(self.size) + def 
setitem_w(self, space, item, w_value): + self.invalidated() + self.dtype.setitem_w(space, self.storage, item, w_value) + def setitem(self, item, value): self.invalidated() - self.storage[item] = value + self.dtype.setitem(self.storage, item, value) def setslice(self, space, start, stop, step, slice_length, arr): - if step > 0: - self._sliceloop1(start, stop, step, arr, self) - else: - self._sliceloop2(start, stop, step, arr, self) + self._sliceloop(start, stop, step, arr, self) def __del__(self): lltype.free(self.storage, flavor='raw', track_allocation=False) -def new_numarray(space, w_size_or_iterable): - l = space.listview(w_size_or_iterable) - arr = SingleDimArray(len(l)) - i = 0 - for w_elem in l: - arr.storage[i] = space.float_w(space.float(w_elem)) - i += 1 - return arr - -def descr_new_numarray(space, w_type, w_size_or_iterable): - return space.wrap(new_numarray(space, w_size_or_iterable)) + at unwrap_spec(size=int) +def zeros(space, size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) + return space.wrap(SingleDimArray(size, dtype=dtype)) @unwrap_spec(size=int) -def zeros(space, size): - return space.wrap(SingleDimArray(size)) +def ones(space, size, w_dtype=None): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + ) - at unwrap_spec(size=int) -def ones(space, size): - arr = SingleDimArray(size) + arr = SingleDimArray(size, dtype=dtype) + one = dtype.adapt_val(1) for i in xrange(size): - arr.storage[i] = 1.0 + arr.dtype.setitem(arr.storage, i, one) return space.wrap(arr) BaseArray.typedef = TypeDef( 'numarray', - __new__ = interp2app(descr_new_numarray), + __new__ = interp2app(BaseArray.descr__new__.im_func), - copy = interp2app(BaseArray.descr_copy), - shape = GetSetProperty(BaseArray.descr_get_shape), __len__ = interp2app(BaseArray.descr_len), __getitem__ = interp2app(BaseArray.descr_getitem), @@ 
-585,6 +588,9 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), + dtype = GetSetProperty(BaseArray.descr_get_dtype), + shape = GetSetProperty(BaseArray.descr_get_shape), + mean = interp2app(BaseArray.descr_mean), sum = interp2app(BaseArray.descr_sum), prod = interp2app(BaseArray.descr_prod), @@ -595,5 +601,6 @@ all = interp2app(BaseArray.descr_all), any = interp2app(BaseArray.descr_any), dot = interp2app(BaseArray.descr_dot), - sort = interp2app(BaseArray.descr_sort), + + copy = interp2app(BaseArray.descr_copy), ) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,7 +1,8 @@ +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy.interp_dtype import W_Float64Dtype from pypy.rlib.rstruct.runpack import runpack from pypy.rpython.lltypesystem import lltype, rffi -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import unwrap_spec FLOAT_SIZE = rffi.sizeof(lltype.Float) @@ -17,26 +18,17 @@ raise OperationError(space.w_ValueError, space.wrap( "string length %d not divisable by %d" % (length, FLOAT_SIZE))) - a = SingleDimArray(number) + dtype = space.fromcache(W_Float64Dtype) + a = SingleDimArray(number, dtype=dtype) start = 0 end = FLOAT_SIZE i = 0 while i < number: part = s[start:end] - a.storage[i] = runpack('d', part) + a.dtype.setitem(a.storage, i, dtype.box(runpack('d', part))) i += 1 start += FLOAT_SIZE end += FLOAT_SIZE - return space.wrap(a) - -class Signature(object): - def __init__(self): - self.transitions = {} - - def transition(self, target): - if target in self.transitions: - return self.transitions[target] - self.transitions[target] = new = Signature() - return new \ No newline at end of file + return space.wrap(a) \ No newline at end of file diff --git 
a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,139 +1,273 @@ -import math - -from pypy.module.micronumpy.interp_support import Signature -from pypy.rlib import rfloat +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty +from pypy.module.micronumpy import interp_dtype, signature +from pypy.rlib import jit from pypy.tool.sourcetools import func_with_new_name -def ufunc(func): - signature = Signature() - def impl(space, w_obj): - from pypy.module.micronumpy.interp_numarray import Call1, convert_to_array - if space.issequence_w(w_obj): - w_obj_arr = convert_to_array(space, w_obj) - w_res = Call1(func, w_obj_arr, w_obj_arr.signature.transition(signature)) - w_obj_arr.invalidates.append(w_res) - return w_res + +reduce_driver = jit.JitDriver( + greens = ["signature"], + reds = ["i", "size", "self", "dtype", "value", "obj"] +) + +class W_Ufunc(Wrappable): + _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + + def __init__(self, name, promote_to_float, promote_bools, identity): + self.name = name + self.promote_to_float = promote_to_float + self.promote_bools = promote_bools + + self.identity = identity + + def descr_repr(self, space): + return space.wrap("" % self.name) + + def descr_get_identity(self, space): + if self.identity is None: + return space.w_None + return self.identity.wrap(space) + + def descr_call(self, space, __args__): + try: + args_w = __args__.fixedunpack(self.argcount) + except ValueError, e: + raise OperationError(space.w_TypeError, space.wrap(str(e))) + return self.call(space, args_w) + + def descr_reduce(self, space, w_obj): + from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar 
+ + if self.argcount != 2: + raise OperationError(space.w_ValueError, space.wrap("reduce only " + "supported for binary functions")) + + assert isinstance(self, W_Ufunc2) + obj = convert_to_array(space, w_obj) + if isinstance(obj, Scalar): + raise OperationError(space.w_TypeError, space.wrap("cannot reduce " + "on a scalar")) + + size = obj.find_size() + dtype = find_unaryop_result_dtype( + space, obj.find_dtype(), + promote_to_largest=True + ) + start = 0 + if self.identity is None: + if size == 0: + raise operationerrfmt(space.w_ValueError, "zero-size array to " + "%s.reduce without identity", self.name) + value = obj.eval(0).convert_to(dtype) + start += 1 else: - return space.wrap(func(space.float_w(w_obj))) - return func_with_new_name(impl, "%s_dispatcher" % func.__name__) + value = self.identity.convert_to(dtype) + new_sig = signature.Signature.find_sig([ + self.reduce_signature, obj.signature + ]) + return self.reduce(new_sig, start, value, obj, dtype, size).wrap(space) -def ufunc2(func): - signature = Signature() - def impl(space, w_lhs, w_rhs): - from pypy.module.micronumpy.interp_numarray import Call2, convert_to_array - if space.issequence_w(w_lhs) or space.issequence_w(w_rhs): - w_lhs_arr = convert_to_array(space, w_lhs) - w_rhs_arr = convert_to_array(space, w_rhs) - new_sig = w_lhs_arr.signature.transition(signature).transition(w_rhs_arr.signature) - w_res = Call2(func, w_lhs_arr, w_rhs_arr, new_sig) - w_lhs_arr.invalidates.append(w_res) - w_rhs_arr.invalidates.append(w_res) - return w_res + def reduce(self, signature, start, value, obj, dtype, size): + i = start + while i < size: + reduce_driver.jit_merge_point(signature=signature, self=self, + value=value, obj=obj, i=i, + dtype=dtype, size=size) + value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) + i += 1 + return value + +class W_Ufunc1(W_Ufunc): + argcount = 1 + + def __init__(self, func, name, promote_to_float=False, promote_bools=False, + identity=None): + + W_Ufunc.__init__(self, 
name, promote_to_float, promote_bools, identity) + self.func = func + self.signature = signature.Call1(func) + + def call(self, space, args_w): + from pypy.module.micronumpy.interp_numarray import (Call1, + convert_to_array, Scalar) + + [w_obj] = args_w + w_obj = convert_to_array(space, w_obj) + res_dtype = find_unaryop_result_dtype(space, + w_obj.find_dtype(), + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + if isinstance(w_obj, Scalar): + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + + new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) + w_res = Call1(new_sig, res_dtype, w_obj) + w_obj.add_invalidates(w_res) + return w_res + + +class W_Ufunc2(W_Ufunc): + argcount = 2 + + def __init__(self, func, name, promote_to_float=False, promote_bools=False, + identity=None): + + W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) + self.func = func + self.signature = signature.Call2(func) + self.reduce_signature = signature.BaseSignature() + + def call(self, space, args_w): + from pypy.module.micronumpy.interp_numarray import (Call2, + convert_to_array, Scalar) + + [w_lhs, w_rhs] = args_w + w_lhs = convert_to_array(space, w_lhs) + w_rhs = convert_to_array(space, w_rhs) + res_dtype = find_binop_result_dtype(space, + w_lhs.find_dtype(), w_rhs.find_dtype(), + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, + ) + if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): + return self.func(res_dtype, w_lhs.value, w_rhs.value).wrap(space) + + new_sig = signature.Signature.find_sig([ + self.signature, w_lhs.signature, w_rhs.signature + ]) + w_res = Call2(new_sig, res_dtype, w_lhs, w_rhs) + w_lhs.add_invalidates(w_res) + w_rhs.add_invalidates(w_res) + return w_res + + +W_Ufunc.typedef = TypeDef("ufunc", + __module__ = "numpy", + + __call__ = interp2app(W_Ufunc.descr_call), + __repr__ = interp2app(W_Ufunc.descr_repr), + + identity = 
GetSetProperty(W_Ufunc.descr_get_identity), + nin = interp_attrproperty("argcount", cls=W_Ufunc), + + reduce = interp2app(W_Ufunc.descr_reduce), +) + +def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, + promote_bools=False): + # dt1.num should be <= dt2.num + if dt1.num > dt2.num: + dt1, dt2 = dt2, dt1 + # Some operations promote op(bool, bool) to return int8, rather than bool + if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): + return space.fromcache(interp_dtype.W_Int8Dtype) + if promote_to_float: + return find_unaryop_result_dtype(space, dt2, promote_to_float=True) + # If they're the same kind, choose the greater one. + if dt1.kind == dt2.kind: + return dt2 + + # Everything promotes to float, and bool promotes to everything. + if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: + return dt2 + + assert False + +def find_unaryop_result_dtype(space, dt, promote_to_float=False, + promote_bools=False, promote_to_largest=False): + if promote_bools and (dt.kind == interp_dtype.BOOLLTR): + return space.fromcache(interp_dtype.W_Int8Dtype) + if promote_to_float: + for bytes, dtype in interp_dtype.dtypes_by_num_bytes: + if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes >= dt.num_bytes: + return space.fromcache(dtype) + if promote_to_largest: + if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: + return space.fromcache(interp_dtype.W_Int64Dtype) + elif dt.kind == interp_dtype.FLOATINGLTR: + return space.fromcache(interp_dtype.W_Float64Dtype) else: - return space.wrap(func(space.float_w(w_lhs), space.float_w(w_rhs))) - return func_with_new_name(impl, "%s_dispatcher" % func.__name__) + assert False + return dt - at ufunc -def absolute(value): - return abs(value) +def find_dtype_for_scalar(space, w_obj, current_guess=None): + w_type = space.type(w_obj) - at ufunc2 -def add(lvalue, rvalue): - return lvalue + rvalue + bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) + 
int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype) - at ufunc2 -def copysign(lvalue, rvalue): - return rfloat.copysign(lvalue, rvalue) + if space.is_w(w_type, space.w_bool): + if current_guess is None: + return bool_dtype + elif space.is_w(w_type, space.w_int): + if (current_guess is None or current_guess is bool_dtype or + current_guess is int64_dtype): + return int64_dtype + return space.fromcache(interp_dtype.W_Float64Dtype) - at ufunc2 -def divide(lvalue, rvalue): - return lvalue / rvalue - at ufunc -def exp(value): - try: - return math.exp(value) - except OverflowError: - return rfloat.INFINITY +def ufunc_dtype_caller(ufunc_name, op_name, argcount): + if argcount == 1: + def impl(res_dtype, value): + return getattr(res_dtype, op_name)(value) + elif argcount == 2: + def impl(res_dtype, lvalue, rvalue): + return getattr(res_dtype, op_name)(lvalue, rvalue) + return func_with_new_name(impl, ufunc_name) - at ufunc -def fabs(value): - return math.fabs(value) +class UfuncState(object): + def __init__(self, space): + "NOT_RPYTHON" + for ufunc_def in [ + ("add", "add", 2, {"identity": 0}), + ("subtract", "sub", 2), + ("multiply", "mul", 2, {"identity": 1}), + ("divide", "div", 2, {"promote_bools": True}), + ("mod", "mod", 2, {"promote_bools": True}), + ("power", "pow", 2, {"promote_bools": True}), - at ufunc2 -def maximum(lvalue, rvalue): - return max(lvalue, rvalue) + ("maximum", "max", 2), + ("minimum", "min", 2), - at ufunc2 -def minimum(lvalue, rvalue): - return min(lvalue, rvalue) + ("copysign", "copysign", 2, {"promote_to_float": True}), - at ufunc2 -def multiply(lvalue, rvalue): - return lvalue * rvalue + ("positive", "pos", 1), + ("negative", "neg", 1), + ("absolute", "abs", 1), + ("sign", "sign", 1, {"promote_bools": True}), + ("reciprocal", "reciprocal", 1), -# Used by numarray for __pos__. Not visible from numpy application space. 
- at ufunc -def positive(value): - return value + ("fabs", "fabs", 1, {"promote_to_float": True}), + ("floor", "floor", 1, {"promote_to_float": True}), + ("exp", "exp", 1, {"promote_to_float": True}), - at ufunc -def negative(value): - return -value + ("sin", "sin", 1, {"promote_to_float": True}), + ("cos", "cos", 1, {"promote_to_float": True}), + ("tan", "tan", 1, {"promote_to_float": True}), + ("arcsin", "arcsin", 1, {"promote_to_float": True}), + ("arccos", "arccos", 1, {"promote_to_float": True}), + ("arctan", "arctan", 1, {"promote_to_float": True}), + ]: + self.add_ufunc(space, *ufunc_def) - at ufunc -def reciprocal(value): - if value == 0.0: - return rfloat.copysign(rfloat.INFINITY, value) - return 1.0 / value + def add_ufunc(self, space, ufunc_name, op_name, argcount, extra_kwargs=None): + if extra_kwargs is None: + extra_kwargs = {} - at ufunc2 -def subtract(lvalue, rvalue): - return lvalue - rvalue + identity = extra_kwargs.get("identity") + if identity is not None: + identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity) + extra_kwargs["identity"] = identity - at ufunc -def floor(value): - return math.floor(value) + func = ufunc_dtype_caller(ufunc_name, op_name, argcount) + if argcount == 1: + ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs) + elif argcount == 2: + ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs) + setattr(self, ufunc_name, ufunc) - at ufunc -def sign(value): - if value == 0.0: - return 0.0 - return rfloat.copysign(1.0, value) - - at ufunc -def sin(value): - return math.sin(value) - - at ufunc -def cos(value): - return math.cos(value) - - at ufunc -def tan(value): - return math.tan(value) - - at ufunc2 -def power(lvalue, rvalue): - return math.pow(lvalue, rvalue) - - at ufunc2 -def mod(lvalue, rvalue): - return math.fmod(lvalue, rvalue) - - - at ufunc -def arcsin(value): - if value < -1.0 or value > 1.0: - return rfloat.NAN - return math.asin(value) - - at ufunc -def arccos(value): - if value < -1.0 or value > 1.0: 
- return rfloat.NAN - return math.acos(value) - - at ufunc -def arctan(value): - return math.atan(value) +def get(space): + return space.fromcache(UfuncState) \ No newline at end of file diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/signature.py @@ -0,0 +1,52 @@ +from pypy.rlib.objectmodel import r_dict, compute_identity_hash +from pypy.rlib.rarithmetic import intmask + + +def components_eq(lhs, rhs): + if len(lhs) != len(rhs): + return False + for i in range(len(lhs)): + v1, v2 = lhs[i], rhs[i] + if type(v1) is not type(v2) or not v1.eq(v2): + return False + return True + +def components_hash(components): + res = 0x345678 + for component in components: + res = intmask((1000003 * res) ^ component.hash()) + return res + +class BaseSignature(object): + _attrs_ = [] + + def eq(self, other): + return self is other + + def hash(self): + return compute_identity_hash(self) + +class Signature(BaseSignature): + _known_sigs = r_dict(components_eq, components_hash) + + _attrs_ = ["components"] + _immutable_fields_ = ["components[*]"] + + def __init__(self, components): + self.components = components + + @staticmethod + def find_sig(components): + return Signature._known_sigs.setdefault(components, Signature(components)) + +class Call1(BaseSignature): + _immutable_fields_ = ["func"] + + def __init__(self, func): + self.func = func + +class Call2(BaseSignature): + _immutable_fields_ = ["func"] + + def __init__(self, func): + self.func = func \ No newline at end of file diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,23 +1,36 @@ from pypy.conftest import gettestobjspace -from pypy.module.micronumpy.interp_numarray import SingleDimArray, FloatWrapper +from pypy.module.micronumpy import interp_dtype +from 
pypy.module.micronumpy.interp_numarray import SingleDimArray, Scalar +from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, + find_unaryop_result_dtype) + class BaseNumpyAppTest(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=('micronumpy',)) + cls.space = gettestobjspace(usemodules=['micronumpy']) class TestSignature(object): def test_binop_signature(self, space): - ar = SingleDimArray(10) + float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + + ar = SingleDimArray(10, dtype=float64_dtype) v1 = ar.descr_add(space, ar) - v2 = ar.descr_add(space, FloatWrapper(2.0)) + v2 = ar.descr_add(space, Scalar(float64_dtype, 2.0)) assert v1.signature is not v2.signature - v3 = ar.descr_add(space, FloatWrapper(1.0)) + v3 = ar.descr_add(space, Scalar(float64_dtype, 1.0)) assert v2.signature is v3.signature v4 = ar.descr_add(space, ar) assert v1.signature is v4.signature + bool_ar = SingleDimArray(10, dtype=space.fromcache(interp_dtype.W_BoolDtype)) + v5 = ar.descr_add(space, bool_ar) + assert v5.signature is not v1.signature + assert v5.signature is not v2.signature + v6 = ar.descr_add(space, bool_ar) + assert v5.signature is v6.signature + def test_slice_signature(self, space): - ar = SingleDimArray(10) + ar = SingleDimArray(10, dtype=space.fromcache(interp_dtype.W_Float64Dtype)) v1 = ar.descr_getitem(space, space.wrap(slice(1, 5, 1))) v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1))) assert v1.signature is v2.signature @@ -25,3 +38,44 @@ v3 = ar.descr_add(space, v1) v4 = ar.descr_add(space, v2) assert v3.signature is v4.signature + +class TestUfuncCoerscion(object): + def test_binops(self, space): + bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) + int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) + int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) + float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + + # Basic pairing + assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is 
bool_dtype + assert find_binop_result_dtype(space, bool_dtype, float64_dtype) is float64_dtype + assert find_binop_result_dtype(space, float64_dtype, bool_dtype) is float64_dtype + assert find_binop_result_dtype(space, int32_dtype, int8_dtype) is int32_dtype + assert find_binop_result_dtype(space, int32_dtype, bool_dtype) is int32_dtype + + # With promote bool (happens on div), the result is that the op should + # promote bools to int8 + assert find_binop_result_dtype(space, bool_dtype, bool_dtype, promote_bools=True) is int8_dtype + assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_bools=True) is float64_dtype + + # Coerce to floats + assert find_binop_result_dtype(space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype + + def test_unaryops(self, space): + bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) + int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) + int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) + float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) + + # Normal rules, everythign returns itself + assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype + assert find_unaryop_result_dtype(space, int8_dtype) is int8_dtype + assert find_unaryop_result_dtype(space, int32_dtype) is int32_dtype + assert find_unaryop_result_dtype(space, float64_dtype) is float64_dtype + + # Coerce to floats, some of these will eventually be float16, or + # whatever our smallest float type is. 
+ assert find_unaryop_result_dtype(space, bool_dtype, promote_to_float=True) is float64_dtype + assert find_unaryop_result_dtype(space, int8_dtype, promote_to_float=True) is float64_dtype + assert find_unaryop_result_dtype(space, int32_dtype, promote_to_float=True) is float64_dtype + assert find_unaryop_result_dtype(space, float64_dtype, promote_to_float=True) is float64_dtype \ No newline at end of file diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -0,0 +1,111 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestDtypes(BaseNumpyAppTest): + def test_dtype(self): + from numpy import dtype + + d = dtype('?') + assert d.num == 0 + assert d.kind == 'b' + assert dtype('int8').num == 1 + assert dtype(d) is d + assert dtype(None) is dtype(float) + raises(TypeError, dtype, 1042) + + def test_dtype_with_types(self): + from numpy import dtype + + assert dtype(bool).num == 0 + assert dtype(long).num == 9 + assert dtype(float).num == 12 + + def test_array_dtype_attr(self): + from numpy import array, dtype + + a = array(range(5), long) + assert a.dtype is dtype(long) + + def test_repr_str(self): + from numpy import dtype + + assert repr(dtype) == "" + d = dtype('?') + assert repr(d) == "dtype('bool')" + assert str(d) == "bool" + + def test_bool_array(self): + from numpy import array + + a = array([0, 1, 2, 2.5], dtype='?') + assert a[0] is False + for i in xrange(1, 4): + assert a[i] is True + + def test_copy_array_with_dtype(self): + from numpy import array + a = array([0, 1, 2, 3], dtype=long) + # int on 64-bit, long in 32-bit + assert isinstance(a[0], (int, long)) + b = a.copy() + assert isinstance(b[0], (int, long)) + + a = array([0, 1, 2, 3], dtype=bool) + assert isinstance(a[0], bool) + b = a.copy() + assert isinstance(b[0], bool) + + def test_zeros_bool(self): + from numpy import zeros + a = 
zeros(10, dtype=bool) + for i in range(10): + assert a[i] is False + + def test_ones_bool(self): + from numpy import ones + a = ones(10, dtype=bool) + for i in range(10): + assert a[i] is True + + def test_zeros_long(self): + from numpy import zeros + a = zeros(10, dtype=long) + for i in range(10): + assert isinstance(a[i], (int, long)) + assert a[1] == 0 + + def test_ones_long(self): + from numpy import ones + a = ones(10, dtype=bool) + for i in range(10): + assert isinstance(a[i], (int, long)) + assert a[1] == 1 + + def test_add_int8(self): + from numpy import array, dtype + + a = array(range(5), dtype="int8") + b = a + a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == i * 2 + + def test_add_int16(self): + from numpy import array, dtype + + a = array(range(5), dtype="int16") + b = a + a + assert b.dtype is dtype("int16") + for i in range(5): + assert b[i] == i * 2 + + def test_shape(self): + from numpy import dtype + + assert dtype(long).shape == () + + def test_cant_subclass(self): + from numpy import dtype + + # You can't subclass dtype + raises(TypeError, type, "Foo", (dtype,), {}) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,5 +1,3 @@ -import py - from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest from pypy.conftest import gettestobjspace @@ -52,14 +50,18 @@ def test_repr(self): from numpy import array, zeros - a = array(range(5)) + a = array(range(5), float) assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" a = zeros(1001) assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" + a = array(range(5), long) + assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([True, False, True, False], "?") + assert repr(a) == "array([True, False, True, False], dtype=bool)" def test_repr_slice(self): from numpy import array, zeros - a = array(range(5)) + a = 
array(range(5), float) b = a[1::2] assert repr(b) == "array([1.0, 3.0])" a = zeros(2002) @@ -68,15 +70,26 @@ def test_str(self): from numpy import array, zeros - a = array(range(5)) + a = array(range(5), float) assert str(a) == "[0.0 1.0 2.0 3.0 4.0]" assert str((2*a)[:]) == "[0.0 2.0 4.0 6.0 8.0]" a = zeros(1001) assert str(a) == "[0.0 0.0 0.0 ..., 0.0 0.0 0.0]" + a = array(range(5), dtype=long) + assert str(a) == "[0 1 2 3 4]" + a = array([True, False, True, False], dtype="?") + assert str(a) == "[True False True False]" + + a = array(range(5), dtype="int8") + assert str(a) == "[0 1 2 3 4]" + + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + def test_str_slice(self): from numpy import array, zeros - a = array(range(5)) + a = array(range(5), float) b = a[1::2] assert str(b) == "[1.0 3.0]" a = zeros(2002) @@ -92,6 +105,16 @@ assert a[-1] == 8 raises(IndexError, "a[-6]") + def test_getitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)]") + for i in xrange(5): + assert a[(i,)] == i + b = a[()] + for i in xrange(5): + assert a[i] == b[i] + def test_setitem(self): from numpy import array a = array(range(5)) @@ -100,6 +123,17 @@ raises(IndexError, "a[5] = 0.0") raises(IndexError, "a[-6] = 3.0") + def test_setitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)] = [0,1]") + for i in xrange(5): + a[(i,)] = i+1 + assert a[i] == i+1 + a[()] = range(5) + for i in xrange(5): + assert a[i] == i + def test_setslice_array(self): from numpy import array a = array(range(5)) @@ -132,7 +166,7 @@ def test_setslice_list(self): from numpy import array - a = array(range(5)) + a = array(range(5), float) b = [0., 1.] a[1:4:2] = b assert a[1] == 0. @@ -140,7 +174,7 @@ def test_setslice_constant(self): from numpy import array - a = array(range(5)) + a = array(range(5), float) a[1:4:2] = 0. assert a[1] == 0. assert a[3] == 0. 
@@ -167,6 +201,12 @@ for i in range(5): assert b[i] == i + i + a = array([True, False, True, False], dtype="?") + b = array([True, True, False, False], dtype="?") + c = a + b + for i in range(4): + assert c[i] == bool(a[i] + b[i]) + def test_add_other(self): from numpy import array a = array(range(5)) @@ -220,12 +260,19 @@ assert b[i] == i - 5 def test_mul(self): - from numpy import array + from numpy import array, dtype a = array(range(5)) b = a * a for i in range(5): assert b[i] == i * i + a = array(range(5), dtype=bool) + b = a * a + assert b.dtype is dtype(bool) + assert b[0] is False + for i in range(1, 5): + assert b[i] is True + def test_mul_constant(self): from numpy import array a = array(range(5)) @@ -234,16 +281,22 @@ assert b[i] == i * 5 def test_div(self): - from numpy import array + from numpy import array, dtype a = array(range(1, 6)) b = a / a for i in range(5): assert b[i] == 1 + a = array(range(1, 6), dtype=bool) + b = a / a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == 1 + def test_div_other(self): from numpy import array a = array(range(5)) - b = array([2, 2, 2, 2, 2]) + b = array([2, 2, 2, 2, 2], float) c = a / b for i in range(5): assert c[i] == i / 2.0 @@ -257,7 +310,7 @@ def test_pow(self): from numpy import array - a = array(range(5)) + a = array(range(5), float) b = a ** a for i in range(5): print b[i], i**i @@ -265,7 +318,7 @@ def test_pow_other(self): from numpy import array - a = array(range(5)) + a = array(range(5), float) b = array([2, 2, 2, 2, 2]) c = a ** b for i in range(5): @@ -273,7 +326,7 @@ def test_pow_constant(self): from numpy import array - a = array(range(5)) + a = array(range(5), float) b = a ** 2 for i in range(5): assert b[i] == i ** 2 @@ -285,6 +338,12 @@ for i in range(5): assert b[i] == 0 + a = array(range(1, 6), float) + b = (a + 1) % a + assert b[0] == 0 + for i in range(1, 5): + assert b[i] == 1 + def test_mod_other(self): from numpy import array a = array(range(5)) @@ -307,6 +366,10 @@ 
for i in range(5): assert b[i] == a[i] + a = +array(range(5)) + for i in range(5): + assert a[i] == i + def test_neg(self): from numpy import array a = array([1.,-2.,3.,-4.,-5.]) @@ -314,6 +377,10 @@ for i in range(5): assert b[i] == -a[i] + a = -array(range(5), dtype="int8") + for i in range(5): + assert a[i] == -i + def test_abs(self): from numpy import array a = array([1.,-2.,3.,-4.,-5.]) @@ -321,6 +388,10 @@ for i in range(5): assert b[i] == abs(a[i]) + a = abs(array(range(-5, 5), dtype="int8")) + for i in range(-5, 5): + assert a[i + 5] == abs(i) + def test_auto_force(self): from numpy import array a = array(range(5)) @@ -343,6 +414,12 @@ for i in range(4): assert s[i] == a[i+1] + s = (a + a)[1:2] + assert len(s) == 1 + assert s[0] == 2 + s[:1] = array([5]) + assert s[0] == 5 + def test_getslice_step(self): from numpy import array a = array(range(10)) @@ -388,6 +465,9 @@ assert a.sum() == 10.0 assert a[:4].sum() == 6.0 + a = array([True] * 5, bool) + assert a.sum() == 5 + def test_prod(self): from numpy import array a = array(range(1,6)) @@ -420,6 +500,9 @@ b = array([]) raises(ValueError, "b.argmax()") + a = array(range(-5, 5)) + assert a.argmax() == 9 + def test_argmin(self): from numpy import array a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) @@ -427,24 +510,6 @@ b = array([]) raises(ValueError, "b.argmin()") - def test_sort(self): - from numpy import array - a = [3.0,4.0,0.0,-1.0] - b = array(a) - a.sort() - b.sort() - assert(len(a)==len(b)) - for i in range(len(a)): - assert(a[i]==b[i]) - a = array(list(reversed(range(6)))) - b = array(range(6)) - a.sort() - assert(len(a)==len(b)) - for i in range(len(a)): - assert(a[i]==b[i]) - - - def test_all(self): from numpy import array a = array(range(5)) @@ -468,12 +533,25 @@ a = array(range(5)) assert a.dot(a) == 30.0 + a = array(range(5)) + assert a.dot(range(5)) == 30 + def test_dot_constant(self): from numpy import array a = array(range(5)) b = a.dot(2.5) for i in xrange(5): - assert b[i] == 2.5*a[i] + assert b[i] 
== 2.5 * a[i] + + def test_dtype_guessing(self): + from numpy import array, dtype + + assert array([True]).dtype is dtype(bool) + assert array([True, 1]).dtype is dtype(long) + assert array([1, 2, 3]).dtype is dtype(long) + assert array([1.2, True]).dtype is dtype(float) + assert array([1.2, 5]).dtype is dtype(float) + assert array([]).dtype is dtype(float) class AppTestSupport(object): @@ -488,4 +566,3 @@ for i in range(4): assert a[i] == i + 1 raises(ValueError, fromstring, "abc") - diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -3,6 +3,32 @@ class AppTestUfuncs(BaseNumpyAppTest): + def test_ufunc_instance(self): + from numpy import add, ufunc + + assert isinstance(add, ufunc) + assert repr(add) == "" + assert repr(ufunc) == "" + + def test_ufunc_attrs(self): + from numpy import add, multiply, sin + + assert add.identity == 0 + assert multiply.identity == 1 + assert sin.identity is None + + assert add.nin == 2 + assert multiply.nin == 2 + assert sin.nin == 1 + + def test_wrong_arguments(self): + from numpy import add, sin + + raises(TypeError, add, 1) + raises(TypeError, add, 1, 2, 3) + raises(TypeError, sin, 1, 2) + raises(TypeError, sin) + def test_single_item(self): from numpy import negative, sign, minimum @@ -86,7 +112,7 @@ def test_fabs(self): from numpy import array, fabs from math import fabs as math_fabs - + a = array([-5.0, -0.0, 1.0]) b = fabs(a) for i in range(3): @@ -110,6 +136,10 @@ for i in range(3): assert c[i] == max(a[i], b[i]) + x = maximum(2, 3) + assert x == 3 + assert isinstance(x, (int, long)) + def test_multiply(self): from numpy import array, multiply @@ -120,7 +150,7 @@ assert c[i] == a[i] * b[i] def test_sign(self): - from numpy import array, sign + from numpy import array, sign, dtype reference = [-1.0, 0.0, 0.0, 1.0] a = array([-5.0, -0.0, 0.0, 6.0]) @@ -128,6 +158,16 @@ for i in 
range(4): assert b[i] == reference[i] + a = sign(array(range(-5, 5))) + ref = [-1, -1, -1, -1, -1, 0, 1, 1, 1, 1] + for i in range(10): + assert a[i] == ref[i] + + a = sign(array([True, False], dtype=bool)) + assert a.dtype == dtype("int8") + assert a[0] == 1 + assert a[1] == 0 + def test_reciporocal(self): from numpy import array, reciprocal @@ -165,6 +205,11 @@ for i in range(4): assert c[i] == reference[i] + b = array([True, True, True, True], dtype=bool) + c = copysign(a, b) + for i in range(4): + assert c[i] == abs(a[i]) + def test_exp(self): import math from numpy import array, exp @@ -188,6 +233,10 @@ for i in range(len(a)): assert b[i] == math.sin(a[i]) + a = sin(array([True, False], dtype=bool)) + assert a[0] == sin(1) + assert a[1] == 0.0 + def test_cos(self): import math from numpy import array, cos @@ -211,7 +260,7 @@ import math from numpy import array, arcsin - a = array([-1, -0.5, -0.33, 0, 0.33, 0.5, 1]) + a = array([-1, -0.5, -0.33, 0, 0.33, 0.5, 1]) b = arcsin(a) for i in range(len(a)): assert b[i] == math.asin(a[i]) @@ -230,7 +279,7 @@ for i in range(len(a)): assert b[i] == math.acos(a[i]) - + a = array([-10, -1.5, -1.01, 1.01, 1.5, 10, float('nan'), float('inf'), float('-inf')]) b = arccos(a) for f in b: @@ -249,3 +298,16 @@ b = arctan(a) assert math.isnan(b[0]) + def test_reduce_errors(self): + from numpy import sin, add + + raises(ValueError, sin.reduce, [1, 2, 3]) + raises(TypeError, add.reduce, 1) + + def test_reduce(self): + from numpy import add, maximum + + assert add.reduce([1, 2, 3]) == 6 + assert maximum.reduce([1]) == 1 + assert maximum.reduce([1, 2, 3]) == 3 + raises(ValueError, maximum.reduce, []) \ No newline at end of file diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -1,34 +1,26 @@ from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.module.micronumpy import 
interp_ufuncs, signature +from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, + FloatObject) +from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_Int64Dtype +from pypy.module.micronumpy.interp_numarray import (BaseArray, SingleDimArray, + SingleDimSlice, scalar_w) +from pypy.rlib.nonconst import NonConstant +from pypy.rpython.annlowlevel import llstr from pypy.rpython.test.test_llinterp import interpret -from pypy.module.micronumpy.interp_numarray import (SingleDimArray, Signature, - FloatWrapper, Call2, SingleDimSlice, add, mul, Call1) -from pypy.module.micronumpy.interp_ufuncs import negative -from pypy.module.micronumpy.compile import numpy_compile -from pypy.rlib.objectmodel import specialize -from pypy.rlib.nonconst import NonConstant -class FakeSpace(object): - w_ValueError = None - - def issequence_w(self, w_obj): - return True - - @specialize.argtype(1) - def wrap(self, w_obj): - return w_obj - - def float_w(self, w_obj): - return float(w_obj) class TestNumpyJIt(LLJitMixin): def setup_class(cls): cls.space = FakeSpace() + cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) + cls.int64_dtype = cls.space.fromcache(W_Int64Dtype) def test_add(self): def f(i): - ar = SingleDimArray(i) - v = Call2(add, ar, ar, Signature()) - return v.get_concrete().storage[3] + ar = SingleDimArray(i, dtype=self.float64_dtype) + v = interp_ufuncs.get(self.space).add.call(self.space, [ar, ar]) + return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({'getarrayitem_raw': 2, 'float_add': 1, @@ -38,9 +30,14 @@ def test_floatadd(self): def f(i): - ar = SingleDimArray(i) - v = Call2(add, ar, FloatWrapper(4.5), Signature()) - return v.get_concrete().storage[3] + ar = SingleDimArray(i, dtype=self.float64_dtype) + v = interp_ufuncs.get(self.space).add.call(self.space, [ + ar, + scalar_w(self.space, self.float64_dtype, self.space.wrap(4.5)) + ], + ) + assert isinstance(v, BaseArray) + return 
v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 1, "float_add": 1, @@ -50,10 +47,18 @@ def test_sum(self): space = self.space + float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i) - return ar.descr_add(space, ar).descr_sum(space) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(i, dtype=dtype) + v = ar.descr_add(space, ar).descr_sum(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 2, @@ -63,10 +68,18 @@ def test_prod(self): space = self.space + float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i) - return ar.descr_add(space, ar).descr_prod(space) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(i, dtype=dtype) + v = ar.descr_add(space, ar).descr_prod(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -76,32 +89,48 @@ def test_max(self): space = self.space + float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(i, dtype=dtype) j = 0 while j < i: - ar.get_concrete().storage[j] = float(j) + ar.get_concrete().setitem(j, float64_dtype.box(float(j))) j += 1 - return ar.descr_add(space, ar).descr_max(space) + v = ar.descr_add(space, ar).descr_max(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, "float_gt": 1, "int_add": 1, - "int_lt": 1, 
"guard_true": 1, + "int_lt": 1, "guard_true": 1, "guard_false": 1, "jump": 1}) assert result == f(5) def test_min(self): space = self.space + float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(i, dtype=dtype) j = 0 while j < i: - ar.get_concrete().storage[j] = float(j) + ar.get_concrete().setitem(j, float64_dtype.box(float(j))) j += 1 - return ar.descr_add(space, ar).descr_min(space) + v = ar.descr_add(space, ar).descr_min(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -112,14 +141,15 @@ def test_argmin(self): space = self.space + float64_dtype = self.float64_dtype def f(i): - ar = SingleDimArray(i) + ar = SingleDimArray(i, dtype=NonConstant(float64_dtype)) j = 0 while j < i: - ar.get_concrete().storage[j] = float(j) + ar.get_concrete().setitem(j, float64_dtype.box(float(j))) j += 1 - return ar.descr_add(space, ar).descr_argmin(space) + return ar.descr_add(space, ar).descr_argmin(space).intval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -130,14 +160,16 @@ def test_all(self): space = self.space + float64_dtype = self.float64_dtype def f(i): - ar = SingleDimArray(i) + ar = SingleDimArray(i, dtype=NonConstant(float64_dtype)) j = 0 while j < i: - ar.get_concrete().storage[j] = 1.0 + ar.get_concrete().setitem(j, float64_dtype.box(1.0)) j += 1 - return ar.descr_add(space, ar).descr_all(space) + return ar.descr_add(space, ar).descr_all(space).boolval + result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, "int_add": 1, "float_ne": 1, @@ -146,10 +178,11 @@ def test_any(self): space = self.space + float64_dtype = self.float64_dtype def 
f(i): - ar = SingleDimArray(i) - return ar.descr_add(space, ar).descr_any(space) + ar = SingleDimArray(i, dtype=NonConstant(float64_dtype)) + return ar.descr_add(space, ar).descr_any(space).boolval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -157,13 +190,17 @@ "int_lt": 1, "guard_true": 1, "jump": 1}) assert result == f(5) - def test_already_forecd(self): + def test_already_forced(self): + space = self.space + def f(i): - ar = SingleDimArray(i) - v1 = Call2(add, ar, FloatWrapper(4.5), Signature()) - v2 = Call2(mul, v1, FloatWrapper(4.5), Signature()) + ar = SingleDimArray(i, dtype=self.float64_dtype) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, scalar_w(space, self.float64_dtype, space.wrap(4.5))]) + assert isinstance(v1, BaseArray) + v2 = interp_ufuncs.get(self.space).multiply.call(space, [v1, scalar_w(space, self.float64_dtype, space.wrap(4.5))]) v1.force_if_needed() - return v2.get_concrete().storage[3] + assert isinstance(v2, BaseArray) + return v2.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) # This is the sum of the ops for both loops, however if you remove the @@ -177,10 +214,10 @@ def test_ufunc(self): space = self.space def f(i): - ar = SingleDimArray(i) - v1 = Call2(add, ar, ar, Signature()) - v2 = negative(space, v1) - return v2.get_concrete().storage[3] + ar = SingleDimArray(i, dtype=self.float64_dtype) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) + return v2.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, "float_neg": 1, @@ -192,17 +229,15 @@ def test_appropriate_specialization(self): space = self.space def f(i): - add_sig = Signature() - mul_sig = Signature() - ar = SingleDimArray(i) + ar = SingleDimArray(i, dtype=self.float64_dtype) 
- v1 = Call2(add, ar, ar, ar.signature.transition(add_sig)) - v2 = negative(space, v1) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) v2.get_concrete() for i in xrange(5): - v1 = Call2(mul, ar, ar, ar.signature.transition(mul_sig)) - v2 = negative(space, v1) + v1 = interp_ufuncs.get(self.space).multiply.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) v2.get_concrete() self.meta_interp(f, [5], listops=True, backendopt=True) @@ -212,10 +247,13 @@ def test_slice(self): def f(i): step = 3 - ar = SingleDimArray(step*i) - s = SingleDimSlice(0, step*i, step, i, ar, ar.signature.transition(SingleDimSlice.static_signature)) - v = Call2(add, s, s, Signature()) - return v.get_concrete().storage[3] + ar = SingleDimArray(step*i, dtype=self.float64_dtype) + new_sig = signature.Signature.find_sig([ + SingleDimSlice.signature, ar.signature + ]) + s = SingleDimSlice(0, step*i, step, i, ar, new_sig) + v = interp_ufuncs.get(self.space).add.call(self.space, [s, s]) + return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({'int_mul': 1, 'getarrayitem_raw': 2, 'float_add': 1, @@ -227,11 +265,17 @@ def f(i): step1 = 2 step2 = 3 - ar = SingleDimArray(step2*i) - s1 = SingleDimSlice(0, step1*i, step1, i, ar, ar.signature.transition(SingleDimSlice.static_signature)) - s2 = SingleDimSlice(0, step2*i, step2, i, ar, ar.signature.transition(SingleDimSlice.static_signature)) - v = Call2(add, s1, s2, Signature()) - return v.get_concrete().storage[3] + ar = SingleDimArray(step2*i, dtype=self.float64_dtype) + new_sig = signature.Signature.find_sig([ + SingleDimSlice.signature, ar.signature + ]) + s1 = SingleDimSlice(0, step1*i, step1, i, ar, new_sig) + new_sig = signature.Signature.find_sig([ + SingleDimSlice.signature, s1.signature + ]) + s2 = SingleDimSlice(0, step2*i, step2, i, ar, new_sig) + v = 
interp_ufuncs.get(self.space).add.call(self.space, [s1, s2]) + return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({'int_mul': 2, 'getarrayitem_raw': 2, 'float_add': 1, @@ -241,18 +285,16 @@ def test_setslice(self): space = self.space + float64_dtype = self.float64_dtype def f(i): step = NonConstant(3) - ar = SingleDimArray(step*i) - ar2 = SingleDimArray(i) - ar2.storage[1] = 5.5 - if NonConstant(False): - arg = ar2 - else: - arg = ar2.descr_add(space, ar2) + ar = SingleDimArray(step*i, dtype=float64_dtype) + ar2 = SingleDimArray(i, dtype=float64_dtype) + ar2.get_concrete().setitem(1, float64_dtype.box(5.5)) + arg = ar2.descr_add(space, ar2) ar.setslice(space, 0, step*i, step, i, arg) - return ar.get_concrete().storage[3] + return ar.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({'getarrayitem_raw': 2, @@ -267,12 +309,11 @@ x = x.compute() assert isinstance(x, SingleDimArray) assert x.size == 10 - assert x.storage[0] == 0 - assert x.storage[1] == ((1 + 1) * 1.2) / 1.2 - 1 - + assert x.eval(0).val == 0 + assert x.eval(1).val == ((1 + 1) * 1.2) / 1.2 - 1 + def test_translation(self): # we import main to check if the target compiles from pypy.translator.goal.targetnumpystandalone import main - from pypy.rpython.annlowlevel import llstr - + interpret(main, [llstr('af+'), 100]) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -161,6 +161,8 @@ interpleveldefs['mknod'] = 'interp_posix.mknod' if hasattr(os, 'nice'): interpleveldefs['nice'] = 'interp_posix.nice' + if hasattr(os, 'getlogin'): + interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', diff --git a/pypy/module/posix/interp_posix.py 
b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -464,6 +464,15 @@ space.wrap("strerror() argument out of range")) return space.wrap(text) +def getlogin(space): + """Return the currently logged in user.""" + try: + cur = os.getlogin() + except OSError, e: + raise wrap_oserror(space, e) + else: + return space.wrap(cur) + # ____________________________________________________________ def getstatfields(space): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -805,6 +805,16 @@ data = f.read() assert data == "who cares?" + try: + os.getlogin() + except (AttributeError, OSError): + pass + else: + def test_getlogin(self): + assert isinstance(self.posix.getlogin(), str) + # How else could we test that getlogin is properly + # working? + def test_tmpfile(self): os = self.posix f = os.tmpfile() diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -78,6 +78,7 @@ assert loop.match(""" i1 = int_gt(i0, 0) guard_true(i1, descr=...) + guard_not_invalidated(descr=...) 
f1 = cast_int_to_float(i0) i2 = float_eq(f1, inf) i3 = float_eq(f1, -inf) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c --- a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c +++ b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c @@ -481,6 +481,16 @@ int a, b, c, d, e, f, g, h; } S8I; + + +typedef int (*CALLBACK_RECT)(RECT rect); + +EXPORT(int) call_callback_with_rect(CALLBACK_RECT cb, RECT rect) +{ + return cb(rect); +} + + EXPORT(S8I) ret_8i_func(S8I inp) { inp.a *= 2; diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py @@ -150,7 +150,6 @@ class TestMoreCallbacks(BaseCTypesTestChecker): def test_callback_with_struct_argument(self): - py.test.skip("callbacks with struct arguments not implemented yet") class RECT(Structure): _fields_ = [("left", c_int), ("top", c_int), ("right", c_int), ("bottom", c_int)] @@ -167,6 +166,28 @@ assert res == 1111 + def test_callback_from_c_with_struct_argument(self): + import conftest + _ctypes_test = str(conftest.sofile) + dll = CDLL(_ctypes_test) + + class RECT(Structure): + _fields_ = [("left", c_long), ("top", c_long), + ("right", c_long), ("bottom", c_long)] + + proto = CFUNCTYPE(c_int, RECT) + def callback(point): + return point.left+point.top+point.right+point.bottom + + cbp = proto(callback) + rect = RECT(1000,100,10,1) + + call_callback_with_rect = dll.call_callback_with_rect + call_callback_with_rect.restype = c_int + call_callback_with_rect.argtypes = [proto, RECT] + res = call_callback_with_rect(cbp, rect) + assert res == 1111 + def test_callback_unsupported_return_struct(self): class RECT(Structure): _fields_ = [("left", c_int), ("top", c_int), diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- 
a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -913,12 +913,16 @@ def repr__String(space, w_str): s = w_str._value - buf = StringBuilder(len(s) + 2) - quote = "'" if quote in s and '"' not in s: quote = '"' + return space.wrap(string_escape_encode(s, quote)) + +def string_escape_encode(s, quote): + + buf = StringBuilder(len(s) + 2) + buf.append(quote) startslice = 0 @@ -959,7 +963,7 @@ buf.append(quote) - return space.wrap(buf.build()) + return buf.build() DEFAULT_NOOP_TABLE = ''.join([chr(i) for i in range(256)]) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -6,8 +6,6 @@ BaseTestRDictImplementation, BaseTestDevolvedDictImplementation from pypy.interpreter import gateway -from pypy.conftest import gettestobjspace, option - space = FakeSpace() class TestCellDict(object): @@ -44,10 +42,11 @@ class AppTestModuleDict(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) + cls.w_runappdirect = cls.space.wrap(option.runappdirect) def w_impl_used(self, obj): - if option.runappdirect: - py.test.skip("__repr__ doesn't work on appdirect") + if self.runappdirect: + skip("__repr__ doesn't work on appdirect") import __pypy__ assert "ModuleDictStrategy" in __pypy__.internal_repr(obj) diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -89,6 +89,9 @@ assert not self.not_forced(r) r.sort() assert r == range(1, 100) + [999] + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_pop(self): r = range(10) diff --git a/pypy/rlib/_rffi_stacklet.py b/pypy/rlib/_rffi_stacklet.py --- a/pypy/rlib/_rffi_stacklet.py +++ b/pypy/rlib/_rffi_stacklet.py @@ -45,5 +45,5 @@ destroy = 
llexternal('stacklet_destroy', [thread_handle, handle], lltype.Void) _translate_pointer = llexternal("_stacklet_translate_pointer", - [handle, llmemory.Address], + [llmemory.Address, llmemory.Address], llmemory.Address) diff --git a/pypy/rlib/_stacklet_asmgcc.py b/pypy/rlib/_stacklet_asmgcc.py --- a/pypy/rlib/_stacklet_asmgcc.py +++ b/pypy/rlib/_stacklet_asmgcc.py @@ -30,7 +30,7 @@ p = llmemory.cast_adr_to_ptr(obj, lltype.Ptr(SUSPSTACK)) if not p.handle: return False - self.context = p.handle + self.context = llmemory.cast_ptr_to_adr(p.handle) anchor = p.anchor del p self.curframe = lltype.malloc(WALKFRAME, flavor='raw') @@ -52,7 +52,7 @@ def teardown(self): lltype.free(self.curframe, flavor='raw') lltype.free(self.otherframe, flavor='raw') - self.context = lltype.nullptr(_c.handle.TO) + self.context = llmemory.NULL return llmemory.NULL def next(self, obj, prev): diff --git a/pypy/rlib/_stacklet_shadowstack.py b/pypy/rlib/_stacklet_shadowstack.py --- a/pypy/rlib/_stacklet_shadowstack.py +++ b/pypy/rlib/_stacklet_shadowstack.py @@ -13,6 +13,7 @@ # We still have the old shadowstack active at this point; save it # away, and start a fresh new one oldsuspstack = gcrootfinder.oldsuspstack + h = llmemory.cast_ptr_to_adr(h) llop.gc_save_current_state_away(lltype.Void, oldsuspstack, h) llop.gc_start_fresh_new_state(lltype.Void) @@ -50,6 +51,7 @@ # away, and restore the new one if oldsuspstack: ll_assert(not _c.is_empty_handle(h),"unexpected empty stacklet handle") + h = llmemory.cast_ptr_to_adr(h) llop.gc_save_current_state_away(lltype.Void, oldsuspstack, h) else: ll_assert(_c.is_empty_handle(h),"unexpected non-empty stacklet handle") diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -286,10 +286,10 @@ FFI_OK = cConfig.FFI_OK FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF -FFI_DEFAULT_ABI = rffi.cast(rffi.USHORT, cConfig.FFI_DEFAULT_ABI) +FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI if _WIN32: - FFI_STDCALL = 
rffi.cast(rffi.USHORT, cConfig.FFI_STDCALL) -FFI_TYPE_STRUCT = rffi.cast(rffi.USHORT, cConfig.FFI_TYPE_STRUCT) + FFI_STDCALL = cConfig.FFI_STDCALL +FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT FFI_CIFP = rffi.COpaquePtr('ffi_cif', compilation_info=eci) FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure) @@ -319,7 +319,7 @@ which the 'ffistruct' member is a regular FFI_TYPE. """ tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types)+1, flavor='raw') - tpe.ffistruct.c_type = FFI_TYPE_STRUCT + tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT) tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size) tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment) tpe.ffistruct.c_elements = rffi.cast(FFI_TYPE_PP, @@ -402,12 +402,20 @@ closureHeap = ClosureHeap() -FUNCFLAG_STDCALL = 0 -FUNCFLAG_CDECL = 1 # for WINAPI calls +FUNCFLAG_STDCALL = 0 # on Windows: for WINAPI calls +FUNCFLAG_CDECL = 1 # on Windows: for __cdecl calls FUNCFLAG_PYTHONAPI = 4 FUNCFLAG_USE_ERRNO = 8 FUNCFLAG_USE_LASTERROR = 16 +def get_call_conv(flags, from_jit): + if _WIN32 and (flags & FUNCFLAG_CDECL == 0): + return FFI_STDCALL + else: + return FFI_DEFAULT_ABI +get_call_conv._annspecialcase_ = 'specialize:arg(1)' # hack :-/ + + class AbstractFuncPtr(object): ll_cif = lltype.nullptr(FFI_CIFP.TO) ll_argtypes = lltype.nullptr(FFI_TYPE_PP.TO) @@ -427,21 +435,17 @@ self.ll_cif = lltype.malloc(FFI_CIFP.TO, flavor='raw', track_allocation=False) # freed by the __del__ - if _WIN32 and (flags & FUNCFLAG_CDECL == 0): - cc = FFI_STDCALL - else: - cc = FFI_DEFAULT_ABI - if _MSVC: # This little trick works correctly with MSVC. 
# It returns small structures in registers - if r_uint(restype.c_type) == FFI_TYPE_STRUCT: + if intmask(restype.c_type) == FFI_TYPE_STRUCT: if restype.c_size <= 4: restype = ffi_type_sint32 elif restype.c_size <= 8: restype = ffi_type_sint64 - res = c_ffi_prep_cif(self.ll_cif, cc, + res = c_ffi_prep_cif(self.ll_cif, + rffi.cast(rffi.USHORT, get_call_conv(flags,False)), rffi.cast(rffi.UINT, argnum), restype, self.ll_argtypes) if not res == FFI_OK: diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -75,7 +75,7 @@ @staticmethod @jit.elidable def is_struct(ffi_type): - return intmask(ffi_type.c_type) == intmask(FFI_TYPE_STRUCT) + return intmask(ffi_type.c_type) == FFI_TYPE_STRUCT types._import() diff --git a/pypy/rlib/nonconst.py b/pypy/rlib/nonconst.py --- a/pypy/rlib/nonconst.py +++ b/pypy/rlib/nonconst.py @@ -24,6 +24,12 @@ def __add__(self, other): return self.__dict__['constant'] + other + def __radd__(self, other): + return other + self.__dict__['constant'] + + def __mul__(self, other): + return self.__dict__['constant'] * other + class EntryNonConstant(ExtRegistryEntry): _about_ = NonConstant diff --git a/pypy/rlib/parsing/tree.py b/pypy/rlib/parsing/tree.py --- a/pypy/rlib/parsing/tree.py +++ b/pypy/rlib/parsing/tree.py @@ -6,9 +6,16 @@ content = ["digraph G{"] content.extend(self.dot()) content.append("}") - p = py.test.ensuretemp("automaton").join("temp.dot") + try: + p = py.test.ensuretemp("automaton").join("temp.dot") + remove = False + except AttributeError: # pytest lacks ensuretemp, make a normal one + p = py.path.local.mkdtemp().join('automaton.dot') + remove = True p.write("\n".join(content)) graphclient.display_dot_file(str(p)) + if remove: + p.dirpath().remove() class Symbol(Node): diff --git a/pypy/rlib/rerased.py b/pypy/rlib/rerased.py --- a/pypy/rlib/rerased.py +++ b/pypy/rlib/rerased.py @@ -117,6 +117,10 @@ return erase, unerase +def new_static_erasing_pair(name): + erase, unerase = 
new_erasing_pair(name) + return staticmethod(erase), staticmethod(unerase) + # ---------- implementation-specific ---------- diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -1403,7 +1403,7 @@ s, pos, pos + unicode_bytes) result.append(res) continue - result.append(unichr(t)) + result.append(UNICHR(t)) pos += unicode_bytes return result.build(), pos diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -20,7 +20,7 @@ from pypy.rpython.extfunc import ExtRegistryEntry from pypy.rlib.objectmodel import Symbolic, ComputedIntSymbolic from pypy.tool.uid import fixid -from pypy.rlib.rarithmetic import r_uint, r_singlefloat, r_longfloat, intmask +from pypy.rlib.rarithmetic import r_uint, r_singlefloat, r_longfloat, base_int, intmask from pypy.annotation import model as annmodel from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -113,7 +113,7 @@ rffi.LONGLONG: ctypes.c_longlong, rffi.ULONGLONG: ctypes.c_ulonglong, rffi.SIZE_T: ctypes.c_size_t, - lltype.Bool: ctypes.c_long, # XXX + lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_long), llmemory.Address: ctypes.c_void_p, llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX @@ -1142,6 +1142,8 @@ cvalue = 0 elif isinstance(cvalue, (str, unicode)): cvalue = ord(cvalue) # character -> integer + elif hasattr(RESTYPE, "_type") and issubclass(RESTYPE._type, base_int): + cvalue = int(cvalue) if not isinstance(cvalue, (int, long, float)): raise NotImplementedError("casting %r to %r" % (TYPE1, RESTYPE)) @@ -1151,7 +1153,11 @@ # an OverflowError on the following line. 
cvalue = ctypes.cast(ctypes.c_void_p(cvalue), cresulttype) else: - cvalue = cresulttype(cvalue).value # mask high bits off if needed + try: + cvalue = cresulttype(cvalue).value # mask high bits off if needed + except TypeError: + cvalue = int(cvalue) # float -> int + cvalue = cresulttype(cvalue).value # try again return ctypes2lltype(RESTYPE, cvalue) class ForceCastEntry(ExtRegistryEntry): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -1283,6 +1283,8 @@ try: return p._obj._hash_cache_ except AttributeError: + assert self._T._gckind == 'gc' + assert self # not for NULL result = hash(p._obj) if cache: try: diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -876,7 +876,7 @@ if size is None: size = llmemory.sizeof(tp) # a symbolic result in this case return size - if isinstance(tp, lltype.Ptr): + if isinstance(tp, lltype.Ptr) or tp is llmemory.Address: tp = ULONG # XXX! 
if tp is lltype.Char or tp is lltype.Bool: return 1 diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -699,7 +699,10 @@ def test_cast(self): res = cast(SIZE_T, -1) assert type(res) is r_size_t - assert res == r_size_t(-1) + assert res == r_size_t(-1) + # + res = cast(lltype.Signed, 42.5) + assert res == 42 def test_rffi_sizeof(self): try: diff --git a/pypy/rpython/memory/gctransform/shadowstack.py b/pypy/rpython/memory/gctransform/shadowstack.py --- a/pypy/rpython/memory/gctransform/shadowstack.py +++ b/pypy/rpython/memory/gctransform/shadowstack.py @@ -3,6 +3,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.debug import ll_assert +from pypy.rlib.nonconst import NonConstant from pypy.annotation import model as annmodel @@ -31,7 +32,7 @@ 'root_iterator' in translator._jit2gc): root_iterator = translator._jit2gc['root_iterator'] def jit_walk_stack_root(callback, addr, end): - root_iterator.context = llmemory.NULL + root_iterator.context = NonConstant(llmemory.NULL) gc = self.gc while True: addr = root_iterator.next(gc, addr, end) @@ -162,7 +163,7 @@ # We are in the child process. Assumes that only the # current thread survived, so frees the shadow stacks # of all the other ones. - gcdata.thread_stacks.clear() + gcdata.thread_stacks = None # Finally, reset the stored thread IDs, in case it # changed because of fork(). 
Also change the main # thread to the current one (because there is not any diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -383,6 +383,20 @@ return extdef([int, int], s_None, llimpl=dup2_llimpl, export_name="ll_os.ll_os_dup2") + @registering_if(os, "getlogin", condition=not _WIN32) + def register_os_getlogin(self): + os_getlogin = self.llexternal('getlogin', [], rffi.CCHARP) + + def getlogin_llimpl(): + result = os_getlogin() + if not result: + raise OSError(rposix.get_errno(), "getlogin failed") + + return rffi.charp2str(result) + + return extdef([], str, llimpl=getlogin_llimpl, + export_name="ll_os.ll_os_getlogin") + @registering_str_unicode(os.utime) def register_os_utime(self, traits): UTIMBUFP = lltype.Ptr(self.UTIMBUF) diff --git a/pypy/rpython/module/test/test_ll_os.py b/pypy/rpython/module/test/test_ll_os.py --- a/pypy/rpython/module/test/test_ll_os.py +++ b/pypy/rpython/module/test/test_ll_os.py @@ -35,6 +35,16 @@ for value in times: assert isinstance(value, float) +def test_getlogin(): + if not hasattr(os, 'getlogin'): + py.test.skip('posix specific function') + try: + expected = os.getlogin() + except OSError, e: + py.test.skip("the underlying os.getlogin() failed: %s" % e) + data = getllimpl(os.getlogin)() + assert data == expected + def test_utimes(): if os.name != 'nt': py.test.skip('Windows specific feature') diff --git a/pypy/tool/error.py b/pypy/tool/error.py --- a/pypy/tool/error.py +++ b/pypy/tool/error.py @@ -7,8 +7,8 @@ import sys import py -log = py.log.Producer("error") -py.log.setconsumer("error", ansi_log) +log = py.log.Producer("error") +py.log.setconsumer("error", ansi_log) SHOW_TRACEBACK = False SHOW_ANNOTATIONS = True @@ -17,7 +17,7 @@ from pypy.interpreter.pytraceback import offset2lineno import traceback -def source_lines(graph, block, operindex=None, offset=None, long=False, \ +def source_lines1(graph, block, operindex=None, offset=None, 
long=False, \ show_lines_of_code=SHOW_DEFAULT_LINES_OF_CODE): if block is not None: if block is graph.returnblock: @@ -61,6 +61,10 @@ lines.append("") return lines +def source_lines(graph, *args, **kwds): + lines = source_lines1(graph, *args, **kwds) + return ['In %r:' % (graph,)] + lines + class FlowingError(Exception): pass @@ -125,8 +129,16 @@ if func is None: r = repr(desc) else: - r = "function %s <%s, line %s>" % (func.func_name, - func.func_code.co_filename, func.func_code.co_firstlineno) + try: + if isinstance(func, type): + func_name = "%s.__init__" % func.__name__ + func = func.__init__.im_func + else: + func_name = func.func_name + r = "function %s <%s, line %s>" % (func_name, + func.func_code.co_filename, func.func_code.co_firstlineno) + except (AttributeError, TypeError): + r = repr(desc) msg.append(" %s returning" % (r,)) if hasattr(desc, 'getuniquegraph'): graph = desc.getuniquegraph() @@ -155,7 +167,7 @@ msg.append("%8s: %s" % (v, s_v)) msg.append('') msg += source_lines(graph, block, operindex, long=True) - + if called_from_graph is not None: msg.append(".. 
called from %r" % (called_from_graph,)) if s_value.origin is not None: @@ -184,7 +196,7 @@ import traceback errmsg = ["Error:\n"] exc, val, tb = sys.exc_info() - + errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, [])]) block = getattr(val, '__annotator_block', None) if block: diff --git a/pypy/translator/c/test/test_extfunc.py b/pypy/translator/c/test/test_extfunc.py --- a/pypy/translator/c/test/test_extfunc.py +++ b/pypy/translator/c/test/test_extfunc.py @@ -595,6 +595,18 @@ f1 = compile(does_stuff, []) f1() +if hasattr(os, 'getlogin'): + def test_os_getlogin(): + def does_stuff(): + return os.getlogin() + + try: + expected = os.getlogin() + except OSError, e: + py.test.skip("the underlying os.getlogin() failed: %s" % e) + f1 = compile(does_stuff, []) + assert f1() == expected + # ____________________________________________________________ def _real_getenv(var): diff --git a/pypy/translator/c/test/test_standalone.py b/pypy/translator/c/test/test_standalone.py --- a/pypy/translator/c/test/test_standalone.py +++ b/pypy/translator/c/test/test_standalone.py @@ -1019,6 +1019,27 @@ '5 ok'] + def test_gc_with_fork_without_threads(self): + from pypy.rlib.objectmodel import invoke_around_extcall + if not hasattr(os, 'fork'): + py.test.skip("requires fork()") + + def entry_point(argv): + childpid = os.fork() + if childpid == 0: + print "Testing..." + else: + pid, status = os.waitpid(childpid, 0) + assert pid == childpid + assert status == 0 + print "OK." 
+ return 0 + + t, cbuilder = self.compile(entry_point) + data = cbuilder.cmdexec('') + print repr(data) + assert data.startswith('Testing...\nOK.') + def test_thread_and_gc_with_fork(self): # This checks that memory allocated for the shadow stacks of the # other threads is really released when doing a fork() -- or at From noreply at buildbot.pypy.org Thu Sep 1 15:14:27 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Sep 2011 15:14:27 +0200 (CEST) Subject: [pypy-commit] pypy faster-nested-scopes: don't test jump arguments, doesn't lead to test precision and there are random variations Message-ID: <20110901131427.C42E08204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-nested-scopes Changeset: r46981:927d3f73e9f7 Date: 2011-09-01 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/927d3f73e9f7/ Log: don't test jump arguments, doesn't lead to test precision and there are random variations diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -174,7 +174,7 @@ guard_no_overflow(descr=...) 
i18 = force_token() --TICK-- - jump(p0, p1, p2, p3, p4, i8, p7, i17, p8, i9, i17, p10, p11, p12, descr=) + jump(..., descr=) """) def test_default_and_kw(self): From noreply at buildbot.pypy.org Thu Sep 1 15:33:59 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Sep 2011 15:33:59 +0200 (CEST) Subject: [pypy-commit] pypy faster-nested-scopes: close to-be-merge branch Message-ID: <20110901133359.DA9F78204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-nested-scopes Changeset: r46982:a5bc14d5cef9 Date: 2011-09-01 15:15 +0200 http://bitbucket.org/pypy/pypy/changeset/a5bc14d5cef9/ Log: close to-be-merge branch From noreply at buildbot.pypy.org Thu Sep 1 15:34:01 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Sep 2011 15:34:01 +0200 (CEST) Subject: [pypy-commit] pypy default: merge faster-nested-scopes: make nested scopes be a bit better supported by the JIT. Message-ID: <20110901133401.5FFAF8204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r46983:2641481ac49e Date: 2011-09-01 15:30 +0200 http://bitbucket.org/pypy/pypy/changeset/2641481ac49e/ Log: merge faster-nested-scopes: make nested scopes be a bit better supported by the JIT. - the cells on the frame are now part of the virtualizable. - the constructor of frame get a reference to the outer scope, which often makes reading of the inherited cells constant-foldable diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -626,9 +626,9 @@ self.default_compiler = compiler return compiler - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): "Create an empty PyFrame suitable for this code object." 
- return self.FrameClass(self, code, w_globals, closure) + return self.FrameClass(self, code, w_globals, outer_func) def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -30,7 +30,7 @@ can_change_code = True _immutable_fields_ = ['code?', 'w_func_globals?', - 'closure?', + 'closure?[*]', 'defs_w?[*]', 'name?'] @@ -96,7 +96,7 @@ assert isinstance(code, PyCode) if nargs < 5: new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in funccallunrolling: if i < nargs: new_frame.locals_stack_w[i] = args_w[i] @@ -156,7 +156,7 @@ def _flat_pycall(self, code, nargs, frame): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg @@ -167,7 +167,7 @@ def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -8,7 +8,7 @@ class Cell(Wrappable): "A simple container for a wrapped value." - + def __init__(self, w_value=None): self.w_value = w_value @@ -90,32 +90,33 @@ # variables coming from a parent function in which i'm nested # 'closure' is a list of Cell instances: the received free vars. 
- cells = None - @jit.unroll_safe - def initialize_frame_scopes(self, closure, code): - super_initialize_frame_scopes(self, closure, code) + def initialize_frame_scopes(self, outer_func, code): + super_initialize_frame_scopes(self, outer_func, code) ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: + self.cells = [] return # no self.cells needed - fast path - if closure is None: - closure = [] - elif closure is None: + elif outer_func is None: space = self.space raise OperationError(space.w_TypeError, space.wrap("directly executed code object " "may not contain free variables")) - if len(closure) != nfreevars: + if outer_func and outer_func.closure: + closure_size = len(outer_func.closure) + else: + closure_size = 0 + if closure_size != nfreevars: raise ValueError("code object received a closure with " "an unexpected number of free variables") self.cells = [None] * (ncellvars + nfreevars) for i in range(ncellvars): self.cells[i] = Cell() for i in range(nfreevars): - self.cells[i + ncellvars] = closure[i] - + self.cells[i + ncellvars] = outer_func.closure[i] + def _getcells(self): return self.cells diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -198,7 +198,7 @@ def funcrun(self, func, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, @@ -211,7 +211,7 @@ def funcrun_obj(self, func, w_obj, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -51,7 +51,7 @@ is_being_profiled = False escaped = False # see mark_as_escaped() - def __init__(self, 
space, code, w_globals, closure): + def __init__(self, space, code, w_globals, outer_func): if not we_are_translated(): assert type(self) in (space.FrameClass, CPythonFrame), ( "use space.FrameClass(), not directly PyFrame()") @@ -70,7 +70,7 @@ self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. - self.initialize_frame_scopes(closure, code) + self.initialize_frame_scopes(outer_func, code) self.f_lineno = code.co_firstlineno def mark_as_escaped(self): @@ -117,8 +117,8 @@ return self.builtin else: return self.space.builtin - - def initialize_frame_scopes(self, closure, code): + + def initialize_frame_scopes(self, outer_func, code): # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. # CO_NEWLOCALS: make a locals dict unless optimized is also set @@ -385,7 +385,11 @@ # do not use the instance's __init__ but the base's, because we set # everything like cells from here - PyFrame.__init__(self, space, pycode, w_globals, closure) + # XXX hack + from pypy.interpreter.function import Function + outer_func = Function(space, None, closure=closure, + forcename="fake") + PyFrame.__init__(self, space, pycode, w_globals, outer_func) f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True) new_frame.f_backref = jit.non_virtual_ref(f_back) diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -292,7 +292,7 @@ raise break new_frame = space.createframe(code, w_func.w_func_globals, - w_func.closure) + w_func) new_frame.locals_stack_w[0] = w_item w_res = new_frame.run() result_w.append(w_res) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -57,7 +57,7 @@ code = space.interp_w(PyCode, 
w_code) w_globals = from_ref(space, py_frame.c_f_globals) - frame = space.FrameClass(space, code, w_globals, closure=None) + frame = space.FrameClass(space, code, w_globals, outer_func=None) frame.f_lineno = py_frame.c_f_lineno w_obj = space.wrap(frame) track_reference(space, py_obj, w_obj) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -21,6 +21,7 @@ PyFrame._virtualizable2_ = ['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', 'last_exception', 'lastblock', 'is_being_profiled', diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -174,7 +174,7 @@ guard_no_overflow(descr=...) i18 = force_token() --TICK-- - jump(p0, p1, p2, p3, p4, i8, p7, i17, p8, i9, i17, p10, p11, p12, descr=) + jump(..., descr=) """) def test_default_and_kw(self): @@ -396,3 +396,70 @@ --TICK-- jump(..., descr=) """) + + def test_global_closure_has_constant_cells(self): + log = self.run(""" + def make_adder(n): + def add(x): + return x + n + return add + add5 = make_adder(5) + def main(): + i = 0 + while i < 5000: + i = add5(i) # ID: call + """, []) + loop, = log.loops_by_id('call', is_entry_bridge=True) + assert loop.match(""" + guard_value(i6, 1, descr=...) + guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) + guard_value(i4, 0, descr=...) + guard_value(p3, ConstPtr(ptr14), descr=...) + i15 = getfield_gc_pure(p8, descr=) + i17 = int_lt(i15, 5000) + guard_true(i17, descr=...) + p18 = getfield_gc(p0, descr=) + guard_value(p18, ConstPtr(ptr19), descr=...) + p20 = getfield_gc(p18, descr=) + guard_value(p20, ConstPtr(ptr21), descr=...) + guard_not_invalidated(descr=...) 
+ # most importantly, there is no getarrayitem_gc here + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) + i25 = force_token() + p26 = getfield_gc(p23, descr=) + guard_isnull(p26, descr=...) + i27 = getfield_gc(p23, descr=) + i28 = int_is_zero(i27) + guard_true(i28, descr=...) + p30 = getfield_gc(ConstPtr(ptr29), descr=) + guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) + i32 = getfield_gc_pure(p30, descr=) + i33 = int_add_ovf(i15, i32) + guard_no_overflow(descr=...) + --TICK-- + jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + """) + + def test_local_closure_is_virtual(self): + log = self.run(""" + def main(): + i = 0 + while i < 5000: + def add(): + return i + 1 + i = add() # ID: call + """, []) + loop, = log.loops_by_id('call') + assert loop.match(""" + i8 = getfield_gc_pure(p6, descr=) + i10 = int_lt(i8, 5000) + guard_true(i10, descr=...) + i11 = force_token() + i13 = int_add(i8, 1) + --TICK-- + p22 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + """) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -184,7 +184,7 @@ class FlowExecutionContext(ExecutionContext): - def __init__(self, space, code, globals, constargs={}, closure=None, + def __init__(self, space, code, globals, constargs={}, outer_func=None, name=None): ExecutionContext.__init__(self, space) self.code = code @@ -193,11 +193,11 @@ self.crnt_offset = -1 self.crnt_frame = None - if closure is None: + if outer_func and outer_func.closure: + self.closure = [nestedscope.Cell(Constant(value)) + for value in outer_func.closure] + else: self.closure = None - else: - self.closure = [nestedscope.Cell(Constant(value)) - for value in closure] frame = self.create_frame() formalargcount = code.getformalargcount() arg_list = [Variable() 
for i in range(formalargcount)] @@ -216,7 +216,7 @@ # while ignoring any operation like the creation of the locals dict self.recorder = [] frame = FlowSpaceFrame(self.space, self.code, - self.w_globals, self.closure) + self.w_globals, self) frame.last_instr = 0 return frame diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -252,9 +252,9 @@ raise TypeError("%r is a generator" % (func,)) code = PyCode._from_code(self, code) if func.func_closure is None: - closure = None + cl = None else: - closure = [extract_cell_content(c) for c in func.func_closure] + cl = [extract_cell_content(c) for c in func.func_closure] # CallableFactory.pycall may add class_ to functions that are methods name = func.func_name class_ = getattr(func, 'class_', None) @@ -262,8 +262,10 @@ name = '%s.%s' % (class_.__name__, name) for c in "<>&!": name = name.replace(c, '_') + class outerfunc: # hack + closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, closure, name) + constargs, outerfunc, name) graph = ec.graph graph.func = func # attach a signature and defaults to the graph diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -142,7 +142,7 @@ def funcrun(self, func, args): frame = func.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self.signature() scope_w = args.parse_obj(None, func.name, sig, func.defs_w) frame.setfastscope(scope_w) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -129,12 +129,12 @@ ec._py_repr = None return ec - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): from pypy.objspace.std.fake import CPythonFakeCode, CPythonFakeFrame if not we_are_translated() and 
isinstance(code, CPythonFakeCode): return CPythonFakeFrame(self, code, w_globals) else: - return ObjSpace.createframe(self, code, w_globals, closure) + return ObjSpace.createframe(self, code, w_globals, outer_func) def gettypefor(self, cls): return self.gettypeobject(cls.typedef) From noreply at buildbot.pypy.org Thu Sep 1 15:34:02 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 1 Sep 2011 15:34:02 +0200 (CEST) Subject: [pypy-commit] pypy default: merge default Message-ID: <20110901133402.96B188204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r46984:1b70e84ac93b Date: 2011-09-01 15:31 +0200 http://bitbucket.org/pypy/pypy/changeset/1b70e84ac93b/ Log: merge default diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -209,6 +209,8 @@ * Automatic unlimited stack (must be emulated__ so far) +* Support for other CPUs than x86 and x86-64 + .. __: `recursion depth limit`_ (*) Pickling, as well as changing threads, could be implemented by using @@ -217,9 +219,8 @@ "hard" switch (like now) when the C stack contains non-trivial C frames to save, and a "soft" switch (like previously) when it contains only simple calls from Python to Python. Soft-switched continulets would -also consume a bit less RAM, at the possible expense of making the -switch a bit slower (unsure about that; what is the Stackless Python -experience?). +also consume a bit less RAM, and the switch might be a bit faster too +(unsure about that; what is the Stackless Python experience?). 
Recursion depth limit From noreply at buildbot.pypy.org Thu Sep 1 17:52:06 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 17:52:06 +0200 (CEST) Subject: [pypy-commit] pypy default: Trying to fix Windows package.py Message-ID: <20110901155206.9C4508204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46985:8a9885e47d2c Date: 2011-09-01 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/8a9885e47d2c/ Log: Trying to fix Windows package.py diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -52,9 +52,13 @@ pypy_c_dir = basedir.join('pypy', 'translator', 'goal') pypy_c = pypy_c_dir.join('pypy-c.exe') libpypy_c = pypy_c_dir.join('libpypy-c.dll') + libexpat = pypy_c_dir.join('libexpat.dll') + if not libexpat.check(): + libexpat = py.path.local.sysfind('libexpat.dll') + assert libexpat, "libexpat.dll not found" binaries = [(pypy_c, pypy_c.basename), (libpypy_c, libpypy_c.basename), - (pypy_c_dir.join('libexpat.dll'), 'libexpat.dll')] + (libexpat, libexpat.basename)] else: basename = 'pypy-c' if override_pypy_c is None: From noreply at buildbot.pypy.org Thu Sep 1 17:55:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 17:55:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Print the path of the libexpat.dll we pick. Message-ID: <20110901155539.2E1978204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46986:fc2e47f14b48 Date: 2011-09-01 17:55 +0200 http://bitbucket.org/pypy/pypy/changeset/fc2e47f14b48/ Log: Print the path of the libexpat.dll we pick. 
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -56,6 +56,7 @@ if not libexpat.check(): libexpat = py.path.local.sysfind('libexpat.dll') assert libexpat, "libexpat.dll not found" + print "Picking %s" % libexpat binaries = [(pypy_c, pypy_c.basename), (libpypy_c, libpypy_c.basename), (libexpat, libexpat.basename)] From noreply at buildbot.pypy.org Thu Sep 1 18:05:18 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Sep 2011 18:05:18 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid 'import ctypes' inside the byref function, we import 'pointer' directly from _ctypes Message-ID: <20110901160518.B2A7F8204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r46987:862677259356 Date: 2011-09-01 15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/862677259356/ Log: avoid 'import ctypes' inside the byref function, we import 'pointer' directly from _ctypes diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -166,7 +166,8 @@ return tp._alignmentofinstances() def byref(cdata): - from ctypes import pointer + # "pointer" is imported at the end of this module to avoid circular + # imports return pointer(cdata) def cdata_from_address(self, address): @@ -226,3 +227,6 @@ 'v' : _ffi.types.sshort, } + +# used by "byref" +from _ctypes.pointer import pointer From noreply at buildbot.pypy.org Thu Sep 1 18:05:20 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 1 Sep 2011 18:05:20 +0200 (CEST) Subject: [pypy-commit] pypy default: implement a cache of statements similar to the one which is in the CPython C extension. 
It makes some trivial benchmark twice as fast Message-ID: <20110901160520.030E28204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r46988:72321d3c8b69 Date: 2011-09-01 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/72321d3c8b69/ Log: implement a cache of statements similar to the one which is in the CPython C extension. It makes some trivial benchmark twice as fast diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -24,6 +24,7 @@ from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast from ctypes import sizeof, c_ssize_t +from collections import OrderedDict import datetime import sys import time @@ -274,6 +275,28 @@ def unicode_text_factory(x): return unicode(x, 'utf-8') + +class StatementCache(object): + def __init__(self, connection, maxcount): + self.connection = connection + self.maxcount = maxcount + self.cache = OrderedDict() + + def get(self, sql, cursor, row_factory): + try: + stat = self.cache[sql] + except KeyError: + stat = Statement(self.connection, sql) + self.cache[sql] = stat + if len(self.cache) > self.maxcount: + self.cache.popitem(0) + # + if stat.in_use: + stat = Statement(self.connection, sql) + stat.set_cursor_and_factory(cursor, row_factory) + return stat + + class Connection(object): def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", check_same_thread=True, factory=None, cached_statements=100): @@ -291,6 +314,7 @@ self.row_factory = None self._isolation_level = isolation_level self.detect_types = detect_types + self.statement_cache = StatementCache(self, cached_statements) self.cursors = [] @@ -399,7 +423,7 @@ cur = Cursor(self) if not isinstance(sql, (str, unicode)): raise Warning("SQL is of wrong type. 
Must be string or unicode.") - statement = Statement(cur, sql, self.row_factory) + statement = self.statement_cache.get(sql, cur, self.row_factory) return statement def _get_isolation_level(self): @@ -708,7 +732,7 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: if self.statement.kind == "DDL": @@ -746,7 +770,8 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) + if self.statement.kind == "DML": self.connection._begin() else: @@ -871,14 +896,12 @@ lastrowid = property(_getlastrowid) class Statement(object): - def __init__(self, cur, sql, row_factory): + def __init__(self, connection, sql): self.statement = None if not isinstance(sql, str): raise ValueError, "sql must be a string" - self.con = cur.connection - self.cur = weakref.ref(cur) + self.con = connection self.sql = sql # DEBUG ONLY - self.row_factory = row_factory first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): self.kind = "DML" @@ -887,6 +910,11 @@ else: self.kind = "DDL" self.exhausted = False + self.in_use = False + # + # set by set_cursor_and_factory + self.cur = None + self.row_factory = None self.statement = c_void_p() next_char = c_char_p() @@ -907,6 +935,10 @@ self._build_row_cast_map() + def set_cursor_and_factory(self, cur, row_factory): + self.cur = weakref.ref(cur) + self.row_factory = row_factory + def _build_row_cast_map(self): self.row_cast_map = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): @@ -976,6 +1008,7 @@ ret = sqlite.sqlite3_reset(self.statement) if ret != SQLITE_OK: raise self.con._get_exception(ret) + 
self.mark_dirty() if params is None: if sqlite.sqlite3_bind_parameter_count(self.statement) != 0: @@ -1068,11 +1101,17 @@ def reset(self): self.row_cast_map = None - return sqlite.sqlite3_reset(self.statement) + ret = sqlite.sqlite3_reset(self.statement) + self.in_use = False + return ret def finalize(self): sqlite.sqlite3_finalize(self.statement) self.statement = None + self.in_use = False + + def mark_dirty(self): + self.in_use = True def __del__(self): sqlite.sqlite3_finalize(self.statement) From noreply at buildbot.pypy.org Thu Sep 1 18:05:27 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Sep 2011 18:05:27 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Slightly beautified test_call_many_arguments. Message-ID: <20110901160527.73E2E8204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r46989:b9eddaa199ec Date: 2011-09-01 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/b9eddaa199ec/ Log: Slightly beautified test_call_many_arguments. 
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -629,18 +629,18 @@ def test_call_many_arguments(self): # Test calling a function with a large number of arguments (more than # 6, which will force passing some arguments on the stack on 64-bit) - + num_args = 16 def func(*args): - assert len(args) == 16 + assert len(args) == num_args # Try to sum up args in a way that would probably detect a # transposed argument return sum(arg * (2**i) for i, arg in enumerate(args)) - FUNC = self.FuncType([lltype.Signed]*16, lltype.Signed) + FUNC = self.FuncType([lltype.Signed]*num_args, lltype.Signed) FPTR = self.Ptr(FUNC) calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) func_ptr = llhelper(FPTR, func) - args = range(16) + args = range(num_args) funcbox = self.get_funcbox(self.cpu, func_ptr) res = self.execute_operation(rop.CALL, [funcbox] + map(BoxInt, args), 'int', descr=calldescr) assert res.value == func(*args) From noreply at buildbot.pypy.org Thu Sep 1 18:05:28 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 1 Sep 2011 18:05:28 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Save and restore nonvolatile registers, made test_call_many_arguments pass. Message-ID: <20110901160528.B220C8204C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r46990:fc74b87ee65e Date: 2011-09-01 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/fc74b87ee65e/ Log: Save and restore nonvolatile registers, made test_call_many_arguments pass. 
diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py --- a/pypy/jit/backend/ppc/ppcgen/arch.py +++ b/pypy/jit/backend/ppc/ppcgen/arch.py @@ -10,3 +10,5 @@ IS_PPC_32 = False IS_PPC_64 = True +NONVOLATILES = [2] + range(13, 32) +VOLATILES = [0] + range(3, 13) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -4,7 +4,7 @@ from pypy.jit.backend.ppc.ppcgen.ppc_field import ppc_fields from pypy.jit.backend.ppc.ppcgen.assembler import Assembler from pypy.jit.backend.ppc.ppcgen.symbol_lookup import lookup -from pypy.jit.backend.ppc.ppcgen.arch import IS_PPC_32 +from pypy.jit.backend.ppc.ppcgen.arch import IS_PPC_32, WORD, NONVOLATILES from pypy.jit.metainterp.history import Const, ConstPtr from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager @@ -932,12 +932,21 @@ self.ld(rD, rD, 0) def store_reg(self, source_reg, addr): + self.load_word(0, addr) if IS_PPC_32: - self.addis(10, 0, ha(addr)) - self.stw(source_reg, 10, la(addr)) + self.stwx(source_reg, 0, 0) else: - self.load_word(10, addr) - self.std(source_reg, 10, 0) + # ? 
+ self.std(source_reg, 0, 10) + + def save_nonvolatiles(self, framesize): + for i, reg in enumerate(NONVOLATILES): + self.stw(reg, 1, framesize - 4 * i) + + def restore_nonvolatiles(self, framesize): + for i, reg in enumerate(NONVOLATILES): + self.lwz(reg, 1, framesize - i * 4) + # translate a trace operation to corresponding machine code def build_op(self, trace_op, cpu): @@ -1399,7 +1408,9 @@ call_addr = rffi.cast(lltype.Signed, op.getarg(0).value) args = op.getarglist()[1:] descr = op.getdescr() + num_args = len(args) + # pass first arguments in registers arg_reg = 3 for arg in args: if isinstance(arg, Box): @@ -1409,6 +1420,22 @@ else: assert 0, "%s not supported yet" % arg arg_reg += 1 + if arg_reg == 11: + break + + # if the function takes more than 8 arguments, + # pass remaining arguments on stack + if num_args > 8: + remaining_args = args[8:] + for i, arg in enumerate(remaining_args): + if isinstance(arg, Box): + #self.mr(0, cpu.reg_map[arg]) + self.stw(cpu.reg_map[arg], 1, 8 + WORD * i) + elif isinstance(arg, Const): + self.load_word(0, arg.value) + self.stw(0, 1, 8 + WORD * i) + else: + assert 0, "%s not supported yet" % arg self.load_word(0, call_addr) self.mtctr(0) @@ -1562,9 +1589,14 @@ self.store_reg(cpu.next_free_register, addr) else: assert 0, "arg type not suported" - self.lwz(0, 1, 36) + + framesize = 64 + 80 + + self.restore_nonvolatiles(framesize) + + self.lwz(0, 1, framesize + 4) # 36 self.mtlr(0) - self.addi(1, 1, 32) + self.addi(1, 1, framesize) self.load_word(3, identifier) self.blr() diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py --- a/pypy/jit/backend/ppc/runner.py +++ b/pypy/jit/backend/ppc/runner.py @@ -12,13 +12,13 @@ from pypy.jit.backend.x86 import regloc from pypy.jit.backend.x86.support import values_array from pypy.jit.backend.ppc.ppcgen.ppc_assembler import PPCBuilder +from pypy.jit.backend.ppc.ppcgen.arch import NONVOLATILES import sys from pypy.tool.ansi_print import ansi_log log = 
py.log.Producer('jitbackend') py.log.setconsumer('jitbackend', ansi_log) - class PPC_64_CPU(AbstractLLCPU): def __init__(self, rtyper, stats, opts=None, translate_support_code=False, @@ -46,6 +46,9 @@ codebuilder = PPCBuilder() + # function prologue + self._make_prologue(codebuilder) + # initialize registers from memory self.next_free_register = 3 for index, arg in enumerate(inputargs): @@ -56,8 +59,10 @@ self.startpos = codebuilder.get_relative_pos() - self._make_prologue(codebuilder) + # generate code for operations self._walk_trace_ops(codebuilder, operations) + + # function epilogue self._make_epilogue(codebuilder) f = codebuilder.assemble() @@ -106,9 +111,11 @@ return reg def _make_prologue(self, codebuilder): - codebuilder.stwu(1, 1, -32) + framesize = 64 + 80 + codebuilder.stwu(1, 1, -framesize) codebuilder.mflr(0) - codebuilder.stw(0, 1, 36) + codebuilder.stw(0, 1, framesize + 4) + codebuilder.save_nonvolatiles(framesize) def _make_epilogue(self, codebuilder): for op_index, fail_index, guard, reglist in self.patch_list: @@ -135,9 +142,12 @@ descr.patch_pos = patch_pos descr.used_mem_indices = used_mem_indices - codebuilder.lwz(0, 1, 36) + framesize = 64 + 80 + codebuilder.restore_nonvolatiles(framesize) + + codebuilder.lwz(0, 1, framesize + 4) # 36 codebuilder.mtlr(0) - codebuilder.addi(1, 1, 32) + codebuilder.addi(1, 1, framesize) codebuilder.li(3, fail_index) codebuilder.blr() From noreply at buildbot.pypy.org Thu Sep 1 18:26:04 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 18:26:04 +0200 (CEST) Subject: [pypy-commit] pypy default: Update. Message-ID: <20110901162604.8FB2D8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46991:4d88c07896b2 Date: 2011-09-01 18:25 +0200 http://bitbucket.org/pypy/pypy/changeset/4d88c07896b2/ Log: Update. 
diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -199,7 +199,11 @@ The following features (present in some past Stackless version of PyPy) are for the time being not supported any more: -* Tasklets and channels (needs to be rewritten at app-level) +* Tasklets and channels (currently ``stackless.py`` seems to import, + but you have tasklets on top of coroutines on top of greenlets on + top of continulets on top of stacklets, and it's probably not too + hard to cut two of these levels by adapting ``stackless.py`` to + use directly continulets) * Coroutines (could be rewritten at app-level) From noreply at buildbot.pypy.org Thu Sep 1 18:31:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 18:31:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Blindly fix an issue on not-explicitly-supported platforms. Message-ID: <20110901163110.A0F5F8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46992:f1f9f3782931 Date: 2011-09-01 18:30 +0200 http://bitbucket.org/pypy/pypy/changeset/f1f9f3782931/ Log: Blindly fix an issue on not-explicitly-supported platforms. diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,7 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc.startswith('gcc'): +elif platform.cc is not None and platform.cc.startswith('gcc'): out = platform.execute(platform.cc, '--version').out match = re.search(' (\d+\.\d+(\.\d+)*)', out) if match: From noreply at buildbot.pypy.org Thu Sep 1 19:19:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 19:19:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Add another limitation. 
Message-ID: <20110901171917.0476C8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46993:7ae58f838b74 Date: 2011-09-01 19:18 +0200 http://bitbucket.org/pypy/pypy/changeset/7ae58f838b74/ Log: Add another limitation. diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -215,6 +215,11 @@ * Support for other CPUs than x86 and x86-64 +* The app-level ``f_back`` field of frames crossing continulet boundaries + is None for now, unlike what I explain in the theoretical overview + above. It mostly means that in a ``pdb.set_trace()`` you cannot go + ``up`` past countinulet boundaries. This could be fixed. + .. __: `recursion depth limit`_ (*) Pickling, as well as changing threads, could be implemented by using From noreply at buildbot.pypy.org Thu Sep 1 19:35:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 19:35:11 +0200 (CEST) Subject: [pypy-commit] pypy default: ANSI C. Needed on top of MSVC. Message-ID: <20110901173511.EB4D28204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r46994:997c1403edbb Date: 2011-09-01 19:33 +0200 http://bitbucket.org/pypy/pypy/changeset/997c1403edbb/ Log: ANSI C. Needed on top of MSVC. 
diff --git a/pypy/translator/c/src/stacklet/stacklet.c b/pypy/translator/c/src/stacklet/stacklet.c --- a/pypy/translator/c/src/stacklet/stacklet.c +++ b/pypy/translator/c/src/stacklet/stacklet.c @@ -319,10 +319,11 @@ char **_stacklet_translate_pointer(stacklet_handle context, char **ptr) { + char *p = (char *)ptr; + long delta; if (context == NULL) return ptr; - char *p = (char *)ptr; - long delta = p - context->stack_start; + delta = p - context->stack_start; if (((unsigned long)delta) < ((unsigned long)context->stack_saved)) { /* a pointer to a saved away word */ char *c = (char *)(context + 1); From noreply at buildbot.pypy.org Thu Sep 1 20:17:31 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Sep 2011 20:17:31 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: merged in default. Message-ID: <20110901181731.6E1D58204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r46995:bb553b7cded1 Date: 2011-09-01 12:33 -0400 http://bitbucket.org/pypy/pypy/changeset/bb553b7cded1/ Log: merged in default. 
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -166,7 +166,8 @@ return tp._alignmentofinstances() def byref(cdata): - from ctypes import pointer + # "pointer" is imported at the end of this module to avoid circular + # imports return pointer(cdata) def cdata_from_address(self, address): @@ -226,3 +227,6 @@ 'v' : _ffi.types.sshort, } + +# used by "byref" +from _ctypes.pointer import pointer diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -24,6 +24,7 @@ from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast from ctypes import sizeof, c_ssize_t +from collections import OrderedDict import datetime import sys import time @@ -274,6 +275,28 @@ def unicode_text_factory(x): return unicode(x, 'utf-8') + +class StatementCache(object): + def __init__(self, connection, maxcount): + self.connection = connection + self.maxcount = maxcount + self.cache = OrderedDict() + + def get(self, sql, cursor, row_factory): + try: + stat = self.cache[sql] + except KeyError: + stat = Statement(self.connection, sql) + self.cache[sql] = stat + if len(self.cache) > self.maxcount: + self.cache.popitem(0) + # + if stat.in_use: + stat = Statement(self.connection, sql) + stat.set_cursor_and_factory(cursor, row_factory) + return stat + + class Connection(object): def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", check_same_thread=True, factory=None, cached_statements=100): @@ -291,6 +314,7 @@ self.row_factory = None self._isolation_level = isolation_level self.detect_types = detect_types + self.statement_cache = StatementCache(self, cached_statements) self.cursors = [] @@ -399,7 +423,7 @@ cur = Cursor(self) if not isinstance(sql, (str, unicode)): raise Warning("SQL is of wrong type. 
Must be string or unicode.") - statement = Statement(cur, sql, self.row_factory) + statement = self.statement_cache.get(sql, cur, self.row_factory) return statement def _get_isolation_level(self): @@ -708,7 +732,7 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: if self.statement.kind == "DDL": @@ -746,7 +770,8 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) + if self.statement.kind == "DML": self.connection._begin() else: @@ -871,14 +896,12 @@ lastrowid = property(_getlastrowid) class Statement(object): - def __init__(self, cur, sql, row_factory): + def __init__(self, connection, sql): self.statement = None if not isinstance(sql, str): raise ValueError, "sql must be a string" - self.con = cur.connection - self.cur = weakref.ref(cur) + self.con = connection self.sql = sql # DEBUG ONLY - self.row_factory = row_factory first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): self.kind = "DML" @@ -887,6 +910,11 @@ else: self.kind = "DDL" self.exhausted = False + self.in_use = False + # + # set by set_cursor_and_factory + self.cur = None + self.row_factory = None self.statement = c_void_p() next_char = c_char_p() @@ -907,6 +935,10 @@ self._build_row_cast_map() + def set_cursor_and_factory(self, cur, row_factory): + self.cur = weakref.ref(cur) + self.row_factory = row_factory + def _build_row_cast_map(self): self.row_cast_map = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): @@ -976,6 +1008,7 @@ ret = sqlite.sqlite3_reset(self.statement) if ret != SQLITE_OK: raise self.con._get_exception(ret) + 
self.mark_dirty() if params is None: if sqlite.sqlite3_bind_parameter_count(self.statement) != 0: @@ -1068,11 +1101,17 @@ def reset(self): self.row_cast_map = None - return sqlite.sqlite3_reset(self.statement) + ret = sqlite.sqlite3_reset(self.statement) + self.in_use = False + return ret def finalize(self): sqlite.sqlite3_finalize(self.statement) self.statement = None + self.in_use = False + + def mark_dirty(self): + self.in_use = True def __del__(self): sqlite.sqlite3_finalize(self.statement) diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -401,13 +401,19 @@ return "(arg: %s) "%self.arg if "\n" in self.buffer: if lineno == 0: - return self._ps2 + res = self.ps2 elif lineno == self.buffer.count("\n"): - return self._ps4 + res = self.ps4 else: - return self._ps3 + res = self.ps3 else: - return self._ps1 + res = self.ps1 + # Lazily call str() on self.psN, and cache the results using as key + # the object on which str() was called. This ensures that even if the + # same object is used e.g. for ps1 and ps2, str() is called only once. + if res not in self._pscache: + self._pscache[res] = str(res) + return self._pscache[res] def push_input_trans(self, itrans): self.input_trans_stack.append(self.input_trans) @@ -473,8 +479,7 @@ self.pos = 0 self.dirty = 1 self.last_command = None - self._ps1, self._ps2, self._ps3, self._ps4 = \ - map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + self._pscache = {} except: self.restore() raise diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -209,6 +209,8 @@ * Automatic unlimited stack (must be emulated__ so far) +* Support for other CPUs than x86 and x86-64 + .. 
__: `recursion depth limit`_ (*) Pickling, as well as changing threads, could be implemented by using @@ -217,9 +219,8 @@ "hard" switch (like now) when the C stack contains non-trivial C frames to save, and a "soft" switch (like previously) when it contains only simple calls from Python to Python. Soft-switched continulets would -also consume a bit less RAM, at the possible expense of making the -switch a bit slower (unsure about that; what is the Stackless Python -experience?). +also consume a bit less RAM, and the switch might be a bit faster too +(unsure about that; what is the Stackless Python experience?). Recursion depth limit diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -626,9 +626,9 @@ self.default_compiler = compiler return compiler - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): "Create an empty PyFrame suitable for this code object." 
- return self.FrameClass(self, code, w_globals, closure) + return self.FrameClass(self, code, w_globals, outer_func) def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -30,7 +30,7 @@ can_change_code = True _immutable_fields_ = ['code?', 'w_func_globals?', - 'closure?', + 'closure?[*]', 'defs_w?[*]', 'name?'] @@ -96,7 +96,7 @@ assert isinstance(code, PyCode) if nargs < 5: new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in funccallunrolling: if i < nargs: new_frame.locals_stack_w[i] = args_w[i] @@ -156,7 +156,7 @@ def _flat_pycall(self, code, nargs, frame): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg @@ -167,7 +167,7 @@ def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -8,7 +8,7 @@ class Cell(Wrappable): "A simple container for a wrapped value." - + def __init__(self, w_value=None): self.w_value = w_value @@ -90,32 +90,33 @@ # variables coming from a parent function in which i'm nested # 'closure' is a list of Cell instances: the received free vars. 
- cells = None - @jit.unroll_safe - def initialize_frame_scopes(self, closure, code): - super_initialize_frame_scopes(self, closure, code) + def initialize_frame_scopes(self, outer_func, code): + super_initialize_frame_scopes(self, outer_func, code) ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: + self.cells = [] return # no self.cells needed - fast path - if closure is None: - closure = [] - elif closure is None: + elif outer_func is None: space = self.space raise OperationError(space.w_TypeError, space.wrap("directly executed code object " "may not contain free variables")) - if len(closure) != nfreevars: + if outer_func and outer_func.closure: + closure_size = len(outer_func.closure) + else: + closure_size = 0 + if closure_size != nfreevars: raise ValueError("code object received a closure with " "an unexpected number of free variables") self.cells = [None] * (ncellvars + nfreevars) for i in range(ncellvars): self.cells[i] = Cell() for i in range(nfreevars): - self.cells[i + ncellvars] = closure[i] - + self.cells[i + ncellvars] = outer_func.closure[i] + def _getcells(self): return self.cells diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -198,7 +198,7 @@ def funcrun(self, func, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, @@ -211,7 +211,7 @@ def funcrun_obj(self, func, w_obj, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -51,7 +51,7 @@ is_being_profiled = False escaped = False # see mark_as_escaped() - def __init__(self, 
space, code, w_globals, closure): + def __init__(self, space, code, w_globals, outer_func): if not we_are_translated(): assert type(self) in (space.FrameClass, CPythonFrame), ( "use space.FrameClass(), not directly PyFrame()") @@ -70,7 +70,7 @@ self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. - self.initialize_frame_scopes(closure, code) + self.initialize_frame_scopes(outer_func, code) self.f_lineno = code.co_firstlineno def mark_as_escaped(self): @@ -117,8 +117,8 @@ return self.builtin else: return self.space.builtin - - def initialize_frame_scopes(self, closure, code): + + def initialize_frame_scopes(self, outer_func, code): # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. # CO_NEWLOCALS: make a locals dict unless optimized is also set @@ -385,7 +385,11 @@ # do not use the instance's __init__ but the base's, because we set # everything like cells from here - PyFrame.__init__(self, space, pycode, w_globals, closure) + # XXX hack + from pypy.interpreter.function import Function + outer_func = Function(space, None, closure=closure, + forcename="fake") + PyFrame.__init__(self, space, pycode, w_globals, outer_func) f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True) new_frame.f_backref = jit.non_virtual_ref(f_back) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -291,7 +291,7 @@ def get_call_conv(self): from pypy.rlib.clibffi import get_call_conv - return get_call_conv(self.ffi_flags) + return get_call_conv(self.ffi_flags, True) def get_arg_types(self): return self.arg_classes diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2070,16 +2070,21 @@ 
ffi_flags=FUNCFLAG_STDCALL) i1 = BoxInt() i2 = BoxInt() - i3 = BoxInt() - tok = BoxInt() faildescr = BasicFailDescr(1) - ops = [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, - descr=calldescr), - ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), - ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) + # if the stdcall convention is ignored, then ESP is wrong after the + # call: 8 bytes too much. If we repeat the call often enough, crash. + ops = [] + for i in range(50): + i3 = BoxInt() + ops += [ + ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ] + ops[-1].setfailargs([]) + ops += [ + ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) ] - ops[1].setfailargs([]) looptoken = LoopToken() self.cpu.compile_loop([i1, i2], ops, looptoken) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -34,6 +34,7 @@ from pypy.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) from pypy.rlib import rgc +from pypy.rlib.clibffi import FFI_DEFAULT_ABI from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.metainterp.history import ConstInt, BoxInt from pypy.jit.codewriter.effectinfo import EffectInfo @@ -1120,7 +1121,7 @@ return genop_cmp_guard_float def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax, - argtypes=None): + argtypes=None, callconv=FFI_DEFAULT_ABI): if IS_X86_64: return self._emit_call_64(force_index, x, arglocs, start, argtypes) @@ -1149,6 +1150,16 @@ # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) + # + if callconv != FFI_DEFAULT_ABI: + self._fix_stdcall(callconv, p) + + def _fix_stdcall(self, callconv, p): + from pypy.rlib.clibffi import FFI_STDCALL + assert callconv == FFI_STDCALL + # it's a bit stupid, but we're just going to cancel the fact 
that + # the called function just added 'p' to ESP, by subtracting it again. + self.mc.SUB_ri(esp.value, p) def _emit_call_64(self, force_index, x, arglocs, start, argtypes): src_locs = [] @@ -2127,7 +2138,8 @@ tmp = eax self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types()) + argtypes=op.getdescr().get_arg_types(), + callconv=op.getdescr().get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -527,6 +527,7 @@ NOP = insn('\x90') RET = insn('\xC3') + RET16_i = insn('\xC2', immediate(1, 'h')) PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -433,6 +433,88 @@ ops_offset[operations[2]] <= ops_offset[None]) + def test_calling_convention(self, monkeypatch): + if WORD != 4: + py.test.skip("32-bit only test") + from pypy.jit.backend.x86.regloc import eax, edx + from pypy.jit.backend.x86 import codebuf + from pypy.jit.codewriter.effectinfo import EffectInfo + from pypy.rlib.libffi import types, clibffi + had_stdcall = hasattr(clibffi, 'FFI_STDCALL') + if not had_stdcall: # not running on Windows, but we can still test + monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) + # + for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: + cpu = self.cpu + mc = codebuf.MachineCodeBlockWrapper() + mc.MOV_rs(eax.value, 4) # argument 1 + mc.MOV_rs(edx.value, 40) # argument 10 + mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10 + if ffi == clibffi.FFI_DEFAULT_ABI: + mc.RET() + else: + mc.RET16_i(40) + rawstart = mc.materialize(cpu.asmmemmgr, []) + # + calldescr = 
cpu.calldescrof_dynamic([types.slong] * 10, + types.slong, + EffectInfo.MOST_GENERAL, + ffi_flags=-1) + calldescr.get_call_conv = lambda: ffi # <==== hack + funcbox = ConstInt(rawstart) + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + i4 = BoxInt() + i5 = BoxInt() + i6 = BoxInt() + c = ConstInt(-1) + faildescr = BasicFailDescr(1) + # we must call it repeatedly: if the stack pointer gets increased + # by 40 bytes by the STDCALL call, and if we don't expect it, + # then we are going to get our stack emptied unexpectedly by + # several repeated calls + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i3, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i4, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i5, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i6, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.FINISH, [i3, i4, i5, i6], None, + descr=BasicFailDescr(0)) + ] + ops[1].setfailargs([]) + ops[3].setfailargs([]) + ops[5].setfailargs([]) + ops[7].setfailargs([]) + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + self.cpu.set_future_value_int(0, 123450) + self.cpu.set_future_value_int(1, 123408) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == 42 + assert self.cpu.get_latest_value_int(1) == 42 + assert self.cpu.get_latest_value_int(2) == 42 + assert self.cpu.get_latest_value_int(3) == 42 + + class TestDebuggingAssembler(object): def setup_method(self, meth): self.cpu = CPU(rtyper=None, stats=FakeStats()) diff --git 
a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -292,7 +292,7 @@ raise break new_frame = space.createframe(code, w_func.w_func_globals, - w_func.closure) + w_func) new_frame.locals_stack_w[0] = w_item w_res = new_frame.run() result_w.append(w_res) diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -274,14 +274,14 @@ pass del bz2f # delete from this frame, which is captured in the traceback - def test_read_chunk10(self): + def test_read_chunk9(self): from bz2 import BZ2File self.create_temp_file() bz2f = BZ2File(self.temppath) text_read = "" while True: - data = bz2f.read(10) + data = bz2f.read(9) # 9 doesn't divide evenly into data length if not data: break text_read = "%s%s" % (text_read, data) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -57,7 +57,7 @@ code = space.interp_w(PyCode, w_code) w_globals = from_ref(space, py_frame.c_f_globals) - frame = space.FrameClass(space, code, w_globals, closure=None) + frame = space.FrameClass(space, code, w_globals, outer_func=None) frame.f_lineno = py_frame.c_f_lineno w_obj = space.wrap(frame) track_reference(space, py_obj, w_obj) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -53,7 +53,9 @@ VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype): +def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, + expected_size=None): + class Box(BaseBox): def __init__(self, val): 
self.val = val @@ -113,6 +115,8 @@ W_LowLevelDtype.aliases = aliases W_LowLevelDtype.applevel_types = applevel_types W_LowLevelDtype.num_bytes = rffi.sizeof(T) + if expected_size is not None: + assert W_LowLevelDtype.num_bytes == expected_size return W_LowLevelDtype @@ -282,10 +286,21 @@ applevel_types = [], T = rffi.SIGNEDCHAR, valtype = rffi.SIGNEDCHAR._type, + expected_size = 1, ) class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) + pass + +W_Int16Dtype = create_low_level_dtype( + num = 3, kind = SIGNEDLTR, name = "int16", + aliases = ["int16"], + applevel_types = [], + T = rffi.SHORT, + valtype = rffi.SHORT._type, + expected_size = 2, +) +class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): + pass W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", @@ -293,6 +308,7 @@ applevel_types = [], T = rffi.INT, valtype = rffi.INT._type, + expected_size = 4, ) class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype): pass @@ -303,6 +319,7 @@ applevel_types = ["long"], T = rffi.LONGLONG, valtype = rffi.LONGLONG._type, + expected_size = 8, ) class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype): pass @@ -313,6 +330,7 @@ applevel_types = ["float"], T = lltype.Float, valtype = float, + expected_size = 8, ) class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): def unwrap(self, space, w_item): @@ -323,7 +341,7 @@ ALL_DTYPES = [ W_BoolDtype, - W_Int8Dtype, W_Int32Dtype, W_Int64Dtype, + W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, W_Float64Dtype ] @@ -353,4 +371,4 @@ kind = interp_attrproperty("kind", cls=W_Dtype), shape = GetSetProperty(W_Dtype.descr_get_shape), ) -W_Dtype.typedef.acceptable_as_base_class = False \ No newline at end of file +W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- 
a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -217,7 +217,15 @@ return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") def descr_getitem(self, space, w_idx): - # TODO: indexing by tuples + # TODO: indexing by arrays and lists + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length == 0: + return space.wrap(self) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: # Single index @@ -231,8 +239,19 @@ return space.wrap(res) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by tuples and lists + # TODO: indexing by arrays and lists self.invalidated() + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if length == 0: + w_idx = space.newslice(space.wrap(0), + space.wrap(self.find_size()), + space.wrap(1)) + else: + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -82,10 +82,20 @@ assert a[1] == 1 def test_add_int8(self): - from numpy import array + from numpy import array, dtype a = array(range(5), dtype="int8") b = a + a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == i * 2 + + def test_add_int16(self): + from numpy import array, dtype + + a = array(range(5), dtype="int16") + b = a + a + assert b.dtype is dtype("int16") for i in range(5): assert b[i] == i * 2 @@ -98,4 +108,4 @@ from numpy import dtype # 
You can't subclass dtype - raises(TypeError, type, "Foo", (dtype,), {}) \ No newline at end of file + raises(TypeError, type, "Foo", (dtype,), {}) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -84,6 +84,9 @@ a = array(range(5), dtype="int8") assert str(a) == "[0 1 2 3 4]" + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + def test_str_slice(self): from numpy import array, zeros a = array(range(5), float) @@ -102,6 +105,16 @@ assert a[-1] == 8 raises(IndexError, "a[-6]") + def test_getitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)]") + for i in xrange(5): + assert a[(i,)] == i + b = a[()] + for i in xrange(5): + assert a[i] == b[i] + def test_setitem(self): from numpy import array a = array(range(5)) @@ -110,6 +123,17 @@ raises(IndexError, "a[5] = 0.0") raises(IndexError, "a[-6] = 3.0") + def test_setitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)] = [0,1]") + for i in xrange(5): + a[(i,)] = i+1 + assert a[i] == i+1 + a[()] = range(5) + for i in xrange(5): + assert a[i] == i + def test_setslice_array(self): from numpy import array a = array(range(5)) @@ -541,4 +565,4 @@ a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") \ No newline at end of file + raises(ValueError, fromstring, "abc") diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -21,6 +21,7 @@ PyFrame._virtualizable2_ = ['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', 'last_exception', 'lastblock', 'is_being_profiled', diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- 
a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -174,7 +174,7 @@ guard_no_overflow(descr=...) i18 = force_token() --TICK-- - jump(p0, p1, p2, p3, p4, i8, p7, i17, p8, i9, i17, p10, p11, p12, descr=) + jump(..., descr=) """) def test_default_and_kw(self): @@ -396,3 +396,70 @@ --TICK-- jump(..., descr=) """) + + def test_global_closure_has_constant_cells(self): + log = self.run(""" + def make_adder(n): + def add(x): + return x + n + return add + add5 = make_adder(5) + def main(): + i = 0 + while i < 5000: + i = add5(i) # ID: call + """, []) + loop, = log.loops_by_id('call', is_entry_bridge=True) + assert loop.match(""" + guard_value(i6, 1, descr=...) + guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) + guard_value(i4, 0, descr=...) + guard_value(p3, ConstPtr(ptr14), descr=...) + i15 = getfield_gc_pure(p8, descr=) + i17 = int_lt(i15, 5000) + guard_true(i17, descr=...) + p18 = getfield_gc(p0, descr=) + guard_value(p18, ConstPtr(ptr19), descr=...) + p20 = getfield_gc(p18, descr=) + guard_value(p20, ConstPtr(ptr21), descr=...) + guard_not_invalidated(descr=...) + # most importantly, there is no getarrayitem_gc here + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) + i25 = force_token() + p26 = getfield_gc(p23, descr=) + guard_isnull(p26, descr=...) + i27 = getfield_gc(p23, descr=) + i28 = int_is_zero(i27) + guard_true(i28, descr=...) + p30 = getfield_gc(ConstPtr(ptr29), descr=) + guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) + i32 = getfield_gc_pure(p30, descr=) + i33 = int_add_ovf(i15, i32) + guard_no_overflow(descr=...) 
+ --TICK-- + jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + """) + + def test_local_closure_is_virtual(self): + log = self.run(""" + def main(): + i = 0 + while i < 5000: + def add(): + return i + 1 + i = add() # ID: call + """, []) + loop, = log.loops_by_id('call') + assert loop.match(""" + i8 = getfield_gc_pure(p6, descr=) + i10 = int_lt(i8, 5000) + guard_true(i10, descr=...) + i11 = force_token() + i13 = int_add(i8, 1) + --TICK-- + p22 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + """) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -184,7 +184,7 @@ class FlowExecutionContext(ExecutionContext): - def __init__(self, space, code, globals, constargs={}, closure=None, + def __init__(self, space, code, globals, constargs={}, outer_func=None, name=None): ExecutionContext.__init__(self, space) self.code = code @@ -193,11 +193,11 @@ self.crnt_offset = -1 self.crnt_frame = None - if closure is None: + if outer_func and outer_func.closure: + self.closure = [nestedscope.Cell(Constant(value)) + for value in outer_func.closure] + else: self.closure = None - else: - self.closure = [nestedscope.Cell(Constant(value)) - for value in closure] frame = self.create_frame() formalargcount = code.getformalargcount() arg_list = [Variable() for i in range(formalargcount)] @@ -216,7 +216,7 @@ # while ignoring any operation like the creation of the locals dict self.recorder = [] frame = FlowSpaceFrame(self.space, self.code, - self.w_globals, self.closure) + self.w_globals, self) frame.last_instr = 0 return frame diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -252,9 +252,9 @@ raise TypeError("%r is a generator" % (func,)) code = PyCode._from_code(self, 
code) if func.func_closure is None: - closure = None + cl = None else: - closure = [extract_cell_content(c) for c in func.func_closure] + cl = [extract_cell_content(c) for c in func.func_closure] # CallableFactory.pycall may add class_ to functions that are methods name = func.func_name class_ = getattr(func, 'class_', None) @@ -262,8 +262,10 @@ name = '%s.%s' % (class_.__name__, name) for c in "<>&!": name = name.replace(c, '_') + class outerfunc: # hack + closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, closure, name) + constargs, outerfunc, name) graph = ec.graph graph.func = func # attach a signature and defaults to the graph diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -142,7 +142,7 @@ def funcrun(self, func, args): frame = func.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self.signature() scope_w = args.parse_obj(None, func.name, sig, func.defs_w) frame.setfastscope(scope_w) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -129,12 +129,12 @@ ec._py_repr = None return ec - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): from pypy.objspace.std.fake import CPythonFakeCode, CPythonFakeFrame if not we_are_translated() and isinstance(code, CPythonFakeCode): return CPythonFakeFrame(self, code, w_globals) else: - return ObjSpace.createframe(self, code, w_globals, closure) + return ObjSpace.createframe(self, code, w_globals, outer_func) def gettypefor(self, cls): return self.gettypeobject(cls.typedef) diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -286,10 +286,10 @@ FFI_OK = cConfig.FFI_OK FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF -FFI_DEFAULT_ABI = rffi.cast(rffi.USHORT, 
cConfig.FFI_DEFAULT_ABI) +FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI if _WIN32: - FFI_STDCALL = rffi.cast(rffi.USHORT, cConfig.FFI_STDCALL) -FFI_TYPE_STRUCT = rffi.cast(rffi.USHORT, cConfig.FFI_TYPE_STRUCT) + FFI_STDCALL = cConfig.FFI_STDCALL +FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT FFI_CIFP = rffi.COpaquePtr('ffi_cif', compilation_info=eci) FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure) @@ -319,7 +319,7 @@ which the 'ffistruct' member is a regular FFI_TYPE. """ tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types)+1, flavor='raw') - tpe.ffistruct.c_type = FFI_TYPE_STRUCT + tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT) tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size) tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment) tpe.ffistruct.c_elements = rffi.cast(FFI_TYPE_PP, @@ -408,11 +408,12 @@ FUNCFLAG_USE_ERRNO = 8 FUNCFLAG_USE_LASTERROR = 16 -def get_call_conv(flags): +def get_call_conv(flags, from_jit): if _WIN32 and (flags & FUNCFLAG_CDECL == 0): return FFI_STDCALL else: return FFI_DEFAULT_ABI +get_call_conv._annspecialcase_ = 'specialize:arg(1)' # hack :-/ class AbstractFuncPtr(object): @@ -437,13 +438,14 @@ if _MSVC: # This little trick works correctly with MSVC. 
# It returns small structures in registers - if r_uint(restype.c_type) == FFI_TYPE_STRUCT: + if intmask(restype.c_type) == FFI_TYPE_STRUCT: if restype.c_size <= 4: restype = ffi_type_sint32 elif restype.c_size <= 8: restype = ffi_type_sint64 - res = c_ffi_prep_cif(self.ll_cif, get_call_conv(flags), + res = c_ffi_prep_cif(self.ll_cif, + rffi.cast(rffi.USHORT, get_call_conv(flags,False)), rffi.cast(rffi.UINT, argnum), restype, self.ll_argtypes) if not res == FFI_OK: diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -75,7 +75,7 @@ @staticmethod @jit.elidable def is_struct(ffi_type): - return intmask(ffi_type.c_type) == intmask(FFI_TYPE_STRUCT) + return intmask(ffi_type.c_type) == FFI_TYPE_STRUCT types._import() diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -52,9 +52,14 @@ pypy_c_dir = basedir.join('pypy', 'translator', 'goal') pypy_c = pypy_c_dir.join('pypy-c.exe') libpypy_c = pypy_c_dir.join('libpypy-c.dll') + libexpat = pypy_c_dir.join('libexpat.dll') + if not libexpat.check(): + libexpat = py.path.local.sysfind('libexpat.dll') + assert libexpat, "libexpat.dll not found" + print "Picking %s" % libexpat binaries = [(pypy_c, pypy_c.basename), (libpypy_c, libpypy_c.basename), - (pypy_c_dir.join('libexpat.dll'), 'libexpat.dll')] + (libexpat, libexpat.basename)] else: basename = 'pypy-c' if override_pypy_c is None: From noreply at buildbot.pypy.org Thu Sep 1 20:17:32 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 1 Sep 2011 20:17:32 +0200 (CEST) Subject: [pypy-commit] pypy default: float64 and int64 arrays with no values show their dtypes in their reprs. 
Message-ID: <20110901181732.A9FA382212@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r46996:6bff38bd825c Date: 2011-09-01 14:17 -0400 http://bitbucket.org/pypy/pypy/changeset/6bff38bd825c/ Log: float64 and int64 arrays with no values show their dtypes in their reprs. diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -206,7 +206,7 @@ res = "array([" + ", ".join(concrete._getnums(False)) + "]" dtype = concrete.find_dtype() if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)): + dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or not self.find_size(): res += ", dtype=" + dtype.name res += ")" return space.wrap(res) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -52,10 +52,14 @@ from numpy import array, zeros a = array(range(5), float) assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" + a = array([], float) + assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" a = array(range(5), long) assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([], long) + assert repr(a) == "array([], dtype=int64)" a = array([True, False, True, False], "?") assert repr(a) == "array([True, False, True, False], dtype=bool)" From noreply at buildbot.pypy.org Thu Sep 1 20:18:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Sep 2011 20:18:10 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Update the docs. 
Message-ID: <20110901181810.8C9EA8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r250:df99ee33a75f Date: 2011-09-01 20:17 +0200 http://bitbucket.org/pypy/pypy.org/changeset/df99ee33a75f/ Log: Update the docs. diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -147,7 +147,8 @@
  • It is recommended to use PyPy to do translations, instead of using CPython, because it is twice as fast. You should just start by downloading an -official release of PyPy (with the JIT).

    +official release of PyPy (with the JIT). If you really have to use CPython +then note that we are talking about CPython 2.5-2.7 here, not CPython 3.x.

  • If RAM usage is a problem, then you can (for now) tweak some parameters via environment variables and command-line options. The following command diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -138,7 +138,8 @@ * It is recommended to use PyPy to do translations, instead of using CPython, because it is twice as fast. You should just start by downloading an - official release of PyPy (with the JIT). + official release of PyPy (with the JIT). If you really have to use CPython + then note that we are talking about CPython 2.5-2.7 here, not CPython 3.x. * If RAM usage is a problem, then you can (for now) tweak some parameters via environment variables and command-line options. The following command From noreply at buildbot.pypy.org Thu Sep 1 20:19:52 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 20:19:52 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Add in Long and ULong dtypes Message-ID: <20110901181952.A87308204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r46997:b15a2d4696be Date: 2011-09-01 11:21 -0600 http://bitbucket.org/pypy/pypy/changeset/b15a2d4696be/ Log: Add in Long and ULong dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -8,7 +8,7 @@ from pypy.module.micronumpy import signature from pypy.objspace.std.floatobject import float2string from pypy.rlib import rfloat -from pypy.rlib.rarithmetic import widen +from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rlib.objectmodel import specialize, enforceargs from pypy.rlib.unroll import unrolling_iterable from pypy.rpython.lltypesystem import lltype, rffi @@ -369,7 +369,21 @@ class W_UInt64Dtype(IntegerArithmeticDtype, W_UInt64Dtype): pass +if LONG_BIT == 32: + class W_LongDtype(W_Int32Dtype): + pass + class 
W_ULongDtype(W_UInt32Dtype): + pass +else: + class W_LongDtype(W_Int64Dtype): + pass + + class W_ULongDtype(W_UInt64Dtype): + pass + +W_LongDtype.num = 7 +W_ULongDtype.num = 8 W_Float64Dtype = create_low_level_dtype( num = 12, kind = FLOATINGLTR, name = "float64", @@ -389,8 +403,11 @@ ALL_DTYPES = [ W_BoolDtype, W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, - W_Int32Dtype, W_UInt32Dtype, W_Int64Dtype, W_UInt64Dtype, - W_Float64Dtype + W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, + W_Int64Dtype, W_UInt64Dtype, + W_Float64Dtype, #float32 fill-in for now + W_Float64Dtype, + W_Float64Dtype, #float96 fill-in for now ] dtypes_by_alias = unrolling_iterable([ diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -270,4 +270,4 @@ setattr(self, ufunc_name, ufunc) def get(space): - return space.fromcache(UfuncState) \ No newline at end of file + return space.fromcache(UfuncState) From noreply at buildbot.pypy.org Thu Sep 1 20:42:06 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 20:42:06 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: updated binop dtype promotion Message-ID: <20110901184206.31AE58204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r46998:af44aa788a0c Date: 2011-09-01 12:39 -0600 http://bitbucket.org/pypy/pypy/changeset/af44aa788a0c/ Log: updated binop dtype promotion diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -4,6 +4,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_dtype, signature from pypy.rlib import jit +from pypy.rlib.rarithmetic import LONG_BIT from pypy.tool.sourcetools import func_with_new_name @@ 
-172,9 +173,25 @@ # Everything promotes to float, and bool promotes to everything. if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: + if dt2.num == 11 and dt1.num_bytes >= 4: + return interp_dtype.W_Float64Dtype return dt2 - assert False + # for now this means mixing signed and unsigned + if dt2.kind == interp_dtype.SIGNEDLTR + if dt1.num_bytes < dt2.num_bytes: + return dt2 + # we need to promote both dtypes + dtypenum = dt2.num + 2 + else: + dtypenum = dt2.num + 1 + newdtype = interp_dtype.ALL_DTYPE[dtypenum] + + if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: + return newdtype + else: + # we only promoted to long on 32-bit or to longlong on 64-bit + return interp_dtype.ALL_DTYPE[dtypenum + 2] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): From noreply at buildbot.pypy.org Thu Sep 1 20:48:55 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 20:48:55 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Add some more aliasesto various dtypes and the applevel_types for W_LongDtype Message-ID: <20110901184855.C3ABC8204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r46999:ef03c45c0e6e Date: 2011-09-01 12:48 -0600 http://bitbucket.org/pypy/pypy/changeset/ef03c45c0e6e/ Log: Add some more aliasesto various dtypes and the applevel_types for W_LongDtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -265,7 +265,7 @@ W_BoolDtype = create_low_level_dtype( num = 0, kind = BOOLLTR, name = "bool", - aliases = ["?"], + aliases = ["?", "bool", "bool8"], applevel_types = ["bool"], T = lltype.Bool, valtype = bool, @@ -283,7 +283,7 @@ W_Int8Dtype = create_low_level_dtype( num = 1, kind = SIGNEDLTR, name = "int8", - aliases = ["int8"], + aliases = 
["b", "int8"], applevel_types = [], T = rffi.SIGNEDCHAR, valtype = rffi.SIGNEDCHAR._type, @@ -294,7 +294,7 @@ W_UInt8Dtype = create_low_level_dtype( num = 2, kind = UNSIGNEDLTR, name = "uint8", - aliases = ["uint8"], + aliases = ["B", "uint8"], applevel_types = [], T = rffi.UCHAR, valtype = rffi.UCHAR._type, @@ -305,7 +305,7 @@ W_Int16Dtype = create_low_level_dtype( num = 3, kind = SIGNEDLTR, name = "int16", - aliases = ["int16"], + aliases = ["h", "int16"], applevel_types = [], T = rffi.SHORT, valtype = rffi.SHORT._type, @@ -316,7 +316,7 @@ W_UInt16Dtype = create_low_level_dtype( num = 4, kind = UNSIGNEDLTR, name = "uint16", - aliases = ["uint16"], + aliases = ["H", "uint16"], applevel_types = [], T = rffi.USHORT, valtype = rffi.USHORT._type, @@ -327,7 +327,7 @@ W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", - aliases = ["i"], + aliases = ["i", "int32"], applevel_types = [], T = rffi.INT, valtype = rffi.INT._type, @@ -338,7 +338,7 @@ W_UInt32Dtype = create_low_level_dtype( num = 6, kind = UNSIGNEDLTR, name = "uint32", - aliases = ["I"], + aliases = ["I", "uint32"], applevel_types = [], T = rffi.UINT, valtype = rffi.UINT._type, @@ -349,7 +349,7 @@ W_Int64Dtype = create_low_level_dtype( num = 9, kind = SIGNEDLTR, name = "int64", - aliases = [], + aliases = ["q", "int64"], applevel_types = ["long"], T = rffi.LONGLONG, valtype = rffi.LONGLONG._type, @@ -360,7 +360,7 @@ W_UInt64Dtype = create_low_level_dtype( num = 10, kind = UNSIGNEDLTR, name = "uint64", - aliases = [], + aliases = ["Q", "uint64"], applevel_types = [], T = rffi.ULONGLONG, valtype = rffi.ULONGLONG._type, @@ -383,12 +383,15 @@ pass W_LongDtype.num = 7 +W_LongDtype.aliases = ["l"] +W_LongDtype.applevel_types = ["int"] W_ULongDtype.num = 8 +W_ULongDtype.aliases = ["L"] W_Float64Dtype = create_low_level_dtype( num = 12, kind = FLOATINGLTR, name = "float64", aliases = [], - applevel_types = ["float"], + applevel_types = ["d", "float"], T = lltype.Float, valtype = float, 
expected_size = 8, From noreply at buildbot.pypy.org Thu Sep 1 21:07:28 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 21:07:28 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Add dtypes tests and fix some things Message-ID: <20110901190728.F36588204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47000:eec948d816bc Date: 2011-09-01 13:05 -0600 http://bitbucket.org/pypy/pypy/changeset/eec948d816bc/ Log: Add dtypes tests and fix some things diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -390,8 +390,8 @@ W_Float64Dtype = create_low_level_dtype( num = 12, kind = FLOATINGLTR, name = "float64", - aliases = [], - applevel_types = ["d", "float"], + aliases = ["d"], + applevel_types = ["float"], T = lltype.Float, valtype = float, expected_size = 8, @@ -403,14 +403,25 @@ def str_format(self, item): return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) +# these are really just stand-ins for now until we get them fully working +class W_Float32Dtype(W_Float64Dtype): + pass +W_Float32Dtype.num = 11 +W_Float32Dtype.aliases = ["f"] +W_Float32Dtype.applevel_types = [] + +class W_Float96Dtype(W_Float64Dtype): + pass +W_Float96Dtype.num = 13 +W_Float96Dtype.aliases = ["g"] +W_Float96Dtype.applevel_types = [] + ALL_DTYPES = [ W_BoolDtype, W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, W_Int64Dtype, W_UInt64Dtype, - W_Float64Dtype, #float32 fill-in for now - W_Float64Dtype, - W_Float64Dtype, #float96 fill-in for now + W_Float32Dtype, W_Float64Dtype, W_Float96Dtype, ] dtypes_by_alias = unrolling_iterable([ diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ 
-178,20 +178,20 @@ return dt2 # for now this means mixing signed and unsigned - if dt2.kind == interp_dtype.SIGNEDLTR + if dt2.kind == interp_dtype.SIGNEDLTR: if dt1.num_bytes < dt2.num_bytes: return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 else: dtypenum = dt2.num + 1 - newdtype = interp_dtype.ALL_DTYPE[dtypenum] + newdtype = interp_dtype.ALL_DTYPES[dtypenum] if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit - return interp_dtype.ALL_DTYPE[dtypenum + 2] + return interp_dtype.ALL_DTYPES[dtypenum + 2] def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -17,6 +17,7 @@ from numpy import dtype assert dtype(bool).num == 0 + assert dtype(int).num == 7 assert dtype(long).num == 9 assert dtype(float).num == 12 @@ -81,6 +82,36 @@ assert isinstance(a[i], (int, long)) assert a[1] == 1 + def test_overflow(self): + from numpy import array + assert array([128], 'b')[0] == -128 + assert array([256], 'B')[0] == 0 + assert array([32768], 'h')[0] == -32768 + assert array([65536], 'H')[0] == 0 + raises(OverflowError, "array([2147483648], 'i')") + raises(OverflowError, "array([4294967296], 'I')") + raises(OverflowError, "array([9223372036854775808], 'q')") + raises(OverflowError, "array([18446744073709551616], 'Q')") + + def test_bool_binop_types(self): + from numpy import array, dtype + types = ('?','b','B','h','H','i','I','l','L','q','Q','f','d','g') + dtypes = [dtype(t) for t in types] + N = len(types) + a = array([True], '?') + for i in xrange(N): + assert (a + array([0], types[i])).dtype is dtypes[i] + + def test_binop_types(self): + from numpy import array, dtype + tests = 
(('b','B','h'), ('b','h','h'), ('b','H','i'), ('b','I','q'), + ('b','Q','d'), ('B','H','H'), ('B','I','I'), ('B','Q','Q'), + ('B','h','h'), ('h','H','i'), ('h','i','i'), ('H','i','i'), + ('H','I','I'), ('i','I','q'), ('I','q','q'), ('q','Q','d'), + ('i','f','f'), ('q','f','d'), ('q','d','d'), ('Q','f','d')) + for d1, d2, dout in tests: + assert (array([1], d1) + array([1], d2)).dtype is dtype(dout) + def test_add_int8(self): from numpy import array, dtype From noreply at buildbot.pypy.org Thu Sep 1 22:49:15 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 1 Sep 2011 22:49:15 +0200 (CEST) Subject: [pypy-commit] buildbot default: Windows buildbot makes .zip files, not bz2 archives. Message-ID: <20110901204915.33C8E8204C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r589:d68d3f08d031 Date: 2011-09-01 22:46 +0200 http://bitbucket.org/pypy/buildbot/changeset/d68d3f08d031/ Log: Windows buildbot makes .zip files, not bz2 archives. diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -293,10 +293,14 @@ '.'], workdir='build')) nightly = '~/nightly/' - pypy_c_rel = "build/" + name + ".tar.bz2" + if platform == "win32": + extension = ".zip" + else: + extension = ".tar.bz2" + pypy_c_rel = "build/" + name + extension self.addStep(PyPyUpload(slavesrc=WithProperties(pypy_c_rel), masterdest=WithProperties(nightly), - basename=name + ".tar.bz2", + basename=name + extension workdir='.', blocksize=100*1024)) From noreply at buildbot.pypy.org Thu Sep 1 22:51:47 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 1 Sep 2011 22:51:47 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: fixed binop dtype promotion. fixed one tests. All regular tests working. Some jit tests not working. 
Message-ID: <20110901205147.090528204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47001:0cc5004488bd Date: 2011-09-01 14:51 -0600 http://bitbucket.org/pypy/pypy/changeset/0cc5004488bd/ Log: fixed binop dtype promotion. fixed one tests. All regular tests working. Some jit tests not working. diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -407,12 +407,14 @@ class W_Float32Dtype(W_Float64Dtype): pass W_Float32Dtype.num = 11 +W_Float32Dtype.name = "float32" W_Float32Dtype.aliases = ["f"] W_Float32Dtype.applevel_types = [] class W_Float96Dtype(W_Float64Dtype): pass W_Float96Dtype.num = 13 +W_Float96Dtype.name = "float96" W_Float96Dtype.aliases = ["g"] W_Float96Dtype.applevel_types = [] diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -174,7 +174,7 @@ # Everything promotes to float, and bool promotes to everything. 
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR: if dt2.num == 11 and dt1.num_bytes >= 4: - return interp_dtype.W_Float64Dtype + return space.fromcache(interp_dtype.W_Float64Dtype) return dt2 # for now this means mixing signed and unsigned @@ -185,13 +185,19 @@ dtypenum = dt2.num + 2 else: dtypenum = dt2.num + 1 + if dt2.num == 10: + dtypenum += 1 newdtype = interp_dtype.ALL_DTYPES[dtypenum] if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR: - return newdtype + return space.fromcache(newdtype) else: # we only promoted to long on 32-bit or to longlong on 64-bit - return interp_dtype.ALL_DTYPES[dtypenum + 2] + if LONG_BIT == 32: + dtypenum += 2 + else: + dtypenum += 3 + return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum]) def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -96,11 +96,10 @@ def test_bool_binop_types(self): from numpy import array, dtype types = ('?','b','B','h','H','i','I','l','L','q','Q','f','d','g') - dtypes = [dtype(t) for t in types] N = len(types) a = array([True], '?') - for i in xrange(N): - assert (a + array([0], types[i])).dtype is dtypes[i] + for t in types: + assert (a + array([0], t)).dtype is dtype(t) def test_binop_types(self): from numpy import array, dtype @@ -108,7 +107,7 @@ ('b','Q','d'), ('B','H','H'), ('B','I','I'), ('B','Q','Q'), ('B','h','h'), ('h','H','i'), ('h','i','i'), ('H','i','i'), ('H','I','I'), ('i','I','q'), ('I','q','q'), ('q','Q','d'), - ('i','f','f'), ('q','f','d'), ('q','d','d'), ('Q','f','d')) + ('i','f','d'), ('q','f','d'), ('q','d','d'), ('Q','f','d')) for d1, d2, dout in tests: assert (array([1], d1) + array([1], d2)).dtype is dtype(dout) From noreply at buildbot.pypy.org Fri Sep 2 01:15:11 
2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Fri, 2 Sep 2011 01:15:11 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: fix unaryop promote_to_float and some test_base tests Message-ID: <20110901231511.702538204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47002:96b92594e41c Date: 2011-09-01 15:13 -0600 http://bitbucket.org/pypy/pypy/changeset/96b92594e41c/ Log: fix unaryop promote_to_float and some test_base tests diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -388,6 +388,21 @@ W_ULongDtype.num = 8 W_ULongDtype.aliases = ["L"] +W_Float32Dtype = create_low_level_dtype( + num = 11, kind = FLOATINGLTR, name = "float32", + aliases = ["f"], + applevel_types = [], + T = lltype.Float, # SingleFloat + valtype = float, # r_singlefloat + expected_size = 8, # 4 +) +class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): + def unwrap(self, space, w_item): + return self.adapt_val(space.float_w(space.float(w_item))) + + def str_format(self, item): + return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) + W_Float64Dtype = create_low_level_dtype( num = 12, kind = FLOATINGLTR, name = "float64", aliases = ["d"], @@ -403,20 +418,20 @@ def str_format(self, item): return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) -# these are really just stand-ins for now until we get them fully working -class W_Float32Dtype(W_Float64Dtype): - pass -W_Float32Dtype.num = 11 -W_Float32Dtype.name = "float32" -W_Float32Dtype.aliases = ["f"] -W_Float32Dtype.applevel_types = [] +W_Float96Dtype = create_low_level_dtype( + num = 13, kind = FLOATINGLTR, name = "float96", + aliases = ["g"], + applevel_types = [], + T = lltype.Float, # LongFloat + valtype = float, # r_longfloat + expected_size = 8, # 12 +) +class W_Float96Dtype(FloatArithmeticDtype, W_Float96Dtype): + def 
unwrap(self, space, w_item): + return self.adapt_val(space.float_w(space.float(w_item))) -class W_Float96Dtype(W_Float64Dtype): - pass -W_Float96Dtype.num = 13 -W_Float96Dtype.name = "float96" -W_Float96Dtype.aliases = ["g"] -W_Float96Dtype.applevel_types = [] + def str_format(self, item): + return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) ALL_DTYPES = [ W_BoolDtype, diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -204,8 +204,10 @@ if promote_bools and (dt.kind == interp_dtype.BOOLLTR): return space.fromcache(interp_dtype.W_Int8Dtype) if promote_to_float: + if dt.kind == interp_dtype.FLOATINGLTR: + return dt for bytes, dtype in interp_dtype.dtypes_by_num_bytes: - if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes >= dt.num_bytes: + if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: return space.fromcache(dtype) if promote_to_largest: if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR: diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -64,7 +64,9 @@ def test_unaryops(self, space): bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype) + int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype) int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype) + float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype) float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype) # Normal rules, everythign returns itself @@ -75,7 +77,8 @@ # Coerce to floats, some of these will eventually be float16, or # whatever our smallest float type is. 
- assert find_unaryop_result_dtype(space, bool_dtype, promote_to_float=True) is float64_dtype - assert find_unaryop_result_dtype(space, int8_dtype, promote_to_float=True) is float64_dtype + assert find_unaryop_result_dtype(space, bool_dtype, promote_to_float=True) is float32_dtype # will be float16 if we ever put that in + assert find_unaryop_result_dtype(space, int8_dtype, promote_to_float=True) is float32_dtype # will be float16 if we ever put that in + assert find_unaryop_result_dtype(space, int16_dtype, promote_to_float=True) is float32_dtype assert find_unaryop_result_dtype(space, int32_dtype, promote_to_float=True) is float64_dtype - assert find_unaryop_result_dtype(space, float64_dtype, promote_to_float=True) is float64_dtype \ No newline at end of file + assert find_unaryop_result_dtype(space, float64_dtype, promote_to_float=True) is float64_dtype From noreply at buildbot.pypy.org Fri Sep 2 01:15:12 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Fri, 2 Sep 2011 01:15:12 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: fixes for promote_to_float and promote_to_largest Message-ID: <20110901231512.B0CBE82212@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47003:018e3d037507 Date: 2011-09-01 17:13 -0600 http://bitbucket.org/pypy/pypy/changeset/018e3d037507/ Log: fixes for promote_to_float and promote_to_largest diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -206,6 +206,8 @@ if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR: return dt + if dt.num >= 5: + return space.fromcache(interp_dtype.W_Float64Dtype) for bytes, dtype in interp_dtype.dtypes_by_num_bytes: if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes: return space.fromcache(dtype) @@ -214,6 +216,8 @@ return space.fromcache(interp_dtype.W_Int64Dtype) elif dt.kind == 
interp_dtype.FLOATINGLTR: return space.fromcache(interp_dtype.W_Float64Dtype) + elif dt.kind == interp_dtype.UNSIGNEDLTR: + return space.fromcache(interp_dtype.W_UInt64Dtype) else: assert False return dt From noreply at buildbot.pypy.org Fri Sep 2 08:44:12 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 08:44:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a faq entry. Message-ID: <20110902064412.9E5D38204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47004:8eee94a7ba6b Date: 2011-09-02 08:39 +0200 http://bitbucket.org/pypy/pypy/changeset/8eee94a7ba6b/ Log: Add a faq entry. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -315,6 +315,22 @@ .. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html +--------------------------------------------------------- +Can RPython modules for PyPy be translated independently? +--------------------------------------------------------- + +No, you have to rebuild the entire interpreter. This means two things: + +* It is imperative to use test-driven development. You have to test + exhaustively your module in pure Python, before even attempting to + translate it. Once you translate it, you should have only a few typing + issues left to fix, but otherwise the result should work out of the box. + +* Second, and perhaps most important: do you have a really good reason + for writing the module in RPython in the first place? Nowadays you + should really look at alternatives, like writing it in pure Python, + using ctypes if it needs to call C code. + ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? 
---------------------------------------------------------- From noreply at buildbot.pypy.org Fri Sep 2 08:44:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 08:44:13 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110902064413.F1D1F8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47005:88215e914245 Date: 2011-09-02 08:39 +0200 http://bitbucket.org/pypy/pypy/changeset/88215e914245/ Log: merge heads diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -206,7 +206,7 @@ res = "array([" + ", ".join(concrete._getnums(False)) + "]" dtype = concrete.find_dtype() if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)): + dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or not self.find_size(): res += ", dtype=" + dtype.name res += ")" return space.wrap(res) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -52,10 +52,14 @@ from numpy import array, zeros a = array(range(5), float) assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" + a = array([], float) + assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" a = array(range(5), long) assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([], long) + assert repr(a) == "array([], dtype=int64)" a = array([True, False, True, False], "?") assert repr(a) == "array([True, False, True, False], dtype=bool)" From noreply at buildbot.pypy.org Fri Sep 2 08:44:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 08:44:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a paragraph. 
Message-ID: <20110902064415.3A5578204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47006:b8398cc6c1c0 Date: 2011-09-02 08:43 +0200 http://bitbucket.org/pypy/pypy/changeset/b8398cc6c1c0/ Log: Add a paragraph. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -331,6 +331,11 @@ should really look at alternatives, like writing it in pure Python, using ctypes if it needs to call C code. +In this context it is not that important to be able to translate +RPython modules independently of translating the complete interpreter. +(It could be done given enough efforts, but it's a really serious +undertaking. Consider it as quite unlikely for now.) + ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? ---------------------------------------------------------- From noreply at buildbot.pypy.org Fri Sep 2 09:00:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 09:00:20 +0200 (CEST) Subject: [pypy-commit] pypy default: Mention Cython. Message-ID: <20110902070020.2042D8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47007:e9cb09c6e277 Date: 2011-09-02 08:59 +0200 http://bitbucket.org/pypy/pypy/changeset/e9cb09c6e277/ Log: Mention Cython. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -329,7 +329,8 @@ * Second, and perhaps most important: do you have a really good reason for writing the module in RPython in the first place? Nowadays you should really look at alternatives, like writing it in pure Python, - using ctypes if it needs to call C code. + using ctypes if it needs to call C code. Other alternatives are being + developed too (as of summer 2011), like a Cython binding. In this context it is not that important to be able to translate RPython modules independently of translating the complete interpreter. 
From noreply at buildbot.pypy.org Fri Sep 2 10:00:39 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Sep 2011 10:00:39 +0200 (CEST) Subject: [pypy-commit] pypy default: fix on 64 bit Message-ID: <20110902080039.EBE428204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r47008:2c5ff10a0772 Date: 2011-09-02 09:59 +0200 http://bitbucket.org/pypy/pypy/changeset/2c5ff10a0772/ Log: fix on 64 bit diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) - p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) 
--TICK-- @@ -452,14 +452,14 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) """) From noreply at buildbot.pypy.org Fri Sep 2 10:17:23 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 10:17:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix this test by retrying in bad cases. Message-ID: <20110902081723.DFB4D8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47009:ec222879764c Date: 2011-09-02 10:16 +0200 http://bitbucket.org/pypy/pypy/changeset/ec222879764c/ Log: Fix this test by retrying in bad cases. diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -134,20 +134,24 @@ def test_custom_metaclass(self): import __pypy__ - class MetaA(type): - def __getattribute__(self, x): - return 1 - def f(self): - return 42 - A = type.__new__(MetaA, "A", (), {"f": f}) - l = [type.__getattribute__(A, "__new__")(A)] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 5 - assert cache_counter[1] >= 1 # should be (27, 3) - assert sum(cache_counter) == 10 + for j in range(20): + class MetaA(type): + def __getattribute__(self, x): + return 1 + def f(self): + return 42 + A = type.__new__(MetaA, "A", (), {"f": f}) + l = [type.__getattribute__(A, "__new__")(A)] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + 
cache_counter = __pypy__.method_cache_counter("f") + assert sum(cache_counter) == 10 + if cache_counter == (9, 1): + break + #else the moon is misaligned, try again + else: + raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): import __pypy__ From noreply at buildbot.pypy.org Fri Sep 2 10:17:25 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 10:17:25 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for pypy.jit.metainterp.test.test_ztranslation. Message-ID: <20110902081725.30A588204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47010:261c24cda77c Date: 2011-09-02 10:17 +0200 http://bitbucket.org/pypy/pypy/changeset/261c24cda77c/ Log: Fix for pypy.jit.metainterp.test.test_ztranslation. diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -206,6 +206,7 @@ _immutable_fields_ = ['funcsym'] argtypes = [] restype = lltype.nullptr(clibffi.FFI_TYPE_P.TO) + flags = 0 funcsym = lltype.nullptr(rffi.VOIDP.TO) def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL, From noreply at buildbot.pypy.org Fri Sep 2 11:23:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 11:23:20 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: A nicer image. Message-ID: <20110902092320.17E328204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r251:c72342f7e140 Date: 2011-09-02 11:23 +0200 http://bitbucket.org/pypy/pypy.org/changeset/c72342f7e140/ Log: A nicer image. diff --git a/image/people/arigo.png b/image/people/arigo.png index 24113c3371a96bd2ae9cd49ad6dca3a4459eca20..d35a82b51cd9b32f42ac1760be3ff5fd5ae03af6 GIT binary patch [cut] From noreply at buildbot.pypy.org Fri Sep 2 11:24:08 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Sep 2011 11:24:08 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: start a branch to cache more during tracing. 
start by stepwise refactoring the Message-ID: <20110902092408.A149A8204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47011:9806d3b8c116 Date: 2011-09-02 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/9806d3b8c116/ Log: start a branch to cache more during tracing. start by stepwise refactoring the current heap cache to be more independently testable diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/heapcache.py @@ -0,0 +1,23 @@ + + +class HeapCache(object): + def __init__(self): + self.reset() + + def reset(self): + # contains boxes where the class is already known + self.known_class_boxes = {} + # contains frame boxes that are not virtualizables + self.nonstandard_virtualizables = {} + + def is_class_known(self, box): + return box in self.known_class_boxes + + def class_now_know(self, box): + self.known_class_boxes[box] = None + + def is_nonstandard_virtualizable(self, box): + return box in self.nonstandard_virtualizables + + def nonstandard_virtualizables_now_known(self, box): + self.nonstandard_virtualizables[box] = None diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -17,6 +17,7 @@ from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG, ABORT_BRIDGE, \ ABORT_FORCE_QUASIIMMUT, ABORT_BAD_LOOP from pypy.jit.metainterp.jitexc import JitException, get_llexception +from pypy.jit.metainterp.heapcache import HeapCache from pypy.rlib.objectmodel import specialize from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker @@ -321,7 +322,7 @@ def _establish_nullity(self, box, orgpc): value = box.nonnull() if value: - if box not in self.metainterp.known_class_boxes: + if not self.metainterp.heapcache.is_class_known(box): self.generate_guard(rop.GUARD_NONNULL, 
box, resumepc=orgpc) else: if not isinstance(box, Const): @@ -373,7 +374,7 @@ cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.known_class_boxes[resbox] = None + self.metainterp.heapcache.class_now_know(resbox) return resbox ## @FixME #arguments("box") @@ -633,7 +634,7 @@ standard_box = self.metainterp.virtualizable_boxes[-1] if standard_box is box: return False - if box in self.metainterp.nonstandard_virtualizables: + if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, box, standard_box) @@ -642,7 +643,7 @@ if isstandard: self.metainterp.replace_box(box, standard_box) else: - self.metainterp.nonstandard_virtualizables[box] = None + self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) return not isstandard def _get_virtualizable_field_index(self, fielddescr): @@ -884,9 +885,9 @@ @arguments("orgpc", "box") def opimpl_guard_class(self, orgpc, box): clsbox = self.cls_of_box(box) - if box not in self.metainterp.known_class_boxes: + if not self.metainterp.heapcache.is_class_known(box): self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc) - self.metainterp.known_class_boxes[box] = None + self.metainterp.heapcache.class_now_know(box) return clsbox @arguments("int", "orgpc") @@ -1492,10 +1493,7 @@ self.last_exc_value_box = None self.retracing_loop_from = None self.call_pure_results = args_dict_box() - # contains boxes where the class is already known - self.known_class_boxes = {} - # contains frame boxes that are not virtualizables - self.nonstandard_virtualizables = {} + self.heapcache = HeapCache() # heap cache # maps descrs to (from_box, to_box) tuples self.heap_cache = {} @@ -1862,8 +1860,7 @@ duplicates[box] = None def reached_loop_header(self, greenboxes, redboxes, resumedescr): - self.known_class_boxes = {} - self.nonstandard_virtualizables = {} # XXX 
maybe not needed? + self.heapcache.reset() self.heap_cache = {} self.heap_array_cache = {} diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -0,0 +1,26 @@ +from pypy.jit.metainterp.heapcache import HeapCache + +class TestHeapCache(object): + def test_known_class_box(self): + h = HeapCache() + assert not h.is_class_known(1) + assert not h.is_class_known(2) + h.class_now_know(1) + assert h.is_class_known(1) + assert not h.is_class_known(2) + + h.reset() + assert not h.is_class_known(1) + assert not h.is_class_known(2) + + def test_nonstandard_virtualizable(self): + h = HeapCache() + assert not h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + h.nonstandard_virtualizables_now_known(1) + assert h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + + h.reset() + assert not h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) From noreply at buildbot.pypy.org Fri Sep 2 11:24:09 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Sep 2011 11:24:09 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: move all the heap caching to the new file. add unit tests. Message-ID: <20110902092409.E979B8204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47012:8549556a3847 Date: 2011-09-02 11:22 +0200 http://bitbucket.org/pypy/pypy/changeset/8549556a3847/ Log: move all the heap caching to the new file. add unit tests. 
diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -1,4 +1,5 @@ - +from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.history import ConstInt class HeapCache(object): def __init__(self): @@ -9,6 +10,29 @@ self.known_class_boxes = {} # contains frame boxes that are not virtualizables self.nonstandard_virtualizables = {} + # heap cache + # maps descrs to (from_box, to_box) tuples + self.heap_cache = {} + # heap array cache + # maps descrs to {index: (from_box, to_box)} dicts + self.heap_array_cache = {} + + def invalidate_caches(self, opnum, descr): + if opnum == rop.SETFIELD_GC: + return + if opnum == rop.SETARRAYITEM_GC: + return + if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: + return + if opnum == rop.CALL: + effectinfo = descr.get_extra_info() + ef = effectinfo.extraeffect + if ef == effectinfo.EF_LOOPINVARIANT or \ + ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ + ef == effectinfo.EF_ELIDABLE_CAN_RAISE: + return + self.heap_cache.clear() + self.heap_array_cache.clear() def is_class_known(self, box): return box in self.known_class_boxes @@ -21,3 +45,46 @@ def nonstandard_virtualizables_now_known(self, box): self.nonstandard_virtualizables[box] = None + + + def getfield(self, box, descr): + frombox, tobox = self.heap_cache.get(descr, (None, None)) + if box is frombox: + return tobox + return None + + def setfield(self, box, descr, fieldbox): + self.heap_cache[descr] = (box, fieldbox) + + def getarrayitem(self, box, descr, indexbox): + if not isinstance(indexbox, ConstInt): + return + index = indexbox.getint() + cache = self.heap_array_cache.get(descr, None) + if cache: + frombox, tobox = cache.get(index, (None, None)) + if frombox is box: + return tobox + + def setarrayitem(self, box, descr, indexbox, valuebox): + if not isinstance(indexbox, ConstInt): + cache = self.heap_array_cache.get(descr, None) + if cache 
is not None: + cache.clear() + return + cache = self.heap_array_cache.setdefault(descr, {}) + index = indexbox.getint() + cache[index] = box, valuebox + + def replace_box(self, oldbox, newbox): + for descr, (frombox, tobox) in self.heap_cache.iteritems(): + change = False + if frombox is oldbox: + change = True + frombox = newbox + if tobox is oldbox: + change = True + tobox = newbox + if change: + self.heap_cache[descr] = frombox, tobox + # XXX what about self.heap_array_cache? diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -398,19 +398,14 @@ @arguments("box", "descr", "box") def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox): - cache = self.metainterp.heap_array_cache.get(arraydescr, None) - if cache and isinstance(indexbox, ConstInt): - index = indexbox.getint() - frombox, tobox = cache.get(index, (None, None)) - if frombox is arraybox: - return tobox + tobox = self.metainterp.heapcache.getarrayitem( + arraybox, arraydescr, indexbox) + if tobox: + return tobox resbox = self.execute_with_descr(rop.GETARRAYITEM_GC, arraydescr, arraybox, indexbox) - if isinstance(indexbox, ConstInt): - if not cache: - cache = self.metainterp.heap_array_cache[arraydescr] = {} - index = indexbox.getint() - cache[index] = arraybox, resbox + self.metainterp.heapcache.setarrayitem( + arraybox, arraydescr, indexbox, resbox) return resbox @@ -440,13 +435,8 @@ indexbox, itembox): self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, indexbox, itembox) - if isinstance(indexbox, ConstInt): - cache = self.metainterp.heap_array_cache.setdefault(arraydescr, {}) - cache[indexbox.getint()] = arraybox, itembox - else: - cache = self.metainterp.heap_array_cache.get(arraydescr, None) - if cache: - cache.clear() + self.metainterp.heapcache.setarrayitem( + arraybox, arraydescr, indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any 
opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -541,11 +531,11 @@ @specialize.arg(1) def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr): - frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None)) - if frombox is box: + tobox = self.metainterp.heapcache.getfield(box, fielddescr) + if tobox is not None: return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) - self.metainterp.heap_cache[fielddescr] = (box, resbox) + self.metainterp.heapcache.setfield(box, fielddescr, resbox) return resbox @arguments("orgpc", "box", "descr") @@ -566,11 +556,11 @@ @arguments("box", "descr", "box") def _opimpl_setfield_gc_any(self, box, fielddescr, valuebox): - frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None)) - if frombox is box and tobox is valuebox: + tobox = self.metainterp.heapcache.getfield(box, fielddescr) + if tobox is valuebox: return self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heap_cache[fielddescr] = (box, valuebox) + self.metainterp.heapcache.setfield(box, fielddescr, valuebox) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @@ -1494,12 +1484,6 @@ self.retracing_loop_from = None self.call_pure_results = args_dict_box() self.heapcache = HeapCache() - # heap cache - # maps descrs to (from_box, to_box) tuples - self.heap_cache = {} - # heap array cache - # maps descrs to {index: (from_box, to_box)} dicts - self.heap_array_cache = {} def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1675,29 +1659,11 @@ # record the operation profiler = self.staticdata.profiler profiler.count_ops(opnum, RECORDED_OPS) - self._invalidate_caches(opnum, descr) + self.heapcache.invalidate_caches(opnum, descr) op = self.history.record(opnum, argboxes, resbox, descr) self.attach_debug_info(op) return resbox - def 
_invalidate_caches(self, opnum, descr): - if opnum == rop.SETFIELD_GC: - return - if opnum == rop.SETARRAYITEM_GC: - return - if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: - return - if opnum == rop.CALL: - effectinfo = descr.get_extra_info() - ef = effectinfo.extraeffect - if ef == effectinfo.EF_LOOPINVARIANT or \ - ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ - ef == effectinfo.EF_ELIDABLE_CAN_RAISE: - return - if self.heap_cache: - self.heap_cache.clear() - if self.heap_array_cache: - self.heap_array_cache.clear() def attach_debug_info(self, op): if (not we_are_translated() and op is not None @@ -1861,8 +1827,6 @@ def reached_loop_header(self, greenboxes, redboxes, resumedescr): self.heapcache.reset() - self.heap_cache = {} - self.heap_array_cache = {} duplicates = {} self.remove_consts_and_duplicates(redboxes, len(redboxes), @@ -2370,17 +2334,7 @@ for i in range(len(boxes)): if boxes[i] is oldbox: boxes[i] = newbox - for descr, (frombox, tobox) in self.heap_cache.iteritems(): - change = False - if frombox is oldbox: - change = True - frombox = newbox - if tobox is oldbox: - change = True - tobox = newbox - if change: - self.heap_cache[descr] = frombox, tobox - # XXX what about self.heap_array_cache? 
+ self.heapcache.replace_box(oldbox, newbox) def find_biggest_function(self): start_stack = [] diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -1,4 +1,36 @@ from pypy.jit.metainterp.heapcache import HeapCache +from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.history import ConstInt + +box1 = object() +box2 = object() +box3 = object() +box4 = object() +descr1 = object() +descr2 = object() + +index1 = ConstInt(0) +index2 = ConstInt(1) + + +class FakeEffektinfo(object): + EF_ELIDABLE_CANNOT_RAISE = 0 #elidable function (and cannot raise) + EF_LOOPINVARIANT = 1 #special: call it only once per loop + EF_CANNOT_RAISE = 2 #a function which cannot raise + EF_ELIDABLE_CAN_RAISE = 3 #elidable function (but can raise) + EF_CAN_RAISE = 4 #normal function (can raise) + EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables + EF_RANDOM_EFFECTS = 6 #can do whatever + + def __init__(self, extraeffect): + self.extraeffect = extraeffect + +class FakeCallDescr(object): + def __init__(self, extraeffect): + self.extraeffect = extraeffect + + def get_extra_info(self): + return FakeEffektinfo(self.extraeffect) class TestHeapCache(object): def test_known_class_box(self): @@ -24,3 +56,113 @@ h.reset() assert not h.is_nonstandard_virtualizable(1) assert not h.is_nonstandard_virtualizable(2) + + + def test_heapcache_fields(self): + h = HeapCache() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is None + h.setfield(box1, descr2, box3) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is box3 + h.setfield(box1, descr1, box3) + assert h.getfield(box1, descr1) is box3 + assert h.getfield(box1, descr2) is box3 + h.setfield(box3, 
descr1, box1) + assert h.getfield(box3, descr1) is box1 + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is box3 + + h.reset() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box3, descr1) is None + + def test_heapcache_arrays(self): + h = HeapCache() + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + assert h.getarrayitem(box1, descr2, index2) is None + h.setarrayitem(box1, descr1, index2, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box1, descr2, index1, box3) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box1, descr1, index1, box3) + assert h.getarrayitem(box1, descr1, index1) is box3 + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box3, descr1, index1, box1) + assert h.getarrayitem(box3, descr1, index1) is box1 + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.reset() + assert h.getarrayitem(box1, descr1, index1) is None + assert 
h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box3, descr1, index1) is None + + def test_heapcache_array_nonconst_index(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr1, index2, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + h.setarrayitem(box1, descr1, box2, box3) + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + + def test_invalidate_cache(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr1, index2, box4) + h.invalidate_caches(rop.INT_ADD, None) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + + h.invalidate_caches( + rop.CALL, FakeCallDescr(FakeEffektinfo.EF_ELIDABLE_CANNOT_RAISE)) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + + + h.invalidate_caches( + rop.CALL, FakeCallDescr(FakeEffektinfo.EF_RANDOM_EFFECTS)) + assert h.getfield(box1, descr1) is None + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + + + def test_replace_box(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + h.setfield(box1, descr2, box3) + h.replace_box(box1, box4) + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box4, descr1) is box2 + assert h.getfield(box4, descr2) is box3 From noreply at buildbot.pypy.org Fri Sep 2 11:32:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 11:32:13 +0200 (CEST) Subject: [pypy-commit] buildbot default: Kill an old test. 
Message-ID: <20110902093213.5A06E8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r590:074c6ccb9256 Date: 2011-09-02 11:31 +0200 http://bitbucket.org/pypy/buildbot/changeset/074c6ccb9256/ Log: Kill an old test. diff --git a/bot2/pypybuildbot/test/test_pypylist.py b/bot2/pypybuildbot/test/test_pypylist.py --- a/bot2/pypybuildbot/test/test_pypylist.py +++ b/bot2/pypybuildbot/test/test_pypylist.py @@ -96,9 +96,6 @@ t = PyPyTarball('pypy-c-nojit-76867-linux.tar.bz2') check_builder_names(t, 'own-linux-x86-32', 'pypy-c-app-level-linux-x86-32') - t = PyPyTarball('pypy-c-stackless-76867-linux.tar.bz2') - check_builder_names(t, 'own-linux-x86-32', 'pypy-c-stackless-app-level-linux-x86-32') - t = PyPyTarball('pypy-c-jit-76867-osx.tar.bz2') check_builder_names(t, 'own-macosx-x86-32', 'pypy-c-jit-macosx-x86-32') From noreply at buildbot.pypy.org Fri Sep 2 11:32:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 11:32:14 +0200 (CEST) Subject: [pypy-commit] buildbot default: Bah! Syntax error. Message-ID: <20110902093214.7F3458204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r591:e3dc118be8c0 Date: 2011-09-02 11:31 +0200 http://bitbucket.org/pypy/buildbot/changeset/e3dc118be8c0/ Log: Bah! Syntax error. diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -300,7 +300,7 @@ pypy_c_rel = "build/" + name + extension self.addStep(PyPyUpload(slavesrc=WithProperties(pypy_c_rel), masterdest=WithProperties(nightly), - basename=name + extension + basename=name + extension, workdir='.', blocksize=100*1024)) From noreply at buildbot.pypy.org Fri Sep 2 14:45:54 2011 From: noreply at buildbot.pypy.org (snus_mumrik) Date: Fri, 2 Sep 2011 14:45:54 +0200 (CEST) Subject: [pypy-commit] pypy numpy-comparison: Creating branch for comparison operators. Depends on numpy-dtype-alt. 
Message-ID: <20110902124554.887348204C@wyvern.cs.uni-duesseldorf.de> Author: Ilya Osadchiy Branch: numpy-comparison Changeset: r47013:6dc3eafc6e1f Date: 2011-08-27 13:23 +0300 http://bitbucket.org/pypy/pypy/changeset/6dc3eafc6e1f/ Log: Creating branch for comparison operators. Depends on numpy-dtype- alt. From noreply at buildbot.pypy.org Fri Sep 2 14:45:55 2011 From: noreply at buildbot.pypy.org (snus_mumrik) Date: Fri, 2 Sep 2011 14:45:55 +0200 (CEST) Subject: [pypy-commit] pypy numpy-comparison: Initial implementation (tests pass, translation fails) Message-ID: <20110902124555.CC5FD8204C@wyvern.cs.uni-duesseldorf.de> Author: Ilya Osadchiy Branch: numpy-comparison Changeset: r47014:edb6c31894de Date: 2011-09-02 10:37 +0300 http://bitbucket.org/pypy/pypy/changeset/edb6c31894de/ Log: Initial implementation (tests pass, translation fails) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -25,17 +25,18 @@ 'floor': 'interp_ufuncs.floor', 'maximum': 'interp_ufuncs.maximum', 'minimum': 'interp_ufuncs.minimum', - 'multiply': 'interp_ufuncs.multiply', - 'negative': 'interp_ufuncs.negative', - 'reciprocal': 'interp_ufuncs.reciprocal', - 'sign': 'interp_ufuncs.sign', - 'subtract': 'interp_ufuncs.subtract', - 'sin': 'interp_ufuncs.sin', - 'cos': 'interp_ufuncs.cos', - 'tan': 'interp_ufuncs.tan', - 'arcsin': 'interp_ufuncs.arcsin', - 'arccos': 'interp_ufuncs.arccos', - 'arctan': 'interp_ufuncs.arctan', + 'multiply': 'interp_ufuncs.multiply', + 'negative': 'interp_ufuncs.negative', + 'reciprocal': 'interp_ufuncs.reciprocal', + 'sign': 'interp_ufuncs.sign', + 'subtract': 'interp_ufuncs.subtract', + 'sin': 'interp_ufuncs.sin', + 'cos': 'interp_ufuncs.cos', + 'tan': 'interp_ufuncs.tan', + 'arcsin': 'interp_ufuncs.arcsin', + 'arccos': 'interp_ufuncs.arccos', + 'arctan': 'interp_ufuncs.arctan', + 'equal': 'interp_ufuncs.equal', } appleveldefs = { diff --git 
a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -125,6 +125,15 @@ )) return impl +def bool_binop(func): + @functools.wraps(func) + def impl(self, v1, v2): + return self.box(func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)), + )) + return impl + def unaryop(func): @functools.wraps(func) def impl(self, v): @@ -147,6 +156,25 @@ def div(self, v1, v2): return v1 / v2 + @bool_binop + def eq(self, v1, v2): + return v1 == v2 + @bool_binop + def ne(self, v1, v2): + return v1 != v2 + @bool_binop + def lt(self, v1, v2): + return v1 < v2 + @bool_binop + def le(self, v1, v2): + return v1 <= v2 + @bool_binop + def gt(self, v1, v2): + return v1 > v2 + @bool_binop + def ge(self, v1, v2): + return v1 >= v2 + @unaryop def pos(self, v): return +v @@ -166,8 +194,8 @@ def bool(self, v): return bool(self.for_computation(self.unbox(v))) - def ne(self, v1, v2): - return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2)) +# def ne(self, v1, v2): +# return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2)) class FloatArithmeticDtype(ArithmaticTypeMixin): @@ -355,4 +383,4 @@ num = interp_attrproperty("num", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), shape = GetSetProperty(W_Dtype.descr_get_shape), -) \ No newline at end of file +) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -74,6 +74,13 @@ descr_pow = _binop_impl(interp_ufuncs.power) descr_mod = _binop_impl(interp_ufuncs.mod) + descr_eq = _binop_impl(interp_ufuncs.equal) + descr_ne = _binop_impl(interp_ufuncs.not_equal) + descr_lt = _binop_impl(interp_ufuncs.less) + descr_le = _binop_impl(interp_ufuncs.less_equal) + descr_gt = 
_binop_impl(interp_ufuncs.greater) + descr_ge = _binop_impl(interp_ufuncs.greater_equal) + def _binop_right_impl(w_ufunc): def impl(self, space, w_other): w_other = scalar_w(space, @@ -152,7 +159,7 @@ size=size, i=i, result=result, cur_best=cur_best) new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.ne(new_best, cur_best): + if dtype.unbox(dtype.ne(new_best, cur_best)): result = i cur_best = new_best i += 1 @@ -350,11 +357,12 @@ """ Class for representing virtual arrays, such as binary ops or ufuncs """ - def __init__(self, signature, res_dtype): + def __init__(self, signature, res_dtype, calc_dtype): BaseArray.__init__(self) self.forced_result = None self.signature = signature self.res_dtype = res_dtype + self.calc_dtype = calc_dtype def _del_sources(self): # Function for deleting references to source arrays, to allow garbage-collecting them @@ -402,7 +410,7 @@ class Call1(VirtualArray): def __init__(self, signature, res_dtype, values): - VirtualArray.__init__(self, signature, res_dtype) + VirtualArray.__init__(self, signature, res_dtype, res_dtype) self.values = values def _del_sources(self): @@ -427,8 +435,8 @@ """ Intermediate class for performing binary operations. 
""" - def __init__(self, signature, res_dtype, left, right): - VirtualArray.__init__(self, signature, res_dtype) + def __init__(self, signature, res_dtype, calc_dtype, left, right): + VirtualArray.__init__(self, signature, res_dtype, calc_dtype) self.left = left self.right = right @@ -444,14 +452,14 @@ return self.right.find_size() def _eval(self, i): - lhs = self.left.eval(i).convert_to(self.res_dtype) - rhs = self.right.eval(i).convert_to(self.res_dtype) + lhs = self.left.eval(i).convert_to(self.calc_dtype) + rhs = self.right.eval(i).convert_to(self.calc_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Signature) call_sig = sig.components[0] assert isinstance(call_sig, signature.Call2) - return call_sig.func(self.res_dtype, lhs, rhs) + return call_sig.func(self.calc_dtype, lhs, rhs).convert_to(self.res_dtype) class ViewArray(BaseArray): """ @@ -610,6 +618,13 @@ __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), + __eq__ = interp2app(BaseArray.descr_eq), + __ne__ = interp2app(BaseArray.descr_ne), + __lt__ = interp2app(BaseArray.descr_lt), + __le__ = interp2app(BaseArray.descr_le), + __gt__ = interp2app(BaseArray.descr_gt), + __ge__ = interp2app(BaseArray.descr_ge), + dtype = GetSetProperty(BaseArray.descr_get_dtype), shape = GetSetProperty(BaseArray.descr_get_shape), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -24,9 +24,9 @@ return w_res return func_with_new_name(impl, "%s_dispatcher" % func.__name__) -def ufunc2(func=None, promote_to_float=False): +def ufunc2(func=None, promote_to_float=False, bool_result=False): if func is None: - return lambda func: ufunc2(func, promote_to_float) + return lambda func: ufunc2(func, promote_to_float, bool_result) call_sig = signature.Call2(func) def impl(space, w_lhs, w_rhs): @@ -35,17 +35,25 @@ w_lhs = 
convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - res_dtype = find_binop_result_dtype(space, + calc_dtype = find_binop_result_dtype(space, w_lhs.find_dtype(), w_rhs.find_dtype(), promote_to_float=promote_to_float, ) + # Some operations return bool regardless of input type + if bool_result: + res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + else: + res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return func(res_dtype, w_lhs.value, w_rhs.value).wrap(space) + lhs = w_lhs.value.convert_to(calc_dtype) + rhs = w_rhs.value.convert_to(calc_dtype) + interm_res = func(calc_dtype, lhs, rhs) + return interm_res.convert_to(res_dtype).wrap(space) new_sig = signature.Signature.find_sig([ call_sig, w_lhs.signature, w_rhs.signature ]) - w_res = Call2(new_sig, res_dtype, w_lhs, w_rhs) + w_res = Call2(new_sig, res_dtype, calc_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res @@ -123,6 +131,13 @@ ("maximum", "max", 2), ("minimum", "min", 2), + ("equal", "eq", 2, {"bool_result": True}), + ("not_equal", "ne", 2, {"bool_result": True}), + ("less", "lt", 2, {"bool_result": True}), + ("less_equal", "le", 2, {"bool_result": True}), + ("greater", "gt", 2, {"bool_result": True}), + ("greater_equal", "ge", 2, {"bool_result": True}), + ("copysign", "copysign", 2, {"promote_to_float": True}), ("positive", "pos", 1), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -510,6 +510,34 @@ assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + def test_comparison(self): + from numpy import array, dtype + a = array(range(5)) + b = array(range(5), dtype=float) + for func in [ + lambda x, y: x == y, + lambda x, y: x != y, + lambda x, y: x < y, + lambda x, y: x <= y, + lambda x, y: x > y, + lambda x, y: x >= y, + 
]: + _a3 = func (a, 3) + assert _a3.dtype is dtype(bool) + for i in xrange(5): + assert _a3[i] == (True if func(a[i], 3) else False) + _b3 = func (b, 3) + assert _b3.dtype is dtype(bool) + for i in xrange(5): + assert _b3[i] == (True if func(b[i], 3) else False) + _3a = func (3, a) + assert _3a.dtype is dtype(bool) + for i in xrange(5): + assert _3a[i] == (True if func(3, a[i]) else False) + _3b = func (3, b) + assert _3b.dtype is dtype(bool) + for i in xrange(5): + assert _3b[i] == (True if func(3, b[i]) else False) class AppTestSupport(object): def setup_class(cls): @@ -522,4 +550,4 @@ a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") \ No newline at end of file + raises(ValueError, fromstring, "abc") diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -267,3 +267,11 @@ b = arctan(a) assert math.isnan(b[0]) + def test_comparison(self): + from numpy import array, dtype, equal + assert equal(3, 3) is True + assert equal(3, 4) is False + assert equal(3.0, 3.0) is True + assert equal(3.0, 3.5) is False + assert equal(3.0, 3) is True + assert equal(3.0, 4) is False From noreply at buildbot.pypy.org Fri Sep 2 14:46:01 2011 From: noreply at buildbot.pypy.org (snus_mumrik) Date: Fri, 2 Sep 2011 14:46:01 +0200 (CEST) Subject: [pypy-commit] pypy numpy-comparison: merge default Message-ID: <20110902124601.0D2B38204C@wyvern.cs.uni-duesseldorf.de> Author: Ilya Osadchiy Branch: numpy-comparison Changeset: r47015:ec77dcd63e59 Date: 2011-09-02 11:15 +0300 http://bitbucket.org/pypy/pypy/changeset/ec77dcd63e59/ Log: merge default diff too long, truncating to 10000 out of 18343 lines diff --git a/lib-python/modified-2.7/ctypes/util.py b/lib-python/modified-2.7/ctypes/util.py --- a/lib-python/modified-2.7/ctypes/util.py +++ b/lib-python/modified-2.7/ctypes/util.py @@ -72,8 +72,8 
@@ return name if os.name == "posix" and sys.platform == "darwin": - from ctypes.macholib.dyld import dyld_find as _dyld_find def find_library(name): + from ctypes.macholib.dyld import dyld_find as _dyld_find possible = ['lib%s.dylib' % name, '%s.dylib' % name, '%s.framework/%s' % (name, name)] diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/gzip.py @@ -0,0 +1,514 @@ +"""Functions that read and write gzipped files. + +The user of the file doesn't have to worry about the compression, +but random access is not allowed.""" + +# based on Andrew Kuchling's minigzip.py distributed with the zlib module + +import struct, sys, time, os +import zlib +import io +import __builtin__ + +__all__ = ["GzipFile","open"] + +FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16 + +READ, WRITE = 1, 2 + +def write32u(output, value): + # The L format writes the bit pattern correctly whether signed + # or unsigned. + output.write(struct.pack("' + + def _check_closed(self): + """Raises a ValueError if the underlying file object has been closed. 
+ + """ + if self.closed: + raise ValueError('I/O operation on closed file.') + + def _init_write(self, filename): + self.name = filename + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + self.writebuf = [] + self.bufsize = 0 + + def _write_gzip_header(self): + self.fileobj.write('\037\213') # magic header + self.fileobj.write('\010') # compression method + fname = os.path.basename(self.name) + if fname.endswith(".gz"): + fname = fname[:-3] + flags = 0 + if fname: + flags = FNAME + self.fileobj.write(chr(flags)) + mtime = self.mtime + if mtime is None: + mtime = time.time() + write32u(self.fileobj, long(mtime)) + self.fileobj.write('\002') + self.fileobj.write('\377') + if fname: + self.fileobj.write(fname + '\000') + + def _init_read(self): + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + + def _read_gzip_header(self): + magic = self.fileobj.read(2) + if magic != '\037\213': + raise IOError, 'Not a gzipped file' + method = ord( self.fileobj.read(1) ) + if method != 8: + raise IOError, 'Unknown compression method' + flag = ord( self.fileobj.read(1) ) + self.mtime = read32(self.fileobj) + # extraflag = self.fileobj.read(1) + # os = self.fileobj.read(1) + self.fileobj.read(2) + + if flag & FEXTRA: + # Read & discard the extra field, if present + xlen = ord(self.fileobj.read(1)) + xlen = xlen + 256*ord(self.fileobj.read(1)) + self.fileobj.read(xlen) + if flag & FNAME: + # Read and discard a null-terminated string containing the filename + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FCOMMENT: + # Read and discard a null-terminated string containing a comment + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FHCRC: + self.fileobj.read(2) # Read & discard the 16-bit header CRC + + def write(self,data): + self._check_closed() + if self.mode != WRITE: + import errno + raise IOError(errno.EBADF, "write() on read-only GzipFile object") + + if self.fileobj is None: + raise 
ValueError, "write() on closed GzipFile object" + + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + if len(data) > 0: + self.size = self.size + len(data) + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + self.fileobj.write( self.compress.compress(data) ) + self.offset += len(data) + + return len(data) + + def read(self, size=-1): + self._check_closed() + if self.mode != READ: + import errno + raise IOError(errno.EBADF, "read() on write-only GzipFile object") + + if self.extrasize <= 0 and self.fileobj is None: + return '' + + readsize = 1024 + if size < 0: # get the whole thing + try: + while True: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + size = self.extrasize + elif size == 0: + return "" + else: # just get some more of it + try: + while size > self.extrasize: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + if size > self.extrasize: + size = self.extrasize + + offset = self.offset - self.extrastart + chunk = self.extrabuf[offset: offset + size] + self.extrasize = self.extrasize - size + + self.offset += size + return chunk + + def _unread(self, buf): + self.extrasize = len(buf) + self.extrasize + self.offset -= len(buf) + + def _read(self, size=1024): + if self.fileobj is None: + raise EOFError, "Reached EOF" + + if self._new_member: + # If the _new_member flag is set, we have to + # jump to the next member, if there is one. + # + # First, check if we're at the end of the file; + # if so, it's time to stop; no more members to read. 
+ pos = self.fileobj.tell() # Save current position + self.fileobj.seek(0, 2) # Seek to end of file + if pos == self.fileobj.tell(): + raise EOFError, "Reached EOF" + else: + self.fileobj.seek( pos ) # Return to original position + + self._init_read() + self._read_gzip_header() + self.decompress = zlib.decompressobj(-zlib.MAX_WBITS) + self._new_member = False + + # Read a chunk of data from the file + buf = self.fileobj.read(size) + + # If the EOF has been reached, flush the decompression object + # and mark this object as finished. + + if buf == "": + uncompress = self.decompress.flush() + self._read_eof() + self._add_read_data( uncompress ) + raise EOFError, 'Reached EOF' + + uncompress = self.decompress.decompress(buf) + self._add_read_data( uncompress ) + + if self.decompress.unused_data != "": + # Ending case: we've come to the end of a member in the file, + # so seek back to the start of the unused data, finish up + # this member, and read a new gzip header. + # (The number of bytes to seek back is the length of the unused + # data, minus 8 because _read_eof() will rewind a further 8 bytes) + self.fileobj.seek( -len(self.decompress.unused_data)+8, 1) + + # Check the CRC and file size, and set the flag so we read + # a new member on the next call + self._read_eof() + self._new_member = True + + def _add_read_data(self, data): + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + offset = self.offset - self.extrastart + self.extrabuf = self.extrabuf[offset:] + data + self.extrasize = self.extrasize + len(data) + self.extrastart = self.offset + self.size = self.size + len(data) + + def _read_eof(self): + # We've read to the end of the file, so we have to rewind in order + # to reread the 8 bytes containing the CRC and the file size. + # We check the that the computed CRC and size of the + # uncompressed data matches the stored values. Note that the size + # stored is the true file size mod 2**32. 
+ self.fileobj.seek(-8, 1) + crc32 = read32(self.fileobj) + isize = read32(self.fileobj) # may exceed 2GB + if crc32 != self.crc: + raise IOError("CRC check failed %s != %s" % (hex(crc32), + hex(self.crc))) + elif isize != (self.size & 0xffffffffL): + raise IOError, "Incorrect length of data produced" + + # Gzip files can be padded with zeroes and still have archives. + # Consume all zero bytes and set the file position to the first + # non-zero byte. See http://www.gzip.org/#faq8 + c = "\x00" + while c == "\x00": + c = self.fileobj.read(1) + if c: + self.fileobj.seek(-1, 1) + + @property + def closed(self): + return self.fileobj is None + + def close(self): + if self.fileobj is None: + return + if self.mode == WRITE: + self.fileobj.write(self.compress.flush()) + write32u(self.fileobj, self.crc) + # self.size may exceed 2GB, or even 4GB + write32u(self.fileobj, self.size & 0xffffffffL) + self.fileobj = None + elif self.mode == READ: + self.fileobj = None + if self.myfileobj: + self.myfileobj.close() + self.myfileobj = None + + def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH): + self._check_closed() + if self.mode == WRITE: + # Ensure the compressor's buffer is flushed + self.fileobj.write(self.compress.flush(zlib_mode)) + self.fileobj.flush() + + def fileno(self): + """Invoke the underlying file object's fileno() method. + + This will raise AttributeError if the underlying file object + doesn't support fileno(). 
+ """ + return self.fileobj.fileno() + + def rewind(self): + '''Return the uncompressed stream file position indicator to the + beginning of the file''' + if self.mode != READ: + raise IOError("Can't rewind in write mode") + self.fileobj.seek(0) + self._new_member = True + self.extrabuf = "" + self.extrasize = 0 + self.extrastart = 0 + self.offset = 0 + + def readable(self): + return self.mode == READ + + def writable(self): + return self.mode == WRITE + + def seekable(self): + return True + + def seek(self, offset, whence=0): + if whence: + if whence == 1: + offset = self.offset + offset + else: + raise ValueError('Seek from end not supported') + if self.mode == WRITE: + if offset < self.offset: + raise IOError('Negative seek in write mode') + count = offset - self.offset + for i in range(count // 1024): + self.write(1024 * '\0') + self.write((count % 1024) * '\0') + elif self.mode == READ: + if offset == self.offset: + self.read(0) # to make sure that this file is open + return self.offset + if offset < self.offset: + # for negative seek, rewind and do positive seek + self.rewind() + count = offset - self.offset + for i in range(count // 1024): + self.read(1024) + self.read(count % 1024) + + return self.offset + + def readline(self, size=-1): + if size < 0: + # Shortcut common case - newline found in buffer. + offset = self.offset - self.extrastart + i = self.extrabuf.find('\n', offset) + 1 + if i > 0: + self.extrasize -= i - offset + self.offset += i - offset + return self.extrabuf[offset: i] + + size = sys.maxint + readsize = self.min_readsize + else: + readsize = size + bufs = [] + while size != 0: + c = self.read(readsize) + i = c.find('\n') + + # We set i=size to break out of the loop under two + # conditions: 1) there's no newline, and the chunk is + # larger than size, or 2) there is a newline, but the + # resulting line would be longer than 'size'. 
+ if (size <= i) or (i == -1 and len(c) > size): + i = size - 1 + + if i >= 0 or c == '': + bufs.append(c[:i + 1]) # Add portion of last chunk + self._unread(c[i + 1:]) # Push back rest of chunk + break + + # Append chunk to list, decrease 'size', + bufs.append(c) + size = size - len(c) + readsize = min(size, readsize * 2) + if readsize > self.min_readsize: + self.min_readsize = min(readsize, self.min_readsize * 2, 512) + return ''.join(bufs) # Return resulting line + + +def _test(): + # Act like gzip; with -d, act like gunzip. + # The input file is not deleted, however, nor are any other gzip + # options or features supported. + args = sys.argv[1:] + decompress = args and args[0] == "-d" + if decompress: + args = args[1:] + if not args: + args = ["-"] + for arg in args: + if decompress: + if arg == "-": + f = GzipFile(filename="", mode="rb", fileobj=sys.stdin) + g = sys.stdout + else: + if arg[-3:] != ".gz": + print "filename doesn't end in .gz:", repr(arg) + continue + f = open(arg, "rb") + g = __builtin__.open(arg[:-3], "wb") + else: + if arg == "-": + f = sys.stdin + g = GzipFile(filename="", mode="wb", fileobj=sys.stdout) + else: + f = __builtin__.open(arg, "rb") + g = open(arg + ".gz", "wb") + while True: + chunk = f.read(1024) + if not chunk: + break + g.write(chunk) + if g is not sys.stdout: + g.close() + if f is not sys.stdin: + f.close() + +if __name__ == '__main__': + _test() diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py --- a/lib-python/modified-2.7/tarfile.py +++ b/lib-python/modified-2.7/tarfile.py @@ -252,8 +252,8 @@ the high bit set. So we calculate two checksums, unsigned and signed. 
""" - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512])) + signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): @@ -265,7 +265,6 @@ if length is None: shutil.copyfileobj(src, dst) return - BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in xrange(blocks): @@ -802,19 +801,19 @@ if self.closed: raise ValueError("I/O operation on closed file") - buf = "" if self.buffer: if size is None: - buf = self.buffer + buf = self.buffer + self.fileobj.read() self.buffer = "" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() + buf += self.fileobj.read(size - len(buf)) else: - buf += self.fileobj.read(size - len(buf)) + if size is None: + buf = self.fileobj.read() + else: + buf = self.fileobj.read(size) self.position += len(buf) return buf diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -166,7 +166,8 @@ return tp._alignmentofinstances() def byref(cdata): - from ctypes import pointer + # "pointer" is imported at the end of this module to avoid circular + # imports return pointer(cdata) def cdata_from_address(self, address): @@ -226,3 +227,6 @@ 'v' : _ffi.types.sshort, } + +# used by "byref" +from _ctypes.pointer import pointer diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -469,7 +469,8 @@ newargs = [] for argtype, arg in zip(argtypes, args): param = argtype.from_param(arg) - if argtype._type_ == 'P': # special-case for c_void_p + _type_ = getattr(argtype, '_type_', None) + if _type_ == 'P': # special-case 
for c_void_p param = param._get_buffer_value() elif self._is_primitive(argtype): param = param.value diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -169,6 +169,8 @@ def from_address(self, address): instance = StructOrUnion.__new__(self) + if isinstance(address, _rawffi.StructureInstance): + address = address.buffer instance.__dict__['_buffer'] = self._ffistruct.fromaddress(address) return instance diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -24,6 +24,7 @@ from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast from ctypes import sizeof, c_ssize_t +from collections import OrderedDict import datetime import sys import time @@ -274,6 +275,28 @@ def unicode_text_factory(x): return unicode(x, 'utf-8') + +class StatementCache(object): + def __init__(self, connection, maxcount): + self.connection = connection + self.maxcount = maxcount + self.cache = OrderedDict() + + def get(self, sql, cursor, row_factory): + try: + stat = self.cache[sql] + except KeyError: + stat = Statement(self.connection, sql) + self.cache[sql] = stat + if len(self.cache) > self.maxcount: + self.cache.popitem(0) + # + if stat.in_use: + stat = Statement(self.connection, sql) + stat.set_cursor_and_factory(cursor, row_factory) + return stat + + class Connection(object): def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", check_same_thread=True, factory=None, cached_statements=100): @@ -291,6 +314,7 @@ self.row_factory = None self._isolation_level = isolation_level self.detect_types = detect_types + self.statement_cache = StatementCache(self, cached_statements) self.cursors = [] @@ -399,7 +423,7 @@ cur = Cursor(self) if not isinstance(sql, (str, unicode)): raise Warning("SQL is of wrong type. 
Must be string or unicode.") - statement = Statement(cur, sql, self.row_factory) + statement = self.statement_cache.get(sql, cur, self.row_factory) return statement def _get_isolation_level(self): @@ -708,7 +732,7 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: if self.statement.kind == "DDL": @@ -746,7 +770,8 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) + if self.statement.kind == "DML": self.connection._begin() else: @@ -871,14 +896,12 @@ lastrowid = property(_getlastrowid) class Statement(object): - def __init__(self, cur, sql, row_factory): + def __init__(self, connection, sql): self.statement = None if not isinstance(sql, str): raise ValueError, "sql must be a string" - self.con = cur.connection - self.cur = weakref.ref(cur) + self.con = connection self.sql = sql # DEBUG ONLY - self.row_factory = row_factory first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): self.kind = "DML" @@ -887,6 +910,11 @@ else: self.kind = "DDL" self.exhausted = False + self.in_use = False + # + # set by set_cursor_and_factory + self.cur = None + self.row_factory = None self.statement = c_void_p() next_char = c_char_p() @@ -907,6 +935,10 @@ self._build_row_cast_map() + def set_cursor_and_factory(self, cur, row_factory): + self.cur = weakref.ref(cur) + self.row_factory = row_factory + def _build_row_cast_map(self): self.row_cast_map = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): @@ -976,6 +1008,7 @@ ret = sqlite.sqlite3_reset(self.statement) if ret != SQLITE_OK: raise self.con._get_exception(ret) + 
self.mark_dirty() if params is None: if sqlite.sqlite3_bind_parameter_count(self.statement) != 0: @@ -1068,11 +1101,17 @@ def reset(self): self.row_cast_map = None - return sqlite.sqlite3_reset(self.statement) + ret = sqlite.sqlite3_reset(self.statement) + self.in_use = False + return ret def finalize(self): sqlite.sqlite3_finalize(self.statement) self.statement = None + self.in_use = False + + def mark_dirty(self): + self.in_use = True def __del__(self): sqlite.sqlite3_finalize(self.statement) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,1 +1,138 @@ -from _stackless import greenlet +import _continuation, sys + + +# ____________________________________________________________ +# Exceptions + +class GreenletExit(Exception): + """This special exception does not propagate to the parent greenlet; it +can be used to kill a single greenlet.""" + +error = _continuation.error + +# ____________________________________________________________ +# Helper function + +def getcurrent(): + "Returns the current greenlet (i.e. the one which called this function)." + try: + return _tls.current + except AttributeError: + # first call in this thread: current == main + _green_create_main() + return _tls.current + +# ____________________________________________________________ +# The 'greenlet' class + +_continulet = _continuation.continulet + +class greenlet(_continulet): + getcurrent = staticmethod(getcurrent) + error = error + GreenletExit = GreenletExit + __main = False + __started = False + + def __new__(cls, *args, **kwds): + self = _continulet.__new__(cls) + self.parent = getcurrent() + return self + + def __init__(self, run=None, parent=None): + if run is not None: + self.run = run + if parent is not None: + self.parent = parent + + def switch(self, *args): + "Switch execution to this greenlet, optionally passing the values " + "given as argument(s). Returns the value passed when switching back." 
+ return self.__switch(_continulet.switch, args) + + def throw(self, typ=GreenletExit, val=None, tb=None): + "raise exception in greenlet, return value passed when switching back" + return self.__switch(_continulet.throw, typ, val, tb) + + def __switch(target, unbound_method, *args): + current = getcurrent() + # + while not target: + if not target.__started: + _continulet.__init__(target, _greenlet_start, *args) + args = () + target.__started = True + break + # already done, go to the parent instead + # (NB. infinite loop possible, but unlikely, unless you mess + # up the 'parent' explicitly. Good enough, because a Ctrl-C + # will show that the program is caught in this loop here.) + target = target.parent + # + try: + if current.__main: + if target.__main: + # switch from main to main + if unbound_method == _continulet.throw: + raise args[0], args[1], args[2] + (args,) = args + else: + # enter from main to target + args = unbound_method(target, *args) + else: + if target.__main: + # leave to go to target=main + args = unbound_method(current, *args) + else: + # switch from non-main to non-main + args = unbound_method(current, *args, to=target) + except GreenletExit, e: + args = (e,) + finally: + _tls.current = current + # + if len(args) == 1: + return args[0] + else: + return args + + def __nonzero__(self): + return self.__main or _continulet.is_pending(self) + + @property + def dead(self): + return self.__started and not self + + @property + def gr_frame(self): + raise NotImplementedError("attribute 'gr_frame' of greenlet objects") + +# ____________________________________________________________ +# Internal stuff + +try: + from thread import _local +except ImportError: + class _local(object): # assume no threads + pass + +_tls = _local() + +def _green_create_main(): + # create the main greenlet for this thread + _tls.current = None + gmain = greenlet.__new__(greenlet) + gmain._greenlet__main = True + gmain._greenlet__started = True + assert gmain.parent is None + 
_tls.main = gmain + _tls.current = gmain + +def _greenlet_start(greenlet, args): + _tls.current = greenlet + try: + res = greenlet.run(*args) + finally: + if greenlet.parent is not _tls.main: + _continuation.permute(greenlet, greenlet.parent) + return (res,) diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -401,13 +401,19 @@ return "(arg: %s) "%self.arg if "\n" in self.buffer: if lineno == 0: - return self._ps2 + res = self.ps2 elif lineno == self.buffer.count("\n"): - return self._ps4 + res = self.ps4 else: - return self._ps3 + res = self.ps3 else: - return self._ps1 + res = self.ps1 + # Lazily call str() on self.psN, and cache the results using as key + # the object on which str() was called. This ensures that even if the + # same object is used e.g. for ps1 and ps2, str() is called only once. + if res not in self._pscache: + self._pscache[res] = str(res) + return self._pscache[res] def push_input_trans(self, itrans): self.input_trans_stack.append(self.input_trans) @@ -473,8 +479,7 @@ self.pos = 0 self.dirty = 1 self.last_command = None - self._ps1, self._ps2, self._ps3, self._ps4 = \ - map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + self._pscache = {} except: self.restore() raise diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -134,7 +134,7 @@ for child in self._children: subpath = fullpath + "." 
+ child._name toctree.append(subpath) - content.add(Directive("toctree", *toctree, maxdepth=4)) + content.add(Directive("toctree", *toctree, **{'maxdepth': 4})) content.join( ListItem(Strong("name:"), self._name), ListItem(Strong("description:"), self.doc)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -33,7 +33,8 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "_bisect", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi"] + "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_continuation"] )) translation_modules = default_modules.copy() @@ -99,6 +100,7 @@ "_ssl" : ["pypy.module._ssl.interp_ssl"], "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], + "_continuation": ["pypy.rlib.rstacklet"], } def get_module_validator(modname): diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py --- a/pypy/config/test/test_config.py +++ b/pypy/config/test/test_config.py @@ -1,5 +1,5 @@ from pypy.config.config import * -import py +import py, sys def make_description(): gcoption = ChoiceOption('name', 'GC name', ['ref', 'framework'], 'ref') @@ -69,13 +69,15 @@ attrs = dir(config) assert '__repr__' in attrs # from the type assert '_cfgimpl_values' in attrs # from self - assert 'gc' in attrs # custom attribute - assert 'objspace' in attrs # custom attribute + if sys.version_info >= (2, 6): + assert 'gc' in attrs # custom attribute + assert 'objspace' in attrs # custom attribute # attrs = dir(config.gc) - assert 'name' in attrs - assert 'dummy' in attrs - assert 'float' in attrs + if sys.version_info >= (2, 6): + assert 'name' in attrs + assert 'dummy' in attrs + assert 'float' in attrs def test_arbitrary_option(): descr = OptionDescription("top", "", [ diff --git 
a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -28,10 +28,9 @@ translation_optiondescription = OptionDescription( "translation", "Translation Options", [ - BoolOption("stackless", "enable stackless features during compilation", - default=False, cmdline="--stackless", - requires=[("translation.type_system", "lltype"), - ("translation.gcremovetypeptr", False)]), # XXX? + BoolOption("continuation", "enable single-shot continuations", + default=False, cmdline="--continuation", + requires=[("translation.type_system", "lltype")]), ChoiceOption("type_system", "Type system to use when RTyping", ["lltype", "ootype"], cmdline=None, default="lltype", requires={ @@ -70,7 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm")], + "boehm": [("translation.gctransformer", "boehm"), + ("translation.continuation", False)], # breaks "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -389,8 +389,6 @@ config.translation.suggest(withsmallfuncsets=5) elif word == 'jit': config.translation.suggest(jit=True) - if config.translation.stackless: - raise NotImplementedError("JIT conflicts with stackless for now") elif word == 'removetypeptr': config.translation.suggest(gcremovetypeptr=True) else: diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,11 +1,10 @@ .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ -.. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py .. 
_`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ +.. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ -.. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/annotation`: .. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ @@ -55,7 +54,6 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py .. _`pypy/objspace`: .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ .. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py @@ -117,6 +115,7 @@ .. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ .. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ .. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +.. _`pypy/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/src/stacklet/ .. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/ .. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/ .. 
_`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/ diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -153,7 +153,7 @@ * Optionally, `various transformations`_ can then be applied which, for example, perform optimizations such as inlining, add capabilities - such as stackless_-style concurrency, or insert code for the + such as stackless-style concurrency (deprecated), or insert code for the `garbage collector`_. * Then, the graphs are converted to source code for the target platform @@ -255,7 +255,6 @@ .. _Python: http://docs.python.org/reference/ .. _Psyco: http://psyco.sourceforge.net -.. _stackless: stackless.html .. _`generate Just-In-Time Compilers`: jit/index.html .. _`JIT Generation in PyPy`: jit/index.html .. _`implement your own interpreter`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._continuation.txt copy from pypy/doc/config/objspace.usemodules._stackless.txt copy to pypy/doc/config/objspace.usemodules._continuation.txt --- a/pypy/doc/config/objspace.usemodules._stackless.txt +++ b/pypy/doc/config/objspace.usemodules._continuation.txt @@ -1,6 +1,4 @@ -Use the '_stackless' module. +Use the '_continuation' module. -Exposes the `stackless` primitives, and also implies a stackless build. -See also :config:`translation.stackless`. - -.. _`stackless`: ../stackless.html +Exposes the `continulet` app-level primitives. +See also :config:`translation.continuation`. diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._stackless.txt --- a/pypy/doc/config/objspace.usemodules._stackless.txt +++ b/pypy/doc/config/objspace.usemodules._stackless.txt @@ -1,6 +1,1 @@ -Use the '_stackless' module. 
- -Exposes the `stackless` primitives, and also implies a stackless build. -See also :config:`translation.stackless`. - -.. _`stackless`: ../stackless.html +Deprecated. diff --git a/pypy/doc/config/translation.stackless.txt b/pypy/doc/config/translation.continuation.txt rename from pypy/doc/config/translation.stackless.txt rename to pypy/doc/config/translation.continuation.txt --- a/pypy/doc/config/translation.stackless.txt +++ b/pypy/doc/config/translation.continuation.txt @@ -1,5 +1,2 @@ -Run the `stackless transform`_ on each generated graph, which enables the use -of coroutines at RPython level and the "stackless" module when translating -PyPy. - -.. _`stackless transform`: ../stackless.html +Enable the use of a stackless-like primitive called "stacklet". +In PyPy, this is exposed at app-level by the "_continuation" module. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -24,6 +24,7 @@ _bisect _codecs _collections + `_continuation`_ `_ffi`_ _hashlib _io @@ -84,10 +85,6 @@ _winreg - Extra module with Stackless_ only: - - _stackless - Note that only some of these modules are built-in in a typical CPython installation, and the rest is from non built-in extension modules. This means that e.g. ``import parser`` will, on CPython, @@ -108,11 +105,11 @@ .. the nonstandard modules are listed below... .. _`__pypy__`: __pypy__-module.html +.. _`_continuation`: stackless.html .. _`_ffi`: ctypes-implementation.html .. _`_rawffi`: ctypes-implementation.html .. _`_minimal_curses`: config/objspace.usemodules._minimal_curses.html .. _`cpyext`: http://morepypy.blogspot.com/2010/04/using-cpython-extension-modules-with.html -.. _Stackless: stackless.html Differences related to garbage collection strategies diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -315,6 +315,28 @@ .. 
_`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html +--------------------------------------------------------- +Can RPython modules for PyPy be translated independently? +--------------------------------------------------------- + +No, you have to rebuild the entire interpreter. This means two things: + +* It is imperative to use test-driven development. You have to test + exhaustively your module in pure Python, before even attempting to + translate it. Once you translate it, you should have only a few typing + issues left to fix, but otherwise the result should work out of the box. + +* Second, and perhaps most important: do you have a really good reason + for writing the module in RPython in the first place? Nowadays you + should really look at alternatives, like writing it in pure Python, + using ctypes if it needs to call C code. Other alternatives are being + developed too (as of summer 2011), like a Cython binding. + +In this context it is not that important to be able to translate +RPython modules independently of translating the complete interpreter. +(It could be done given enough efforts, but it's a really serious +undertaking. Consider it as quite unlikely for now.) + ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? ---------------------------------------------------------- diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -67,7 +67,6 @@ * ``libssl-dev`` (for the optional ``_ssl`` module) * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) - * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) 3. 
Translation is time-consuming -- 45 minutes on a very fast machine -- @@ -120,19 +119,8 @@ Installation_ below. The ``translate.py`` script takes a very large number of options controlling -what to translate and how. See ``translate.py -h``. Some of the more -interesting options (but for now incompatible with the JIT) are: - - * ``--stackless``: this produces a pypy-c that includes features - inspired by `Stackless Python `__. - - * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid|minimark``: - choose between using - the `Boehm-Demers-Weiser garbage collector`_, our reference - counting implementation or one of own collector implementations - (the default depends on the optimization level but is usually - ``minimark``). - +what to translate and how. See ``translate.py -h``. The default options +should be suitable for mostly everybody by now. Find a more detailed description of the various options in our `configuration sections`_. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -42,7 +42,6 @@ JIT: windows, linux, os/x no JIT: windows, linux, os/x sandbox: linux, os/x - stackless: windows, linux, os/x * write release announcement pypy/doc/release-x.y(.z).txt the release announcement should contain a direct link to the download page diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -35,7 +35,7 @@ * `Differences between PyPy and CPython`_ * `What PyPy can do for your objects`_ - * `Stackless and coroutines`_ + * `Continulets and greenlets`_ * `JIT Generation in PyPy`_ * `Sandboxing Python code`_ @@ -292,8 +292,6 @@ `pypy/translator/jvm/`_ the Java backend -`pypy/translator/stackless/`_ the `Stackless Transform`_ - `pypy/translator/tool/`_ helper tools for translation, including the Pygame `graph viewer`_ @@ -318,7 +316,7 @@ .. _`transparent proxies`: objspace-proxies.html#tproxy .. 
_`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html -.. _`Stackless and coroutines`: stackless.html +.. _`Continulets and greenlets`: stackless.html .. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`rpython`: coding-guide.html#rpython @@ -337,7 +335,6 @@ .. _`low-level type system`: rtyper.html#low-level-type .. _`object-oriented type system`: rtyper.html#oo-type .. _`garbage collector`: garbage_collection.html -.. _`Stackless Transform`: translation.html#the-stackless-transform .. _`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter .. _`.NET`: http://www.microsoft.com/net/ .. _Mono: http://www.mono-project.com/ diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -103,7 +103,7 @@ The meta-interpreter starts interpreting the JIT bytecode. Each operation is executed and then recorded in a list of operations, called the trace. -Operations can have a list of boxes that operate on, arguments. Some operations +Operations can have a list of boxes they operate on, arguments. Some operations (like GETFIELD and GETARRAYITEM) also have special objects that describe how their arguments are laid out in memory. All possible operations generated by tracing are listed in metainterp/resoperation.py. When a (interpreter-level) diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -134,69 +134,6 @@ a hierarchy of Address classes, in a typical static-OO-programming style. -``rstack`` -========== - -The `pypy/rlib/rstack.py`_ module allows an RPython program to control its own execution stack. -This is only useful if the program is translated using stackless. An old -description of the exposed functions is below. 
- -We introduce an RPython type ``frame_stack_top`` and a built-in function -``yield_current_frame_to_caller()`` that work as follows (see example below): - -* The built-in function ``yield_current_frame_to_caller()`` causes the current - function's state to be captured in a new ``frame_stack_top`` object that is - returned to the parent. Only one frame, the current one, is captured this - way. The current frame is suspended and the caller continues to run. Note - that the caller is only resumed once: when - ``yield_current_frame_to_caller()`` is called. See below. - -* A ``frame_stack_top`` object can be jumped to by calling its ``switch()`` - method with no argument. - -* ``yield_current_frame_to_caller()`` and ``switch()`` themselves return a new - ``frame_stack_top`` object: the freshly captured state of the caller of the - source ``switch()`` that was just executed, or None in the case described - below. - -* the function that called ``yield_current_frame_to_caller()`` also has a - normal return statement, like all functions. This statement must return - another ``frame_stack_top`` object. The latter is *not* returned to the - original caller; there is no way to return several times to the caller. - Instead, it designates the place to which the execution must jump, as if by - a ``switch()``. The place to which we jump this way will see a None as the - source frame stack top. - -* every frame stack top must be resumed once and only once. Not resuming - it at all causes a leak. Resuming it several times causes a crash. - -* a function that called ``yield_current_frame_to_caller()`` should not raise. - It would have no implicit parent frame to propagate the exception to. That - would be a crashingly bad idea. 
- -The following example would print the numbers from 1 to 7 in order:: - - def g(): - print 2 - frametop_before_5 = yield_current_frame_to_caller() - print 4 - frametop_before_7 = frametop_before_5.switch() - print 6 - return frametop_before_7 - - def f(): - print 1 - frametop_before_4 = g() - print 3 - frametop_before_6 = frametop_before_4.switch() - print 5 - frametop_after_return = frametop_before_6.switch() - print 7 - assert frametop_after_return is None - - f() - - ``streamio`` ============ diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -8,446 +8,299 @@ ================ PyPy can expose to its user language features similar to the ones -present in `Stackless Python`_: **no recursion depth limit**, and the -ability to write code in a **massively concurrent style**. It actually -exposes three different paradigms to choose from: +present in `Stackless Python`_: the ability to write code in a +**massively concurrent style**. (It does not (any more) offer the +ability to run with no `recursion depth limit`_, but the same effect +can be achieved indirectly.) -* `Tasklets and channels`_; +This feature is based on a custom primitive called a continulet_. +Continulets can be directly used by application code, or it is possible +to write (entirely at app-level) more user-friendly interfaces. -* Greenlets_; +Currently PyPy implements greenlets_ on top of continulets. It would be +easy to implement tasklets and channels as well, emulating the model +of `Stackless Python`_. -* Plain coroutines_. +Continulets are extremely light-weight, which means that PyPy should be +able to handle programs containing large amounts of them. However, due +to an implementation restriction, a PyPy compiled with +``--gcrootfinder=shadowstack`` consumes at least one page of physical +memory (4KB) per live continulet, and half a megabyte of virtual memory +on 32-bit or a complete megabyte on 64-bit. 
Moreover, the feature is +only available (so far) on x86 and x86-64 CPUs; for other CPUs you need +to add a short page of custom assembler to +`pypy/translator/c/src/stacklet/`_. -All of them are extremely light-weight, which means that PyPy should be -able to handle programs containing large amounts of coroutines, tasklets -and greenlets. +Theory +====== -Requirements -++++++++++++++++ +The fundamental idea is that, at any point in time, the program happens +to run one stack of frames (or one per thread, in case of +multi-threading). To see the stack, start at the top frame and follow +the chain of ``f_back`` until you reach the bottom frame. From the +point of view of one of these frames, it has a ``f_back`` pointing to +another frame (unless it is the bottom frame), and it is itself being +pointed to by another frame (unless it is the top frame). -If you are running py.py on top of CPython, then you need to enable -the _stackless module by running it as follows:: +The theory behind continulets is to literally take the previous sentence +as definition of "an O.K. situation". The trick is that there are +O.K. situations that are more complex than just one stack: you will +always have one stack, but you can also have in addition one or more +detached *cycles* of frames, such that by following the ``f_back`` chain +you run in a circle. But note that these cycles are indeed completely +detached: the top frame (the currently running one) is always the one +which is not the ``f_back`` of anybody else, and it is always the top of +a stack that ends with the bottom frame, never a part of these extra +cycles. - py.py --withmod-_stackless +How do you create such cycles? The fundamental operation to do so is to +take two frames and *permute* their ``f_back`` --- i.e. exchange them. +You can permute any two ``f_back`` without breaking the rule of "an O.K. +situation". 
Say for example that ``f`` is some frame halfway down the +stack, and you permute its ``f_back`` with the ``f_back`` of the top +frame. Then you have removed from the normal stack all intermediate +frames, and turned them into one stand-alone cycle. By doing the same +permutation again you restore the original situation. -This is implemented internally using greenlets, so it only works on a -platform where `greenlets`_ are supported. A few features do -not work this way, though, and really require a translated -``pypy-c``. +In practice, in PyPy, you cannot change the ``f_back`` of an arbitrary +frame, but only of frames stored in ``continulets``. -To obtain a translated version of ``pypy-c`` that includes Stackless -support, run translate.py as follows:: - - cd pypy/translator/goal - python translate.py --stackless +Continulets are internally implemented using stacklets. Stacklets are a +bit more primitive (they are really one-shot continuations), but that +idea only works in C, not in Python. The basic idea of continulets is +to have at any point in time a complete valid stack; this is important +e.g. to correctly propagate exceptions (and it seems to give meaningful +tracebacks too). Application level interface ============================= -A stackless PyPy contains a module called ``stackless``. The interface -exposed by this module have not been refined much, so it should be -considered in-flux (as of 2007). -So far, PyPy does not provide support for ``stackless`` in a threaded -environment. This limitation is not fundamental, as previous experience -has shown, so supporting this would probably be reasonably easy. +.. _continulet: -An interesting point is that the same ``stackless`` module can provide -a number of different concurrency paradigms at the same time. 
From a -theoretical point of view, none of above-mentioned existing three -paradigms considered on its own is new: two of them are from previous -Python work, and the third one is a variant of the classical coroutine. -The new part is that the PyPy implementation manages to provide all of -them and let the user implement more. Moreover - and this might be an -important theoretical contribution of this work - we manage to provide -these concurrency concepts in a "composable" way. In other words, it -is possible to naturally mix in a single application multiple -concurrency paradigms, and multiple unrelated usages of the same -paradigm. This is discussed in the Composability_ section below. +Continulets ++++++++++++ +A translated PyPy contains by default a module called ``_continuation`` +exporting the type ``continulet``. A ``continulet`` object from this +module is a container that stores a "one-shot continuation". It plays +the role of an extra frame you can insert in the stack, and whose +``f_back`` can be changed. -Infinite recursion -++++++++++++++++++ +To make a continulet object, call ``continulet()`` with a callable and +optional extra arguments. -Any stackless PyPy executable natively supports recursion that is only -limited by the available memory. As in normal Python, though, there is -an initial recursion limit (which is 5000 in all pypy-c's, and 1000 in -CPython). It can be changed with ``sys.setrecursionlimit()``. With a -stackless PyPy, any value is acceptable - use ``sys.maxint`` for -unlimited. +Later, the first time you ``switch()`` to the continulet, the callable +is invoked with the same continulet object as the extra first argument. +At that point, the one-shot continuation stored in the continulet points +to the caller of ``switch()``. In other words you have a perfectly +normal-looking stack of frames. 
But when ``switch()`` is called again, +this stored one-shot continuation is exchanged with the current one; it +means that the caller of ``switch()`` is suspended with its continuation +stored in the container, and the old continuation from the continulet +object is resumed. -In some cases, you can write Python code that causes interpreter-level -infinite recursion -- i.e. infinite recursion without going via -application-level function calls. It is possible to limit that too, -with ``_stackless.set_stack_depth_limit()``, or to unlimit it completely -by setting it to ``sys.maxint``. +The most primitive API is actually 'permute()', which just permutes the +one-shot continuation stored in two (or more) continulets. +In more details: -Coroutines -++++++++++ +* ``continulet(callable, *args, **kwds)``: make a new continulet. + Like a generator, this only creates it; the ``callable`` is only + actually called the first time it is switched to. It will be + called as follows:: -A Coroutine is similar to a very small thread, with no preemptive scheduling. -Within a family of coroutines, the flow of execution is explicitly -transferred from one to another by the programmer. When execution is -transferred to a coroutine, it begins to execute some Python code. When -it transfers execution away from itself it is temporarily suspended, and -when execution returns to it it resumes its execution from the -point where it was suspended. Conceptually, only one coroutine is -actively running at any given time (but see Composability_ below). + callable(cont, *args, **kwds) -The ``stackless.coroutine`` class is instantiated with no argument. -It provides the following methods and attributes: + where ``cont`` is the same continulet object. -* ``stackless.coroutine.getcurrent()`` + Note that it is actually ``cont.__init__()`` that binds + the continulet. 
It is also possible to create a not-bound-yet + continulet by calling explicitly ``continulet.__new__()``, and + only bind it later by calling explicitly ``cont.__init__()``. - Static method returning the currently running coroutine. There is a - so-called "main" coroutine object that represents the "outer" - execution context, where your main program started and where it runs - as long as it does not switch to another coroutine. +* ``cont.switch(value=None, to=None)``: start the continulet if + it was not started yet. Otherwise, store the current continuation + in ``cont``, and activate the target continuation, which is the + one that was previously stored in ``cont``. Note that the target + continuation was itself previously suspended by another call to + ``switch()``; this older ``switch()`` will now appear to return. + The ``value`` argument is any object that is carried to the target + and returned by the target's ``switch()``. -* ``coro.bind(callable, *args, **kwds)`` + If ``to`` is given, it must be another continulet object. In + that case, performs a "double switch": it switches as described + above to ``cont``, and then immediately switches again to ``to``. + This is different from switching directly to ``to``: the current + continuation gets stored in ``cont``, the old continuation from + ``cont`` gets stored in ``to``, and only then we resume the + execution from the old continuation out of ``to``. - Bind the coroutine so that it will execute ``callable(*args, - **kwds)``. The call is not performed immediately, but only the - first time we call the ``coro.switch()`` method. A coroutine must - be bound before it is switched to. When the coroutine finishes - (because the call to the callable returns), the coroutine exits and - implicitly switches back to another coroutine (its "parent"); after - this point, it is possible to bind it again and switch to it again. 
- (Which coroutine is the parent of which is not documented, as it is - likely to change when the interface is refined.) +* ``cont.throw(type, value=None, tb=None, to=None)``: similar to + ``switch()``, except that immediately after the switch is done, raise + the given exception in the target. -* ``coro.switch()`` +* ``cont.is_pending()``: return True if the continulet is pending. + This is False when it is not initialized (because we called + ``__new__`` and not ``__init__``) or when it is finished (because + the ``callable()`` returned). When it is False, the continulet + object is empty and cannot be ``switch()``-ed to. - Suspend the current (caller) coroutine, and resume execution in the - target coroutine ``coro``. +* ``permute(*continulets)``: a global function that permutes the + continuations stored in the given continulets arguments. Mostly + theoretical. In practice, using ``cont.switch()`` is easier and + more efficient than using ``permute()``; the latter does not on + its own change the currently running frame. -* ``coro.kill()`` - Kill ``coro`` by sending a CoroutineExit exception and switching - execution immediately to it. This exception can be caught in the - coroutine itself and can be raised from any call to ``coro.switch()``. - This exception isn't propagated to the parent coroutine. +Genlets ++++++++ -* ``coro.throw(type, value)`` +The ``_continuation`` module also exposes the ``generator`` decorator:: - Insert an exception in ``coro`` an resume switches execution - immediately to it. In the coroutine itself, this exception - will come from any call to ``coro.switch()`` and can be caught. If the - exception isn't caught, it will be propagated to the parent coroutine. + @generator + def f(cont, a, b): + cont.switch(a + b) + cont.switch(a + b + 1) -When a coroutine is garbage-collected, it gets the ``.kill()`` method sent to -it. 
This happens at the point the next ``.switch`` method is called, so the -target coroutine of this call will be executed only after the ``.kill`` has -finished. + for i in f(10, 20): + print i -Example -~~~~~~~ +This example prints 30 and 31. The only advantage over using regular +generators is that the generator itself is not limited to ``yield`` +statements that must all occur syntactically in the same function. +Instead, we can pass around ``cont``, e.g. to nested sub-functions, and +call ``cont.switch(x)`` from there. -Here is a classical producer/consumer example: an algorithm computes a -sequence of values, while another consumes them. For our purposes we -assume that the producer can generate several values at once, and the -consumer can process up to 3 values in a batch - it can also process -batches with fewer than 3 values without waiting for the producer (which -would be messy to express with a classical Python generator). :: +The ``generator`` decorator can also be applied to methods:: - def producer(lst): - while True: - ...compute some more values... - lst.extend(new_values) - coro_consumer.switch() - - def consumer(lst): - while True: - # First ask the producer for more values if needed - while len(lst) == 0: - coro_producer.switch() - # Process the available values in a batch, but at most 3 - batch = lst[:3] - del lst[:3] - ...process batch... - - # Initialize two coroutines with a shared list as argument - exchangelst = [] - coro_producer = coroutine() - coro_producer.bind(producer, exchangelst) - coro_consumer = coroutine() - coro_consumer.bind(consumer, exchangelst) - - # Start running the consumer coroutine - coro_consumer.switch() - - -Tasklets and channels -+++++++++++++++++++++ - -The ``stackless`` module also provides an interface that is roughly -compatible with the interface of the ``stackless`` module in `Stackless -Python`_: it contains ``stackless.tasklet`` and ``stackless.channel`` -classes. 
Tasklets are also similar to microthreads, but (like coroutines) -they don't actually run in parallel with other microthreads; instead, -they synchronize and exchange data with each other over Channels, and -these exchanges determine which Tasklet runs next. - -For usage reference, see the documentation on the `Stackless Python`_ -website. - -Note that Tasklets and Channels are implemented at application-level in -`lib_pypy/stackless.py`_ on top of coroutines_. You can refer to this -module for more details and API documentation. - -The stackless.py code tries to resemble the stackless C code as much -as possible. This makes the code somewhat unpythonic. - -Bird's eye view of tasklets and channels -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Tasklets are a bit like threads: they encapsulate a function in such a way that -they can be suspended/restarted any time. Unlike threads, they won't -run concurrently, but must be cooperative. When using stackless -features, it is vitally important that no action is performed that blocks -everything else. In particular, blocking input/output should be centralized -to a single tasklet. - -Communication between tasklets is done via channels. -There are three ways for a tasklet to give up control: - -1. call ``stackless.schedule()`` -2. send something over a channel -3. receive something from a channel - -A (live) tasklet can either be running, waiting to get scheduled, or be -blocked by a channel. - -Scheduling is done in strictly round-robin manner. A blocked tasklet -is removed from the scheduling queue and will be reinserted when it -becomes unblocked. - -Example -~~~~~~~ - -Here is a many-producers many-consumers example, where any consumer can -process the result of any producer. For this situation we set up a -single channel where all producer send, and on which all consumers -wait:: - - def producer(chan): - while True: - chan.send(...next value...) 
- - def consumer(chan): - while True: - x = chan.receive() - ...do something with x... - - # Set up the N producer and M consumer tasklets - common_channel = stackless.channel() - for i in range(N): - stackless.tasklet(producer, common_channel)() - for i in range(M): - stackless.tasklet(consumer, common_channel)() - - # Run it all - stackless.run() - -Each item sent over the channel is received by one of the waiting -consumers; which one is not specified. The producers block until their -item is consumed: the channel is not a queue, but rather a meeting point -which causes tasklets to block until both a consumer and a producer are -ready. In practice, the reason for having several consumers receiving -on a single channel is that some of the consumers can be busy in other -ways part of the time. For example, each consumer might receive a -database request, process it, and send the result to a further channel -before it asks for the next request. In this situation, further -requests can still be received by other consumers. + class X: + @generator + def f(self, cont, a, b): + ... Greenlets +++++++++ -A Greenlet is a kind of primitive Tasklet with a lower-level interface -and with exact control over the execution order. Greenlets are similar -to Coroutines, with a slightly different interface: greenlets put more -emphasis on a tree structure. The various greenlets of a program form a -precise tree, which fully determines their order of execution. +Greenlets are implemented on top of continulets in `lib_pypy/greenlet.py`_. +See the official `documentation of the greenlets`_. -For usage reference, see the `documentation of the greenlets`_. -The PyPy interface is identical. You should use ``greenlet.greenlet`` -instead of ``stackless.greenlet`` directly, because the greenlet library -can give you the latter when you ask for the former on top of PyPy. 
+Note that unlike the CPython greenlets, this version does not suffer +from GC issues: if the program "forgets" an unfinished greenlet, it will +always be collected at the next garbage collection. -PyPy's greenlets do not suffer from the cyclic GC limitation that the -CPython greenlets have: greenlets referencing each other via local -variables tend to leak on top of CPython (where it is mostly impossible -to do the right thing). It works correctly on top of PyPy. +Unimplemented features +++++++++++++++++++++++ -Coroutine Pickling -++++++++++++++++++ +The following features (present in some past Stackless version of PyPy) +are for the time being not supported any more: -Coroutines and tasklets can be pickled and unpickled, i.e. serialized to -a string of bytes for the purpose of storage or transmission. This -allows "live" coroutines or tasklets to be made persistent, moved to -other machines, or cloned in any way. The standard ``pickle`` module -works with coroutines and tasklets (at least in a translated ``pypy-c``; -unpickling live coroutines or tasklets cannot be easily implemented on -top of CPython). +* Tasklets and channels (currently ``stackless.py`` seems to import, + but you have tasklets on top of coroutines on top of greenlets on + top of continulets on top of stacklets, and it's probably not too + hard to cut two of these levels by adapting ``stackless.py`` to + use directly continulets) -To be able to achieve this result, we have to consider many objects that -are not normally pickleable in CPython. Here again, the `Stackless -Python`_ implementation has paved the way, and we follow the same -general design decisions: simple internal objects like bound method -objects and various kinds of iterators are supported; frame objects can -be fully pickled and unpickled -(by serializing a reference to the bytecode they are -running in addition to all the local variables). 
References to globals -and modules are pickled by name, similarly to references to functions -and classes in the traditional CPython ``pickle``. +* Coroutines (could be rewritten at app-level) -The "magic" part of this process is the implementation of the unpickling -of a chain of frames. The Python interpreter of PyPy uses -interpreter-level recursion to represent application-level calls. The -reason for this is that it tremendously simplifies the implementation of -the interpreter itself. Indeed, in Python, almost any operation can -potentially result in a non-tail-recursive call to another Python -function. This makes writing a non-recursive interpreter extremely -tedious; instead, we rely on lower-level transformations during the -translation process to control this recursion. This is the `Stackless -Transform`_, which is at the heart of PyPy's support for stackless-style -concurrency. +* Pickling and unpickling continulets (*) -At any point in time, a chain of Python-level frames corresponds to a -chain of interpreter-level frames (e.g. C frames in pypy-c), where each -single Python-level frame corresponds to one or a few interpreter-level -frames - depending on the length of the interpreter-level call chain -from one bytecode evaluation loop to the next (recursively invoked) one. +* Continuing execution of a continulet in a different thread (*) -This means that it is not sufficient to simply create a chain of Python -frame objects in the heap of a process before we can resume execution of -these newly built frames. We must recreate a corresponding chain of -interpreter-level frames. To this end, we have inserted a few *named -resume points* (see 3.2.4, in `D07.1 Massive Parallelism and Translation Aspects`_) in the Python interpreter of PyPy. 
This is the -motivation for implementing the interpreter-level primitives -``resume_state_create()`` and ``resume_state_invoke()``, the powerful -interface that allows an RPython program to artificially rebuild a chain -of calls in a reflective way, completely from scratch, and jump to it. +* Automatic unlimited stack (must be emulated__ so far) -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +* Support for other CPUs than x86 and x86-64 -Example -~~~~~~~ +* The app-level ``f_back`` field of frames crossing continulet boundaries + is None for now, unlike what I explain in the theoretical overview + above. It mostly means that in a ``pdb.set_trace()`` you cannot go + ``up`` past continulet boundaries. This could be fixed. -(See `demo/pickle_coroutine.py`_ for the complete source of this demo.) +.. __: `recursion depth limit`_ -Consider a program which contains a part performing a long-running -computation:: +(*) Pickling, as well as changing threads, could be implemented by using +a "soft" stack switching mode again. We would get either "hard" or +"soft" switches, similarly to Stackless Python 3rd version: you get a +"hard" switch (like now) when the C stack contains non-trivial C frames +to save, and a "soft" switch (like previously) when it contains only +simple calls from Python to Python. Soft-switched continulets would +also consume a bit less RAM, and the switch might be a bit faster too +(unsure about that; what is the Stackless Python experience?). - def ackermann(x, y): - if x == 0: - return y + 1 - if y == 0: - return ackermann(x - 1, 1) - return ackermann(x - 1, ackermann(x, y - 1)) -By using pickling, we can save the state of the computation while it is -running, for the purpose of restoring it later and continuing the -computation at another time or on a different machine. 
However, -pickling does not produce a whole-program dump: it can only pickle -individual coroutines. This means that the computation should be -started in its own coroutine:: +Recursion depth limit ++++++++++++++++++++++ - # Make a coroutine that will run 'ackermann(3, 8)' - coro = coroutine() - coro.bind(ackermann, 3, 8) +You can use continulets to emulate the infinite recursion depth present +in Stackless Python and in stackless-enabled older versions of PyPy. - # Now start running the coroutine - result = coro.switch() +The trick is to start a continulet "early", i.e. when the recursion +depth is very low, and switch to it "later", i.e. when the recursion +depth is high. Example:: -The coroutine itself must switch back to the main program when it needs -to be interrupted (we can only pickle suspended coroutines). Due to -current limitations this requires an explicit check in the -``ackermann()`` function:: + from _continuation import continulet - def ackermann(x, y): - if interrupt_flag: # test a global flag - main.switch() # and switch back to 'main' if it is set - if x == 0: - return y + 1 - if y == 0: - return ackermann(x - 1, 1) - return ackermann(x - 1, ackermann(x, y - 1)) + def invoke(_, callable, arg): + return callable(arg) -The global ``interrupt_flag`` would be set for example by a timeout, or -by a signal handler reacting to Ctrl-C, etc. It causes the coroutine to -transfer control back to the main program. The execution comes back -just after the line ``coro.switch()``, where we can pickle the coroutine -if necessary:: + def bootstrap(c): + # this loop runs forever, at a very low recursion depth + callable, arg = c.switch() + while True: + # start a new continulet from here, and switch to + # it using an "exchange", i.e. a switch with to=. 
+ to = continulet(invoke, callable, arg) + callable, arg = c.switch(to=to) - if not coro.is_alive: - print "finished; the result is:", result - else: - # save the state of the suspended coroutine - f = open('demo.pickle', 'w') - pickle.dump(coro, f) - f.close() + c = continulet(bootstrap) + c.switch() -The process can then stop. At any later time, or on another machine, -we can reload the file and restart the coroutine with:: - f = open('demo.pickle', 'r') - coro = pickle.load(f) - f.close() - result = coro.switch() + def recursive(n): + if n == 0: + return ("ok", n) + if n % 200 == 0: + prev = c.switch((recursive, n - 1)) + else: + prev = recursive(n - 1) + return (prev[0], prev[1] + 1) -Limitations -~~~~~~~~~~~ + print recursive(999999) # prints ('ok', 999999) -Coroutine pickling is subject to some limitations. First of all, it is -not a whole-program "memory dump". It means that only the "local" state -of a coroutine is saved. The local state is defined to include the -chain of calls and the local variables, but not for example the value of -any global variable. +Note that if you press Ctrl-C while running this example, the traceback +will be built with *all* recursive() calls so far, even if this is more +than the number that can possibly fit in the C stack. These frames are +"overlapping" each other in the sense of the C stack; more precisely, +they are copied out of and into the C stack as needed. -As in normal Python, the pickle will not include any function object's -code, any class definition, etc., but only references to functions and -classes. Unlike normal Python, the pickle contains frames. A pickled -frame stores a bytecode index, representing the current execution -position. This means that the user program cannot be modified *at all* -between pickling and unpickling! 
+(The example above also makes use of the following general "guideline" +to help newcomers write continulets: in ``bootstrap(c)``, only call +methods on ``c``, not on another continulet object. That's why we wrote +``c.switch(to=to)`` and not ``to.switch()``, which would mess up the +state. This is however just a guideline; in general we would recommend +to use other interfaces like genlets and greenlets.) -On the other hand, the pickled data is fairly independent from the -platform and from the PyPy version. -Pickling/unpickling fails if the coroutine is suspended in a state that -involves Python frames which were *indirectly* called. To define this -more precisely, a Python function can issue a regular function or method -call to invoke another Python function - this is a *direct* call and can -be pickled and unpickled. But there are many ways to invoke a Python -function indirectly. For example, most operators can invoke a special -method ``__xyz__()`` on a class, various built-in functions can call -back Python functions, signals can invoke signal handlers, and so on. -These cases are not supported yet. - - -Composability -+++++++++++++ +Theory of composability ++++++++++++++++++++++++ Although the concept of coroutines is far from new, they have not been generally integrated into mainstream languages, or only in limited form (like generators in Python and iterators in C#). We can argue that a possible reason for that is that they do not scale well when a program's complexity increases: they look attractive in small examples, but the -models that require explicit switching, by naming the target coroutine, -do not compose naturally. This means that a program that uses -coroutines for two unrelated purposes may run into conflicts caused by -unexpected interactions. +models that require explicit switching, for example by naming the target +coroutine, do not compose naturally. 
This means that a program that +uses coroutines for two unrelated purposes may run into conflicts caused +by unexpected interactions. To illustrate the problem, consider the following example (simplified -code; see the full source in -`pypy/module/_stackless/test/test_composable_coroutine.py`_). First, a -simple usage of coroutine:: +code using a theoretical ``coroutine`` class). First, a simple usage of +coroutine:: main_coro = coroutine.getcurrent() # the main (outer) coroutine data = [] @@ -530,74 +383,35 @@ main coroutine, which confuses the ``generator_iterator.next()`` method (it gets resumed, but not as a result of a call to ``Yield()``). -As part of trying to combine multiple different paradigms into a single -application-level module, we have built a way to solve this problem. -The idea is to avoid the notion of a single, global "main" coroutine (or -a single main greenlet, or a single main tasklet). Instead, each -conceptually separated user of one of these concurrency interfaces can -create its own "view" on what the main coroutine/greenlet/tasklet is, -which other coroutine/greenlet/tasklets there are, and which of these is -the currently running one. Each "view" is orthogonal to the others. In -particular, each view has one (and exactly one) "current" -coroutine/greenlet/tasklet at any point in time. When the user switches -to a coroutine/greenlet/tasklet, it implicitly means that he wants to -switch away from the current coroutine/greenlet/tasklet *that belongs to -the same view as the target*. +Thus the notion of coroutine is *not composable*. By opposition, the +primitive notion of continulets is composable: if you build two +different interfaces on top of it, or have a program that uses twice the +same interface in two parts, then assuming that both parts independently +work, the composition of the two parts still works. 
-The precise application-level interface has not been fixed yet; so far, -"views" in the above sense are objects of the type -``stackless.usercostate``. The above two examples can be rewritten in -the following way:: +A full proof of that claim would require careful definitions, but let us +just claim that this fact is true because of the following observation: +the API of continulets is such that, when doing a ``switch()``, it +requires the program to have some continulet to explicitly operate on. +It shuffles the current continuation with the continuation stored in +that continulet, but has no effect outside. So if a part of a program +has a continulet object, and does not expose it as a global, then the +rest of the program cannot accidentally influence the continuation +stored in that continulet object. - producer_view = stackless.usercostate() # a local view - main_coro = producer_view.getcurrent() # the main (outer) coroutine - ... - producer_coro = producer_view.newcoroutine() - ... - -and:: - - generators_view = stackless.usercostate() - - def generator(f): - def wrappedfunc(*args, **kwds): - g = generators_view.newcoroutine(generator_iterator) - ... - - ...generators_view.getcurrent()... - -Then the composition ``grab_values()`` works as expected, because the -two views are independent. The coroutine captured as ``self.caller`` in -the ``generator_iterator.next()`` method is the main coroutine of the -``generators_view``. It is no longer the same object as the main -coroutine of the ``producer_view``, so when ``data_producer()`` issues -the following command:: - - main_coro.switch() - -the control flow cannot accidentally jump back to -``generator_iterator.next()``. In other words, from the point of view -of ``producer_view``, the function ``grab_next_value()`` always runs in -its main coroutine ``main_coro`` and the function ``data_producer`` in -its coroutine ``producer_coro``. 
This is the case independently of -which ``generators_view``-based coroutine is the current one when -``grab_next_value()`` is called. - -Only code that has explicit access to the ``producer_view`` or its -coroutine objects can perform switches that are relevant for the -generator code. If the view object and the coroutine objects that share -this view are all properly encapsulated inside the generator logic, no -external code can accidentally temper with the expected control flow any -longer. - -In conclusion: we will probably change the app-level interface of PyPy's -stackless module in the future to not expose coroutines and greenlets at -all, but only views. They are not much more difficult to use, and they -scale automatically to larger programs. +In other words, if we regard the continulet object as being essentially +a modifiable ``f_back``, then it is just a link between the frame of +``callable()`` and the parent frame --- and it cannot be arbitrarily +changed by unrelated code, as long as they don't explicitly manipulate +the continulet object. Typically, both the frame of ``callable()`` +(commonly a local function) and its parent frame (which is the frame +that switched to it) belong to the same class or module; so from that +point of view the continulet is a purely local link between two local +frames. It doesn't make sense to have a concept that allows this link +to be manipulated from outside. .. _`Stackless Python`: http://www.stackless.com .. _`documentation of the greenlets`: http://packages.python.org/greenlet/ -.. _`Stackless Transform`: translation.html#the-stackless-transform .. include:: _ref.txt diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -552,14 +552,15 @@ The stackless transform converts functions into a form that knows how to save the execution point and active variables into a heap structure -and resume execution at that point. 
This is used to implement +and resume execution at that point. This was used to implement coroutines as an RPython-level feature, which in turn are used to -implement `coroutines, greenlets and tasklets`_ as an application +implement coroutines, greenlets and tasklets as an application level feature for the Standard Interpreter. -Enable the stackless transformation with :config:`translation.stackless`. +The stackless transformation has been deprecated and is no longer +available in trunk. It has been replaced with continulets_. -.. _`coroutines, greenlets and tasklets`: stackless.html +.. _continulets: stackless.html .. _`preparing the graphs for source generation`: diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -626,9 +626,9 @@ self.default_compiler = compiler return compiler - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): "Create an empty PyFrame suitable for this code object." 
- return self.FrameClass(self, code, w_globals, closure) + return self.FrameClass(self, code, w_globals, outer_func) def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -30,7 +30,7 @@ can_change_code = True _immutable_fields_ = ['code?', 'w_func_globals?', - 'closure?', + 'closure?[*]', 'defs_w?[*]', 'name?'] @@ -96,7 +96,7 @@ assert isinstance(code, PyCode) if nargs < 5: new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in funccallunrolling: if i < nargs: new_frame.locals_stack_w[i] = args_w[i] @@ -156,7 +156,7 @@ def _flat_pycall(self, code, nargs, frame): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg @@ -167,7 +167,7 @@ def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -8,7 +8,7 @@ class Cell(Wrappable): "A simple container for a wrapped value." - + def __init__(self, w_value=None): self.w_value = w_value @@ -90,32 +90,33 @@ # variables coming from a parent function in which i'm nested # 'closure' is a list of Cell instances: the received free vars. 
- cells = None - @jit.unroll_safe - def initialize_frame_scopes(self, closure, code): - super_initialize_frame_scopes(self, closure, code) + def initialize_frame_scopes(self, outer_func, code): + super_initialize_frame_scopes(self, outer_func, code) ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: + self.cells = [] return # no self.cells needed - fast path - if closure is None: - closure = [] - elif closure is None: + elif outer_func is None: space = self.space raise OperationError(space.w_TypeError, space.wrap("directly executed code object " "may not contain free variables")) - if len(closure) != nfreevars: + if outer_func and outer_func.closure: + closure_size = len(outer_func.closure) + else: + closure_size = 0 + if closure_size != nfreevars: raise ValueError("code object received a closure with " "an unexpected number of free variables") self.cells = [None] * (ncellvars + nfreevars) for i in range(ncellvars): self.cells[i] = Cell() for i in range(nfreevars): - self.cells[i + ncellvars] = closure[i] - + self.cells[i + ncellvars] = outer_func.closure[i] + def _getcells(self): return self.cells diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -198,7 +198,7 @@ def funcrun(self, func, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, @@ -211,7 +211,7 @@ def funcrun_obj(self, func, w_obj, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -51,7 +51,7 @@ is_being_profiled = False escaped = False # see mark_as_escaped() - def __init__(self, 
space, code, w_globals, closure): + def __init__(self, space, code, w_globals, outer_func): if not we_are_translated(): assert type(self) in (space.FrameClass, CPythonFrame), ( "use space.FrameClass(), not directly PyFrame()") @@ -70,7 +70,7 @@ self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. - self.initialize_frame_scopes(closure, code) + self.initialize_frame_scopes(outer_func, code) self.f_lineno = code.co_firstlineno def mark_as_escaped(self): @@ -117,8 +117,8 @@ return self.builtin else: return self.space.builtin - - def initialize_frame_scopes(self, closure, code): + + def initialize_frame_scopes(self, outer_func, code): # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. # CO_NEWLOCALS: make a locals dict unless optimized is also set @@ -385,7 +385,11 @@ # do not use the instance's __init__ but the base's, because we set # everything like cells from here - PyFrame.__init__(self, space, pycode, w_globals, closure) + # XXX hack + from pypy.interpreter.function import Function + outer_func = Function(space, None, closure=closure, + forcename="fake") + PyFrame.__init__(self, space, pycode, w_globals, outer_func) f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True) new_frame.f_backref = jit.non_virtual_ref(f_back) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -704,7 +704,7 @@ class TestPassThroughArguments_CALL_METHOD(TestPassThroughArguments): def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',), **{ + space = gettestobjspace(usemodules=('itertools',), **{ "objspace.opcodes.CALL_METHOD": True }) cls.space = space diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ 
b/pypy/jit/backend/llgraph/runner.py @@ -25,13 +25,14 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types self.count_fields_if_immut = count_fields_if_immut + self.ffi_flags = ffi_flags def get_arg_types(self): return self.arg_types @@ -67,6 +68,9 @@ def count_fields_if_immutable(self): return self.count_fields_if_immut + def get_ffi_flags(self): + return self.ffi_flags + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -114,14 +118,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): key = (ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) try: return self._descrs[key] except KeyError: descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) self._descrs[key] = descr return descr @@ -312,7 +316,7 @@ token = history.getkind(getattr(S, fieldname)) return self.getdescr(ofs, token[0], name=fieldname) - def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): arg_types = [] for ARG in ARGS: token = history.getkind(ARG) @@ -326,7 +330,7 @@ return self.getdescr(0, token[0], extrainfo=extrainfo, arg_types=''.join(arg_types)) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo=None): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind arg_types = [] @@ -339,7 +343,8 @@ except UnsupportedKind: 
return None return self.getdescr(0, reskind, extrainfo=extrainfo, - arg_types=''.join(arg_types)) + arg_types=''.join(arg_types), + ffi_flags=ffi_flags) def grab_exc_value(self): @@ -522,7 +527,7 @@ return FieldDescr.new(T1, fieldname) @staticmethod - def calldescrof(FUNC, ARGS, RESULT, extrainfo=None): + def calldescrof(FUNC, ARGS, RESULT, extrainfo): return StaticMethDescr.new(FUNC, ARGS, RESULT, extrainfo) @staticmethod diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -260,10 +260,12 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack + ffi_flags = 0 - def __init__(self, arg_classes, extrainfo=None): + def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo + self.ffi_flags = ffi_flags def __repr__(self): res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) @@ -284,6 +286,13 @@ def get_extra_info(self): return self.extrainfo + def get_ffi_flags(self): + return self.ffi_flags + + def get_call_conv(self): + from pypy.rlib.clibffi import get_call_conv + return get_call_conv(self.ffi_flags, True) + def get_arg_types(self): return self.arg_classes @@ -391,8 +400,8 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo) + def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) self._result_sign = result_sign diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -8,7 +8,7 @@ class 
UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: @@ -20,18 +20,24 @@ if reskind == history.INT: size = intmask(ffi_result.c_size) signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo) + return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo) + return NonGcPtrCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo) + return FloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo) + return VoidCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo) + return LongLongCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == 'S': SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo) + return SingleFloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) assert False def get_ffi_type_kind(cpu, ffi_type): diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -366,36 +366,92 @@ def add_jit2gc_hooks(self, jit2gc): # - def collect_jit_stack_root(callback, gc, addr): - if addr.signed[0] != GcRootMap_shadowstack.MARKER: - # common case - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - return WORD - else: - # case of a MARKER followed by an assembler stack frame - follow_stack_frame_of_assembler(callback, gc, addr) - return 2 * WORD + # --------------- + # This 
is used to enumerate the shadowstack in the presence + # of the JIT. It is also used by the stacklet support in + # rlib/_stacklet_shadowstack. That's why it is written as + # an iterator that can also be used with a custom_trace. # - def follow_stack_frame_of_assembler(callback, gc, addr): - frame_addr = addr.signed[1] - addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) - force_index = addr.signed[0] - if force_index < 0: - force_index = ~force_index - callshape = self._callshapes[force_index] - n = 0 - while True: - offset = rffi.cast(lltype.Signed, callshape[n]) - if offset == 0: - break - addr = llmemory.cast_int_to_adr(frame_addr + offset) - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - n += 1 + class RootIterator: + _alloc_flavor_ = "raw" + + def next(iself, gc, next, range_highest): + # Return the "next" valid GC object' address. This usually + # means just returning "next", until we reach "range_highest", + # except that we are skipping NULLs. If "next" contains a + # MARKER instead, then we go into JIT-frame-lookup mode. + # + while True: + # + # If we are not iterating right now in a JIT frame + if iself.frame_addr == 0: + # + # Look for the next shadowstack address that + # contains a valid pointer + while next != range_highest: + if next.signed[0] == self.MARKER: + break + if gc.points_to_valid_gc_object(next): + return next + next += llmemory.sizeof(llmemory.Address) + else: + return llmemory.NULL # done + # + # It's a JIT frame. Save away 'next' for later, and + # go into JIT-frame-exploring mode. 
+ next += llmemory.sizeof(llmemory.Address) + frame_addr = next.signed[0] + iself.saved_next = next + iself.frame_addr = frame_addr + addr = llmemory.cast_int_to_adr(frame_addr + + self.force_index_ofs) + addr = iself.translateptr(iself.context, addr) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + # NB: the next line reads a still-alive _callshapes, + # because we ensure that just before we called this + # piece of assembler, we put on the (same) stack a + # pointer to a loop_token that keeps the force_index + # alive. + callshape = self._callshapes[force_index] + else: + # Continuing to explore this JIT frame + callshape = iself.callshape + # + # 'callshape' points to the next INT of the callshape. + # If it's zero we are done with the JIT frame. + while rffi.cast(lltype.Signed, callshape[0]) != 0: + # + # Non-zero: it's an offset inside the JIT frame. + # Read it and increment 'callshape'. + offset = rffi.cast(lltype.Signed, callshape[0]) + callshape = lltype.direct_ptradd(callshape, 1) + addr = llmemory.cast_int_to_adr(iself.frame_addr + + offset) + addr = iself.translateptr(iself.context, addr) + if gc.points_to_valid_gc_object(addr): + # + # The JIT frame contains a valid GC pointer at + # this address (as opposed to NULL). Save + # 'callshape' for the next call, and return the + # address. + iself.callshape = callshape + return addr + # + # Restore 'prev' and loop back to the start. 
+ iself.frame_addr = 0 + next = iself.saved_next + next += llmemory.sizeof(llmemory.Address) + + # --------------- # + root_iterator = RootIterator() + root_iterator.frame_addr = 0 + root_iterator.context = llmemory.NULL + root_iterator.translateptr = lambda context, addr: addr jit2gc.update({ - 'rootstackhook': collect_jit_stack_root, + 'root_iterator': root_iterator, }) def initialize(self): @@ -550,7 +606,7 @@ has_finalizer = bool(tid & (1< 0 lltype.free(raw, flavor='raw') + def test_call_to_winapi_function(self): + from pypy.rlib.clibffi import _WIN32, FUNCFLAG_STDCALL + if not _WIN32: + py.test.skip("Windows test only") + from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.rwin32 import DWORD + libc = CDLL('KERNEL32') + c_GetCurrentDir = libc.getpointer('GetCurrentDirectoryA', + [types.ulong, types.pointer], + types.ulong) + + cwd = os.getcwd() + buflen = len(cwd) + 10 + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + argchain = ArgChain().arg(rffi.cast(DWORD, buflen)).arg(buffer) + res = c_GetCurrentDir.call(argchain, DWORD) + assert rffi.cast(lltype.Signed, res) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + + cpu = self.cpu + func_adr = llmemory.cast_ptr_to_adr(c_GetCurrentDir.funcsym) + funcbox = ConstInt(heaptracker.adr2int(func_adr)) + calldescr = cpu.calldescrof_dynamic([types.ulong, types.pointer], + types.ulong, + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_STDCALL) + i1 = BoxInt() + i2 = BoxInt() + faildescr = BasicFailDescr(1) + # if the stdcall convention is ignored, then ESP is wrong after the + # call: 8 bytes too much. If we repeat the call often enough, crash. 
+ ops = [] + for i in range(50): + i3 = BoxInt() + ops += [ + ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ] + ops[-1].setfailargs([]) + ops += [ + ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) + ] + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + self.cpu.set_future_value_int(0, buflen) + self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + def test_guard_not_invalidated(self): cpu = self.cpu i0 = BoxInt() @@ -2292,7 +2366,8 @@ ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) for i in range(10): self.cpu.set_future_value_int(i, i+1) res = self.cpu.execute_token(looptoken) @@ -2332,7 +2407,8 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) ops = ''' [f0, f1] @@ -2422,7 +2498,8 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) ops = ''' [f0, f1] @@ -2634,7 +2711,8 @@ # FUNC = self.FuncType([lltype.Signed], RESTYPE) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, 
+ EffectInfo.MOST_GENERAL) x = self.cpu.bh_call_i(self.get_funcbox(self.cpu, f).value, calldescr, [value], None, None) assert x == expected, ( @@ -2667,7 +2745,8 @@ # FUNC = self.FuncType([lltype.Signed], RESTYPE) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(self.cpu, f) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(value)], 'int', descr=calldescr) @@ -2701,7 +2780,8 @@ # FUNC = self.FuncType([lltype.SignedLongLong], lltype.SignedLongLong) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) x = self.cpu.bh_call_f(self.get_funcbox(self.cpu, f).value, calldescr, None, None, [value]) assert x == expected @@ -2728,7 +2808,8 @@ # FUNC = self.FuncType([lltype.SignedLongLong], lltype.SignedLongLong) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(self.cpu, f) res = self.execute_operation(rop.CALL, [funcbox, BoxFloat(value)], 'float', descr=calldescr) @@ -2756,7 +2837,8 @@ # FUNC = self.FuncType([lltype.SingleFloat], lltype.SingleFloat) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) ivalue = longlong.singlefloat2int(value) iexpected = longlong.singlefloat2int(expected) x = self.cpu.bh_call_i(self.get_funcbox(self.cpu, f).value, @@ -2785,7 +2867,8 @@ # FUNC = self.FuncType([lltype.SingleFloat], lltype.SingleFloat) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox 
= self.get_funcbox(self.cpu, f) ivalue = longlong.singlefloat2int(value) iexpected = longlong.singlefloat2int(expected) diff --git a/pypy/jit/backend/test/test_ll_random.py b/pypy/jit/backend/test/test_ll_random.py --- a/pypy/jit/backend/test/test_ll_random.py +++ b/pypy/jit/backend/test/test_ll_random.py @@ -6,6 +6,7 @@ from pypy.jit.metainterp.history import BoxPtr, BoxInt from pypy.jit.metainterp.history import BasicFailDescr from pypy.jit.codewriter import heaptracker +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.rpython.annlowlevel import llhelper from pypy.rlib.rarithmetic import intmask from pypy.rpython.llinterp import LLException @@ -468,6 +469,10 @@ exec code in d return subset, d['f'], vtableptr + def getcalldescr(self, builder, TP): + ef = EffectInfo.MOST_GENERAL + return builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT, ef) + # 1. non raising call and guard_no_exception class CallOperation(BaseCallOperation): def produce_into(self, builder, r): @@ -481,7 +486,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=BasicFailDescr()) @@ -501,7 +506,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) _, vtableptr = builder.get_random_structure_type_and_vtable(r) exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu) @@ -523,7 +528,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) 
self.put(builder, args, descr) exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), @@ -540,7 +545,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(), descr=BasicFailDescr()) @@ -559,7 +564,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) while True: _, vtableptr = builder.get_random_structure_type_and_vtable(r) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -34,6 +34,7 @@ from pypy.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) from pypy.rlib import rgc +from pypy.rlib.clibffi import FFI_DEFAULT_ABI from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.metainterp.history import ConstInt, BoxInt from pypy.jit.codewriter.effectinfo import EffectInfo @@ -1120,7 +1121,7 @@ return genop_cmp_guard_float def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax, - argtypes=None): + argtypes=None, callconv=FFI_DEFAULT_ABI): if IS_X86_64: return self._emit_call_64(force_index, x, arglocs, start, argtypes) @@ -1149,6 +1150,16 @@ # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) + # + if callconv != FFI_DEFAULT_ABI: + self._fix_stdcall(callconv, p) + + def _fix_stdcall(self, callconv, p): + from pypy.rlib.clibffi import FFI_STDCALL + assert callconv == FFI_STDCALL + # it's a bit stupid, but we're just going to cancel the fact that + # the called function just 
added 'p' to ESP, by subtracting it again. + self.mc.SUB_ri(esp.value, p) def _emit_call_64(self, force_index, x, arglocs, start, argtypes): src_locs = [] @@ -2127,7 +2138,8 @@ tmp = eax self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types()) + argtypes=op.getdescr().get_arg_types(), + callconv=op.getdescr().get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -843,8 +843,8 @@ def consider_call(self, op): effectinfo = op.getdescr().get_extra_info() - if effectinfo is not None: - oopspecindex = effectinfo.oopspecindex + oopspecindex = effectinfo.oopspecindex + if oopspecindex != EffectInfo.OS_NONE: if IS_X86_32: # support for some of the llong operations, # which only exist on x86-32 diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -527,6 +527,7 @@ NOP = insn('\x90') RET = insn('\xC3') + RET16_i = insn('\xC2', immediate(1, 'h')) PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -7,6 +7,7 @@ BoxPtr, ConstPtr, TreeLoop from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass @@ -76,7 +77,8 @@ for box in boxes: regalloc.rm.try_allocate_reg(box) TP = lltype.FuncType([], 
lltype.Signed) - calldescr = cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + calldescr = cpu.calldescrof(TP, TP.ARGS, TP.RESULT, + EffectInfo.MOST_GENERAL) regalloc.rm._check_invariants() box = boxes[0] regalloc.position = 0 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -16,6 +16,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.codewriter import longlong +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.x86.rx86 import * def test_is_comparison_or_ovf_op(): @@ -92,7 +93,8 @@ zd_addr = cpu.cast_int_to_adr(zero_division_tp) zero_division_error = llmemory.cast_adr_to_ptr(zd_addr, lltype.Ptr(rclass.OBJECT_VTABLE)) - raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT) + raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) @@ -115,9 +117,12 @@ f2ptr = llhelper(F2PTR, f2) f10ptr = llhelper(F10PTR, f10) - f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT) - f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT) - f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT) + f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + f10_calldescr= cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) namespace = locals().copy() type_system = 'lltype' diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -433,6 +433,88 @@ 
ops_offset[operations[2]] <= ops_offset[None]) + def test_calling_convention(self, monkeypatch): + if WORD != 4: + py.test.skip("32-bit only test") + from pypy.jit.backend.x86.regloc import eax, edx + from pypy.jit.backend.x86 import codebuf + from pypy.jit.codewriter.effectinfo import EffectInfo + from pypy.rlib.libffi import types, clibffi + had_stdcall = hasattr(clibffi, 'FFI_STDCALL') + if not had_stdcall: # not running on Windows, but we can still test + monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) + # + for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: + cpu = self.cpu + mc = codebuf.MachineCodeBlockWrapper() + mc.MOV_rs(eax.value, 4) # argument 1 + mc.MOV_rs(edx.value, 40) # argument 10 + mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10 + if ffi == clibffi.FFI_DEFAULT_ABI: + mc.RET() + else: + mc.RET16_i(40) + rawstart = mc.materialize(cpu.asmmemmgr, []) + # + calldescr = cpu.calldescrof_dynamic([types.slong] * 10, + types.slong, + EffectInfo.MOST_GENERAL, + ffi_flags=-1) + calldescr.get_call_conv = lambda: ffi # <==== hack + funcbox = ConstInt(rawstart) + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + i4 = BoxInt() + i5 = BoxInt() + i6 = BoxInt() + c = ConstInt(-1) + faildescr = BasicFailDescr(1) + # we must call it repeatedly: if the stack pointer gets increased + # by 40 bytes by the STDCALL call, and if we don't expect it, + # then we are going to get our stack emptied unexpectedly by + # several repeated calls + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i3, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i4, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i5, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, 
descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i6, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.FINISH, [i3, i4, i5, i6], None, + descr=BasicFailDescr(0)) + ] + ops[1].setfailargs([]) + ops[3].setfailargs([]) + ops[5].setfailargs([]) + ops[7].setfailargs([]) + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + self.cpu.set_future_value_int(0, 123450) + self.cpu.set_future_value_int(1, 123408) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == 42 + assert self.cpu.get_latest_value_int(1) == 42 + assert self.cpu.get_latest_value_int(2) == 42 + assert self.cpu.get_latest_value_int(3) == 42 + + class TestDebuggingAssembler(object): def setup_method(self, meth): self.cpu = CPU(rtyper=None, stats=FakeStats()) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import support from pypy.jit.codewriter.jitcode import JitCode from pypy.jit.codewriter.effectinfo import (VirtualizableAnalyzer, - QuasiImmutAnalyzer, CanReleaseGILAnalyzer, effectinfo_from_writeanalyze, + QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, CallInfoCollection) from pypy.translator.simplify import get_funcobj, get_functype from pypy.rpython.lltypesystem import lltype, llmemory @@ -31,7 +31,7 @@ self.readwrite_analyzer = ReadWriteAnalyzer(translator) self.virtualizable_analyzer = VirtualizableAnalyzer(translator) self.quasiimmut_analyzer = QuasiImmutAnalyzer(translator) - self.canreleasegil_analyzer = CanReleaseGILAnalyzer(translator) + self.randomeffects_analyzer = RandomEffectsAnalyzer(translator) # for index, jd in enumerate(jitdrivers_sd): jd.index = index @@ -187,7 +187,7 @@ fnaddr = llmemory.cast_ptr_to_adr(fnptr) NON_VOID_ARGS = 
[ARG for ARG in FUNC.ARGS if ARG is not lltype.Void] calldescr = self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS), - FUNC.RESULT) + FUNC.RESULT, EffectInfo.MOST_GENERAL) return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, @@ -219,9 +219,11 @@ assert not NON_VOID_ARGS, ("arguments not supported for " "loop-invariant function!") # build the extraeffect - can_release_gil = self.canreleasegil_analyzer.analyze(op) - # can_release_gil implies can_invalidate - can_invalidate = can_release_gil or self.quasiimmut_analyzer.analyze(op) + random_effects = self.randomeffects_analyzer.analyze(op) + if random_effects: + extraeffect = EffectInfo.EF_RANDOM_EFFECTS + # random_effects implies can_invalidate + can_invalidate = random_effects or self.quasiimmut_analyzer.analyze(op) if extraeffect is None: if self.virtualizable_analyzer.analyze(op): extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE @@ -239,12 +241,10 @@ # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, - oopspecindex, can_invalidate, can_release_gil) + oopspecindex, can_invalidate) # - if oopspecindex != EffectInfo.OS_NONE: - assert effectinfo is not None + assert effectinfo is not None if elidable or loopinvariant: - assert effectinfo is not None assert extraeffect != EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE # XXX this should also say assert not can_invalidate, but # it can't because our analyzer is not good enough for now @@ -264,8 +264,7 @@ def calldescr_canraise(self, calldescr): effectinfo = calldescr.get_extra_info() - return (effectinfo is None or - effectinfo.extraeffect > EffectInfo.EF_CANNOT_RAISE) + return effectinfo.check_can_raise() def jitdriver_sd_from_portal_graph(self, graph): for jd in self.jitdrivers_sd: diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -15,6 +15,7 @@ 
EF_ELIDABLE_CAN_RAISE = 3 #elidable function (but can raise) EF_CAN_RAISE = 4 #normal function (can raise) EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables + EF_RANDOM_EFFECTS = 6 #can do whatever # the 'oopspecindex' field is one of the following values: OS_NONE = 0 # normal case, no oopspec @@ -80,17 +81,26 @@ write_descrs_fields, write_descrs_arrays, extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, - can_invalidate=False, can_release_gil=False): - key = (frozenset(readonly_descrs_fields), - frozenset(readonly_descrs_arrays), - frozenset(write_descrs_fields), - frozenset(write_descrs_arrays), + can_invalidate=False): + key = (frozenset_or_none(readonly_descrs_fields), + frozenset_or_none(readonly_descrs_arrays), + frozenset_or_none(write_descrs_fields), + frozenset_or_none(write_descrs_arrays), extraeffect, oopspecindex, - can_invalidate, - can_release_gil) + can_invalidate) if key in cls._cache: return cls._cache[key] + if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: + assert readonly_descrs_fields is None + assert readonly_descrs_arrays is None + assert write_descrs_fields is None + assert write_descrs_arrays is None + else: + assert readonly_descrs_fields is not None + assert readonly_descrs_arrays is not None + assert write_descrs_fields is not None + assert write_descrs_arrays is not None result = object.__new__(cls) result.readonly_descrs_fields = readonly_descrs_fields result.readonly_descrs_arrays = readonly_descrs_arrays @@ -104,11 +114,13 @@ result.write_descrs_arrays = write_descrs_arrays result.extraeffect = extraeffect result.can_invalidate = can_invalidate - result.can_release_gil = can_release_gil result.oopspecindex = oopspecindex cls._cache[key] = result return result + def check_can_raise(self): + return self.extraeffect > self.EF_CANNOT_RAISE + def check_can_invalidate(self): return self.can_invalidate @@ -116,56 +128,71 @@ return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE def has_random_effects(self): 
- return self.oopspecindex == self.OS_LIBFFI_CALL or self.can_release_gil + return self.extraeffect >= self.EF_RANDOM_EFFECTS + + +def frozenset_or_none(x): + if x is None: + return None + return frozenset(x) + +EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, + EffectInfo.EF_RANDOM_EFFECTS, + can_invalidate=True) + def effectinfo_from_writeanalyze(effects, cpu, extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, - can_invalidate=False, - can_release_gil=False): + can_invalidate=False): from pypy.translator.backendopt.writeanalyze import top_set - if effects is top_set: - return None - readonly_descrs_fields = [] - readonly_descrs_arrays = [] - write_descrs_fields = [] - write_descrs_arrays = [] + if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: + readonly_descrs_fields = None + readonly_descrs_arrays = None + write_descrs_fields = None + write_descrs_arrays = None + extraeffect = EffectInfo.EF_RANDOM_EFFECTS + else: + readonly_descrs_fields = [] + readonly_descrs_arrays = [] + write_descrs_fields = [] + write_descrs_arrays = [] - def add_struct(descrs_fields, (_, T, fieldname)): - T = deref(T) - if consider_struct(T, fieldname): - descr = cpu.fielddescrof(T, fieldname) - descrs_fields.append(descr) + def add_struct(descrs_fields, (_, T, fieldname)): + T = deref(T) + if consider_struct(T, fieldname): + descr = cpu.fielddescrof(T, fieldname) + descrs_fields.append(descr) - def add_array(descrs_arrays, (_, T)): - ARRAY = deref(T) - if consider_array(ARRAY): - descr = cpu.arraydescrof(ARRAY) - descrs_arrays.append(descr) + def add_array(descrs_arrays, (_, T)): + ARRAY = deref(T) + if consider_array(ARRAY): + descr = cpu.arraydescrof(ARRAY) + descrs_arrays.append(descr) - for tup in effects: - if tup[0] == "struct": - add_struct(write_descrs_fields, tup) - elif tup[0] == "readstruct": - tupw = ("struct",) + tup[1:] - if tupw not in effects: - add_struct(readonly_descrs_fields, tup) - elif tup[0] == "array": - 
add_array(write_descrs_arrays, tup) - elif tup[0] == "readarray": - tupw = ("array",) + tup[1:] - if tupw not in effects: - add_array(readonly_descrs_arrays, tup) - else: - assert 0 + for tup in effects: + if tup[0] == "struct": + add_struct(write_descrs_fields, tup) + elif tup[0] == "readstruct": + tupw = ("struct",) + tup[1:] + if tupw not in effects: + add_struct(readonly_descrs_fields, tup) + elif tup[0] == "array": + add_array(write_descrs_arrays, tup) + elif tup[0] == "readarray": + tupw = ("array",) + tup[1:] + if tupw not in effects: + add_array(readonly_descrs_arrays, tup) + else: + assert 0 + # return EffectInfo(readonly_descrs_fields, readonly_descrs_arrays, write_descrs_fields, write_descrs_arrays, extraeffect, oopspecindex, - can_invalidate, - can_release_gil) + can_invalidate) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: @@ -201,12 +228,13 @@ def analyze_simple_operation(self, op, graphinfo): return op.opname == 'jit_force_quasi_immutable' -class CanReleaseGILAnalyzer(BoolGraphAnalyzer): +class RandomEffectsAnalyzer(BoolGraphAnalyzer): def analyze_direct_call(self, graph, seen=None): - releases_gil = False if hasattr(graph, "func") and hasattr(graph.func, "_ptr"): - releases_gil = graph.func._ptr._obj.releases_gil - return releases_gil or super(CanReleaseGILAnalyzer, self).analyze_direct_call(graph, seen) + if graph.func._ptr._obj.random_effects_on_gcobjs: + return True + return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, + seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1,4 +1,5 @@ import py + from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets @@ -22,6 +23,11 @@ t = 
Transformer(cpu, callcontrol, portal_jd) t.transform(graph) +def integer_bounds(size, unsigned): + if unsigned: + return 0, 1 << (8 * size) + else: + return -(1 << (8 * size - 1)), 1 << (8 * size - 1) class Transformer(object): vable_array_vars = None @@ -780,81 +786,127 @@ raise NotImplementedError("cast_ptr_to_int") def rewrite_op_force_cast(self, op): - assert not self._is_gc(op.args[0]) - fromll = longlong.is_longlong(op.args[0].concretetype) - toll = longlong.is_longlong(op.result.concretetype) - if fromll and toll: + v_arg = op.args[0] + v_result = op.result + assert not self._is_gc(v_arg) + + if v_arg.concretetype == v_result.concretetype: return - if fromll: - args = op.args - opname = 'truncate_longlong_to_int' - RESULT = lltype.Signed - v = varoftype(RESULT) - op1 = SpaceOperation(opname, args, v) - op2 = self.rewrite_operation(op1) - oplist = self.force_cast_without_longlong(op2.result, op.result) + + float_arg = v_arg.concretetype in [lltype.Float, lltype.SingleFloat] + float_res = v_result.concretetype in [lltype.Float, lltype.SingleFloat] + if not float_arg and not float_res: + # some int -> some int cast + return self._int_to_int_cast(v_arg, v_result) + elif float_arg and float_res: + # some float -> some float cast + return self._float_to_float_cast(v_arg, v_result) + elif not float_arg and float_res: + # some int -> some float + ops = [] + v1 = varoftype(lltype.Signed) + oplist = self.rewrite_operation( + SpaceOperation('force_cast', [v_arg], v1) + ) if oplist: - return [op2] + oplist - # - # force a renaming to put the correct result in place, even though - # it might be slightly mistyped (e.g. 
Signed versus Unsigned) - assert op2.result is v - op2.result = op.result - return op2 - elif toll: - size, unsigned = rffi.size_and_sign(op.args[0].concretetype) - if unsigned: + ops.extend(oplist) + else: + v1 = v_arg + v2 = varoftype(lltype.Float) + op = self.rewrite_operation( + SpaceOperation('cast_int_to_float', [v1], v2) + ) + ops.append(op) + op2 = self.rewrite_operation( + SpaceOperation('force_cast', [v2], v_result) + ) + if op2: + ops.append(op2) + else: + op.result = v_result + return ops + elif float_arg and not float_res: + # some float -> some int + ops = [] + v1 = varoftype(lltype.Float) + op1 = self.rewrite_operation( + SpaceOperation('force_cast', [v_arg], v1) + ) + if op1: + ops.append(op1) + else: + v1 = v_arg + v2 = varoftype(lltype.Signed) + op = self.rewrite_operation( + SpaceOperation('cast_float_to_int', [v1], v2) + ) + ops.append(op) + oplist = self.rewrite_operation( + SpaceOperation('force_cast', [v2], v_result) + ) + if oplist: + ops.extend(oplist) + else: + op.result = v_result + return ops + else: + assert False + + def _int_to_int_cast(self, v_arg, v_result): + longlong_arg = longlong.is_longlong(v_arg.concretetype) + longlong_res = longlong.is_longlong(v_result.concretetype) + size1, unsigned1 = rffi.size_and_sign(v_arg.concretetype) + size2, unsigned2 = rffi.size_and_sign(v_result.concretetype) + + if longlong_arg and longlong_res: + return + elif longlong_arg: + v = varoftype(lltype.Signed) + op1 = self.rewrite_operation( + SpaceOperation('truncate_longlong_to_int', [v_arg], v) + ) + op2 = SpaceOperation('force_cast', [v], v_result) + oplist = self.rewrite_operation(op2) + if not oplist: + op1.result = v_result + oplist = [] + return [op1] + oplist + elif longlong_res: + if unsigned1: INTERMEDIATE = lltype.Unsigned else: INTERMEDIATE = lltype.Signed v = varoftype(INTERMEDIATE) - oplist = self.force_cast_without_longlong(op.args[0], v) + op1 = SpaceOperation('force_cast', [v_arg], v) + oplist = self.rewrite_operation(op1) if not 
oplist: - v = op.args[0] + v = v_arg oplist = [] - if unsigned: + if unsigned1: opname = 'cast_uint_to_longlong' else: opname = 'cast_int_to_longlong' - op1 = SpaceOperation(opname, [v], op.result) - op2 = self.rewrite_operation(op1) + op2 = self.rewrite_operation( + SpaceOperation(opname, [v], v_result) + ) return oplist + [op2] - else: - return self.force_cast_without_longlong(op.args[0], op.result) - def force_cast_without_longlong(self, v_arg, v_result): - if v_result.concretetype == v_arg.concretetype: + # We've now, ostensibly, dealt with the longlongs, everything should be + # a Signed or smaller + assert size1 <= rffi.sizeof(lltype.Signed) + assert size2 <= rffi.sizeof(lltype.Signed) + + # the target type is LONG or ULONG + if size2 == rffi.sizeof(lltype.Signed): return - if v_arg.concretetype == rffi.FLOAT: - assert v_result.concretetype == lltype.Float, "cast %s -> %s" % ( - v_arg.concretetype, v_result.concretetype) - return SpaceOperation('cast_singlefloat_to_float', [v_arg], - v_result) - if v_result.concretetype == rffi.FLOAT: - assert v_arg.concretetype == lltype.Float, "cast %s -> %s" % ( - v_arg.concretetype, v_result.concretetype) - return SpaceOperation('cast_float_to_singlefloat', [v_arg], - v_result) - return self.force_cast_without_singlefloat(v_arg, v_result) - def force_cast_without_singlefloat(self, v_arg, v_result): - size2, unsigned2 = rffi.size_and_sign(v_result.concretetype) - assert size2 <= rffi.sizeof(lltype.Signed) - if size2 == rffi.sizeof(lltype.Signed): - return # the target type is LONG or ULONG - size1, unsigned1 = rffi.size_and_sign(v_arg.concretetype) - assert size1 <= rffi.sizeof(lltype.Signed) - # - def bounds(size, unsigned): - if unsigned: - return 0, 1<<(8*size) - else: - return -(1<<(8*size-1)), 1<<(8*size-1) - min1, max1 = bounds(size1, unsigned1) - min2, max2 = bounds(size2, unsigned2) + min1, max1 = integer_bounds(size1, unsigned1) + min2, max2 = integer_bounds(size2, unsigned2) + + # the target type includes the 
source range if min2 <= min1 <= max1 <= max2: - return # the target type includes the source range - # + return + result = [] if min2: c_min2 = Constant(min2, lltype.Signed) @@ -862,15 +914,28 @@ result.append(SpaceOperation('int_sub', [v_arg, c_min2], v2)) else: v2 = v_arg - c_mask = Constant(int((1<<(8*size2))-1), lltype.Signed) - v3 = varoftype(lltype.Signed) + c_mask = Constant(int((1 << (8 * size2)) - 1), lltype.Signed) + if min2: + v3 = varoftype(lltype.Signed) + else: + v3 = v_result result.append(SpaceOperation('int_and', [v2, c_mask], v3)) if min2: result.append(SpaceOperation('int_add', [v3, c_min2], v_result)) - else: - result[-1].result = v_result return result + def _float_to_float_cast(self, v_arg, v_result): + if v_arg.concretetype == lltype.SingleFloat: + assert v_result.concretetype == lltype.Float, "cast %s -> %s" % ( + v_arg.concretetype, v_result.concretetype) + return SpaceOperation('cast_singlefloat_to_float', [v_arg], + v_result) + if v_result.concretetype == lltype.SingleFloat: + assert v_arg.concretetype == lltype.Float, "cast %s -> %s" % ( + v_arg.concretetype, v_result.concretetype) + return SpaceOperation('cast_float_to_singlefloat', [v_arg], + v_result) + def rewrite_op_direct_ptradd(self, op): # xxx otherwise, not implemented: assert op.args[0].concretetype == rffi.CCHARP @@ -1417,7 +1482,7 @@ extraeffect = EffectInfo.EF_CANNOT_RAISE elif oopspec_name.startswith('libffi_call_'): oopspecindex = EffectInfo.OS_LIBFFI_CALL - extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE + extraeffect = EffectInfo.EF_RANDOM_EFFECTS else: assert False, 'unsupported oopspec: %s' % oopspec_name return self._handle_oopspec_call(op, args, oopspecindex, extraeffect) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -91,9 +91,12 @@ reds_v = op.args[2+numgreens:] assert len(reds_v) == numreds # - def _sort(args_v): + def _sort(args_v, is_green): 
from pypy.jit.metainterp.history import getkind lst = [v for v in args_v if v.concretetype is not lltype.Void] + if is_green: + assert len(lst) == len(args_v), ( + "not supported so far: 'greens' variables contain Void") _kind2count = {'int': 1, 'ref': 2, 'float': 3} lst2 = sorted(lst, key=lambda v: _kind2count[getkind(v.concretetype)]) # a crash here means that you have to reorder the variable named in @@ -102,7 +105,7 @@ assert lst == lst2 return lst # - return (_sort(greens_v), _sort(reds_v)) + return (_sort(greens_v, True), _sort(reds_v, False)) def maybe_on_top_of_llinterp(rtyper, fnptr): # Run a generated graph on top of the llinterp for testing. diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -191,4 +191,4 @@ [block, _] = list(f_graph.iterblocks()) [op] = block.operations call_descr = cc.getcalldescr(op) - assert call_descr.extrainfo.can_release_gil \ No newline at end of file + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -5,7 +5,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi class FakeCallDescr(AbstractDescr): - def __init__(self, FUNC, ARGS, RESULT, effectinfo=None): + def __init__(self, FUNC, ARGS, RESULT, effectinfo): self.FUNC = FUNC self.ARGS = ARGS self.RESULT = RESULT diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -50,7 +50,7 @@ def __init__(self, rtyper): rtyper._builtin_func_for_spec_cache = FakeDict() self.rtyper = rtyper - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, effectinfo): return FakeDescr() def 
fielddescrof(self, STRUCT, name): return FakeDescr() @@ -324,7 +324,7 @@ def test_exc_exitswitch(self): def g(i): pass - + def f(i): try: g(i) @@ -854,13 +854,51 @@ int_return %i0 """, transform=True) - def test_force_cast_float(self): + def test_force_cast_floats(self): from pypy.rpython.lltypesystem import rffi + # Caststs to lltype.Float def f(n): return rffi.cast(lltype.Float, n) self.encoding_test(f, [12.456], """ float_return %f0 """, transform=True) + self.encoding_test(f, [rffi.cast(rffi.SIGNEDCHAR, 42)], """ + cast_int_to_float %i0 -> %f0 + float_return %f0 + """, transform=True) + + # Casts to lltype.SingleFloat + def g(n): + return rffi.cast(lltype.SingleFloat, n) + self.encoding_test(g, [12.456], """ + cast_float_to_singlefloat %f0 -> %i0 + int_return %i0 + """, transform=True) + self.encoding_test(g, [rffi.cast(rffi.SIGNEDCHAR, 42)], """ + cast_int_to_float %i0 -> %f0 + cast_float_to_singlefloat %f0 -> %i1 + int_return %i1 + """, transform=True) + + # Casts from floats + def f(n): + return rffi.cast(rffi.SIGNEDCHAR, n) + self.encoding_test(f, [12.456], """ + cast_float_to_int %f0 -> %i0 + int_sub %i0, $-128 -> %i1 + int_and %i1, $255 -> %i2 + int_add %i2, $-128 -> %i3 + int_return %i3 + """, transform=True) + self.encoding_test(f, [rffi.cast(lltype.SingleFloat, 12.456)], """ + cast_singlefloat_to_float %i0 -> %f0 + cast_float_to_int %f0 -> %i1 + int_sub %i1, $-128 -> %i2 + int_and %i2, $255 -> %i3 + int_add %i3, $-128 -> %i4 + int_return %i4 + """, transform=True) + def test_direct_ptradd(self): from pypy.rpython.lltypesystem import rffi diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -18,25 +18,27 @@ def __init__(self, funcval, cpu, prepare_op): self.funcval = funcval self.opargs = [] - argtypes, restype = self._get_signature(funcval) - self.descr = cpu.calldescrof_dynamic(argtypes, restype) + 
argtypes, restype, flags = self._get_signature(funcval) + self.descr = cpu.calldescrof_dynamic(argtypes, restype, + EffectInfo.MOST_GENERAL, + ffi_flags=flags) # ^^^ may be None if unsupported self.prepare_op = prepare_op self.delayed_ops = [] def _get_signature(self, funcval): """ - given the funcval, return a tuple (argtypes, restype), where the - actuall types are libffi.types.* + given the funcval, return a tuple (argtypes, restype, flags), where + the actuall types are libffi.types.* The implementation is tricky because we have three possible cases: - translated: the easiest case, we can just cast back the pointer to - the original Func instance and read .argtypes and .restype + the original Func instance and read .argtypes, .restype and .flags - completely untranslated: this is what we get from test_optimizeopt tests. funcval contains a FakeLLObject whose _fake_class is Func, - and we can just get .argtypes and .restype + and we can just get .argtypes, .restype and .flags - partially translated: this happens when running metainterp tests: funcval contains the low-level equivalent of a Func, and thus we @@ -48,10 +50,10 @@ llfunc = funcval.box.getref_base() if we_are_translated(): func = cast_base_ptr_to_instance(Func, llfunc) - return func.argtypes, func.restype + return func.argtypes, func.restype, func.flags elif getattr(llfunc, '_fake_class', None) is Func: # untranslated - return llfunc.argtypes, llfunc.restype + return llfunc.argtypes, llfunc.restype, llfunc.flags else: # partially translated # llfunc contains an opaque pointer to something like the following: @@ -62,7 +64,7 @@ # because we don't have the exact TYPE to cast to. 
Instead, we # just fish it manually :-( f = llfunc._obj.container - return f.inst_argtypes, f.inst_restype + return f.inst_argtypes, f.inst_restype, f.inst_flags class OptFfiCall(Optimization): @@ -195,9 +197,7 @@ def _get_oopspec(self, op): effectinfo = op.getdescr().get_extra_info() - if effectinfo is not None: - return effectinfo.oopspecindex - return EffectInfo.OS_NONE + return effectinfo.oopspecindex def _get_funcval(self, op): return self.getvalue(op.getarg(1)) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -235,31 +235,33 @@ opnum == rop.CALL_RELEASE_GIL or opnum == rop.CALL_ASSEMBLER): if opnum == rop.CALL_ASSEMBLER: - effectinfo = None + self._seen_guard_not_invalidated = False else: effectinfo = op.getdescr().get_extra_info() - if effectinfo is None or effectinfo.check_can_invalidate(): - self._seen_guard_not_invalidated = False - if effectinfo is not None and not effectinfo.has_random_effects(): - # XXX we can get the wrong complexity here, if the lists - # XXX stored on effectinfo are large - for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) - for arraydescr in effectinfo.readonly_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr) - for fielddescr in effectinfo.write_descrs_fields: - self.force_lazy_setfield(fielddescr, can_cache=False) - for arraydescr in effectinfo.write_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr, can_cache=False) - if effectinfo.check_forces_virtual_or_virtualizable(): - vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) - # ^^^ we only need to force this field; the other fields - # of virtualref_info and virtualizable_info are not gcptrs. 
- return + if effectinfo.check_can_invalidate(): + self._seen_guard_not_invalidated = False + if not effectinfo.has_random_effects(): + self.force_from_effectinfo(effectinfo) + return self.force_all_lazy_setfields_and_arrayitems() self.clean_caches() + def force_from_effectinfo(self, effectinfo): + # XXX we can get the wrong complexity here, if the lists + # XXX stored on effectinfo are large + for fielddescr in effectinfo.readonly_descrs_fields: + self.force_lazy_setfield(fielddescr) + for arraydescr in effectinfo.readonly_descrs_arrays: + self.force_lazy_setarrayitem(arraydescr) + for fielddescr in effectinfo.write_descrs_fields: + self.force_lazy_setfield(fielddescr, can_cache=False) + for arraydescr in effectinfo.write_descrs_arrays: + self.force_lazy_setarrayitem(arraydescr, can_cache=False) + if effectinfo.check_forces_virtual_or_virtualizable(): + vrefinfo = self.optimizer.metainterp_sd.virtualref_info + self.force_lazy_setfield(vrefinfo.descr_forced) + # ^^^ we only need to force this field; the other fields + # of virtualref_info and virtualizable_info are not gcptrs. def turned_constant(self, value): assert value.is_constant() diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -433,11 +433,10 @@ # specifically the given oopspec call. For non-oopspec calls, # oopspecindex is just zero. 
effectinfo = op.getdescr().get_extra_info() - if effectinfo is not None: - oopspecindex = effectinfo.oopspecindex - if oopspecindex == EffectInfo.OS_ARRAYCOPY: - if self._optimize_CALL_ARRAYCOPY(op): - return + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_ARRAYCOPY: + if self._optimize_CALL_ARRAYCOPY(op): + return self.emit_operation(op) def _optimize_CALL_ARRAYCOPY(self, op): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -14,12 +14,15 @@ can check that the signature of a call is really what you want. """ - def __init__(self, arg_types, typeinfo): + def __init__(self, arg_types, typeinfo, flags): self.arg_types = arg_types self.typeinfo = typeinfo # return type + self.flags = flags def __eq__(self, other): - return self.arg_types == other.arg_types and self.typeinfo == other.typeinfo + return (self.arg_types == other.arg_types and + self.typeinfo == other.typeinfo and + self.flags == other.get_ffi_flags()) class FakeLLObject(object): @@ -41,24 +44,31 @@ vable_token_descr = LLtypeMixin.valuedescr valuedescr = LLtypeMixin.valuedescr - int_float__int = MyCallDescr('if', 'i') + int_float__int_42 = MyCallDescr('if', 'i', 42) + int_float__int_43 = MyCallDescr('if', 'i', 43) funcptr = FakeLLObject() func = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=42) func2 = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=43) # def calldescr(cpu, FUNC, oopspecindex, extraeffect=None): - einfo = EffectInfo([], [], [], [], oopspecindex=oopspecindex, + if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: + f = None # means "can force all" really + else: + f = [] + einfo = EffectInfo(f, f, f, f, 
oopspecindex=oopspecindex, extraeffect=extraeffect) return cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, einfo) # libffi_prepare = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PREPARE) libffi_push_arg = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PUSH_ARG) libffi_call = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_CALL, - EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE) + EffectInfo.EF_RANDOM_EFFECTS) namespace = namespace.__dict__ @@ -79,7 +89,7 @@ """ expected = """ [i0, f1] - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1) @@ -119,7 +129,7 @@ [i0, f1, p2] i4 = force_token() setfield_gc(p2, i4, descr=vable_token_descr) - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [p2] guard_no_exception() [p2] jump(i3, f1, p2) @@ -216,7 +226,7 @@ call(0, ConstPtr(func), descr=libffi_prepare) # # this "nested" call is nicely optimized - i4 = call_release_gil(67890, i0, f1, descr=int_float__int) + i4 = call_release_gil(67890, i0, f1, descr=int_float__int_43) guard_not_forced() [] guard_no_exception() [] # @@ -261,7 +271,7 @@ expected = """ [i0, f1, p2] setfield_gc(p2, i0, descr=valuedescr) - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1, p2) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py --- a/pypy/jit/metainterp/optimizeopt/test/test_util.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py @@ -167,7 +167,8 @@ onedescr = cpu.fielddescrof(U, 'one') FUNC = lltype.FuncType([lltype.Signed], lltype.Signed) - plaincalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + plaincalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) 
nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], [])) writeadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -455,8 +455,8 @@ # specifically the given oopspec call. For non-oopspec calls, # oopspecindex is just zero. effectinfo = op.getdescr().get_extra_info() - if effectinfo is not None: - oopspecindex = effectinfo.oopspecindex + oopspecindex = effectinfo.oopspecindex + if oopspecindex != EffectInfo.OS_NONE: for value, meth in opt_call_oopspec_ops: if oopspecindex == value: # a match with the OS_STR_xxx if meth(self, op, mode_string): diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1257,10 +1257,8 @@ assert i == len(allboxes) # effectinfo = descr.get_extra_info() - if (effectinfo is None or - effectinfo.extraeffect == - effectinfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE or - assembler_call): + if (assembler_call or + effectinfo.check_forces_virtual_or_virtualizable()): # residual calls require attention to keep virtualizables in-sync self.metainterp.clear_exception() self.metainterp.vable_and_vrefs_before_residual_call() @@ -1693,12 +1691,11 @@ return if opnum == rop.CALL: effectinfo = descr.get_extra_info() - if effectinfo is not None: - ef = effectinfo.extraeffect - if ef == effectinfo.EF_LOOPINVARIANT or \ - ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ - ef == effectinfo.EF_ELIDABLE_CAN_RAISE: - return + ef = effectinfo.extraeffect + if ef == effectinfo.EF_LOOPINVARIANT or \ + ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ + ef == effectinfo.EF_ELIDABLE_CAN_RAISE: + return if self.heap_cache: self.heap_cache.clear() if self.heap_array_cache: diff --git a/pypy/jit/metainterp/test/test_compile.py 
b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -190,7 +190,7 @@ class FakeJitDriverSD: portal_runner_ptr = llhelper(lltype.Ptr(FUNC), ll_portal_runner) portal_runner_adr = llmemory.cast_ptr_to_adr(portal_runner_ptr) - portal_calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + portal_calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, None) portal_finishtoken = compile.DoneWithThisFrameDescrInt() num_red_args = 2 result_type = INT diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -1,5 +1,6 @@ import py from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted +from pypy.rlib.debug import debug_print from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin @@ -521,7 +522,8 @@ jitdriver = JitDriver(greens = ['g'], reds = ['m']) @dont_look_inside def escape(x): - print str(x) + # a plain "print" would call os.write() and release the gil + debug_print(str(x)) def f(g, m): g = str(g) while m >= 0: diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import py from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -252,6 +252,41 @@ self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, 'jump': 1}) + def 
test_void_red_variable(self): + mydriver = JitDriver(greens=[], reds=['a', 'm']) + def f1(m): + a = None + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass # other case + self.meta_interp(f1, [18]) + + def test_bug_constant_rawptrs(self): + py.test.skip("crashes because a is a constant") + from pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.nullptr(rffi.VOIDP.TO) + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + self.meta_interp(f1, [18]) + + def test_bug_rawptrs(self): + from pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.malloc(rffi.VOIDP.TO, 5, flavor='raw') + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass + lltype.free(a, flavor='raw') + self.meta_interp(f1, [18]) + class TestLLWarmspot(WarmspotTests, LLJitMixin): CPUClass = runner.LLtypeCPU diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -21,6 +21,7 @@ from pypy.jit.metainterp.jitdriver import JitDriverStaticData from pypy.jit.codewriter import support, codewriter, longlong from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES # ____________________________________________________________ @@ -244,7 +245,8 @@ graph.startblock = support.split_before_jit_merge_point(*jmpp) graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot - # to list some variable in greens=[] or reds=[] in JitDriver. + # to list some variable in greens=[] or reds=[] in JitDriver, + # or that a jit_merge_point() takes a constant as an argument. 
checkgraph(graph) for v in graph.getargs(): assert isinstance(v, Variable) @@ -654,11 +656,13 @@ portalfunc_ARGS = [] nums = {} for i, ARG in enumerate(PORTALFUNC.ARGS): + kind = history.getkind(ARG) + assert kind != 'void' if i < len(jd.jitdriver.greens): color = 'green' else: color = 'red' - attrname = '%s_%s' % (color, history.getkind(ARG)) + attrname = '%s_%s' % (color, kind) count = nums.get(attrname, 0) nums[attrname] = count + 1 portalfunc_ARGS.append((ARG, attrname, count)) @@ -746,7 +750,8 @@ jd.portal_calldescr = self.cpu.calldescrof( jd._PTR_PORTAL_FUNCTYPE.TO, jd._PTR_PORTAL_FUNCTYPE.TO.ARGS, - jd._PTR_PORTAL_FUNCTYPE.TO.RESULT) + jd._PTR_PORTAL_FUNCTYPE.TO.RESULT, + EffectInfo.MOST_GENERAL) vinfo = jd.virtualizable_info diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -124,7 +124,7 @@ # Hash of lltype or ootype object. # Only supports strings, unicodes and regular instances, # as well as primitives that can meaningfully be cast to Signed. 
- if isinstance(TYPE, lltype.Ptr): + if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc': if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE: return rstr.LLHelpers.ll_strhash(x) # assumed not null else: @@ -140,7 +140,7 @@ else: return 0 else: - return lltype.cast_primitive(lltype.Signed, x) + return rffi.cast(lltype.Signed, x) @specialize.ll_and_arg(3) def set_future_value(cpu, j, value, typecode): diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -19,6 +19,7 @@ 'sorted' : 'app_functional.sorted', 'any' : 'app_functional.any', 'all' : 'app_functional.all', + 'sum' : 'app_functional.sum', 'vars' : 'app_inspect.vars', 'dir' : 'app_inspect.dir', @@ -85,7 +86,6 @@ 'enumerate' : 'functional.W_Enumerate', 'min' : 'functional.min', 'max' : 'functional.max', - 'sum' : 'functional.sum', 'map' : 'functional.map', 'zip' : 'functional.zip', 'reduce' : 'functional.reduce', @@ -118,7 +118,7 @@ return module.Module(space, None, w_builtin) builtin = space.interpclass_w(w_builtin) if isinstance(builtin, module.Module): - return builtin + return builtin # no builtin! make a default one. Given them None, at least. builtin = module.Module(space, None) space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -34,3 +34,18 @@ if not x: return False return True + +def sum(sequence, start=0): + """sum(sequence[, start]) -> value + +Returns the sum of a sequence of numbers (NOT strings) plus the value +of parameter 'start' (which defaults to 0). 
When the sequence is +empty, returns start.""" + if isinstance(start, basestring): + raise TypeError("sum() can't sum strings") + last = start + for x in sequence: + # Very intentionally *not* +=, that would have different semantics if + # start was a mutable type, such as a list + last = last + x + return last \ No newline at end of file diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -292,7 +292,7 @@ raise break new_frame = space.createframe(code, w_func.w_func_globals, - w_func.closure) + w_func) new_frame.locals_stack_w[0] = w_item w_res = new_frame.run() result_w.append(w_res) @@ -325,27 +325,6 @@ result_w.append(w_res) return result_w -def sum(space, w_sequence, w_start=0): - """sum(sequence[, start]) -> value - -Returns the sum of a sequence of numbers (NOT strings) plus the value -of parameter 'start' (which defaults to 0). When the sequence is -empty, returns start.""" - if space.is_true(space.isinstance(w_start, space.w_basestring)): - msg = "sum() can't sum strings" - raise OperationError(space.w_TypeError, space.wrap(msg)) - w_iter = space.iter(w_sequence) - w_last = w_start - while True: - try: - w_next = space.next(w_iter) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - w_last = space.add(w_last, w_next) - return w_last - @unwrap_spec(sequences_w="args_w") def zip(space, sequences_w): """Return a list of tuples, where the nth tuple contains every nth item of diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -981,6 +981,86 @@ assert a.x == 2 raises(TypeError, descr.__delete__, a) + def test_partial_ordering(self): + class A: + def __lt__(self, other): + return self + a1 = A() + a2 = A() + assert (a1 < a2) is a1 + 
assert (a1 > a2) is a2 + + def test_eq_order(self): + # this gives the ordering of equality-related functions on top of + # CPython **for old-style classes**. + class A: + def __eq__(self, other): return self.__class__.__name__+':A.eq' + def __ne__(self, other): return self.__class__.__name__+':A.ne' + def __lt__(self, other): return self.__class__.__name__+':A.lt' + def __le__(self, other): return self.__class__.__name__+':A.le' + def __gt__(self, other): return self.__class__.__name__+':A.gt' + def __ge__(self, other): return self.__class__.__name__+':A.ge' + class B: + def __eq__(self, other): return self.__class__.__name__+':B.eq' + def __ne__(self, other): return self.__class__.__name__+':B.ne' + def __lt__(self, other): return self.__class__.__name__+':B.lt' + def __le__(self, other): return self.__class__.__name__+':B.le' + def __gt__(self, other): return self.__class__.__name__+':B.gt' + def __ge__(self, other): return self.__class__.__name__+':B.ge' + # + assert (A() == B()) == 'A:A.eq' + assert (A() != B()) == 'A:A.ne' + assert (A() < B()) == 'A:A.lt' + assert (A() <= B()) == 'A:A.le' + assert (A() > B()) == 'A:A.gt' + assert (A() >= B()) == 'A:A.ge' + # + assert (B() == A()) == 'B:B.eq' + assert (B() != A()) == 'B:B.ne' + assert (B() < A()) == 'B:B.lt' + assert (B() <= A()) == 'B:B.le' + assert (B() > A()) == 'B:B.gt' + assert (B() >= A()) == 'B:B.ge' + # + class C(A): + def __eq__(self, other): return self.__class__.__name__+':C.eq' + def __ne__(self, other): return self.__class__.__name__+':C.ne' + def __lt__(self, other): return self.__class__.__name__+':C.lt' + def __le__(self, other): return self.__class__.__name__+':C.le' + def __gt__(self, other): return self.__class__.__name__+':C.gt' + def __ge__(self, other): return self.__class__.__name__+':C.ge' + # + assert (A() == C()) == 'A:A.eq' + assert (A() != C()) == 'A:A.ne' + assert (A() < C()) == 'A:A.lt' + assert (A() <= C()) == 'A:A.le' + assert (A() > C()) == 'A:A.gt' + assert (A() >= C()) == 
'A:A.ge' + # + assert (C() == A()) == 'C:C.eq' + assert (C() != A()) == 'C:C.ne' + assert (C() < A()) == 'C:C.lt' + assert (C() <= A()) == 'C:C.le' + assert (C() > A()) == 'C:C.gt' + assert (C() >= A()) == 'C:C.ge' + # + class D(A): + pass + # + assert (A() == D()) == 'A:A.eq' + assert (A() != D()) == 'A:A.ne' + assert (A() < D()) == 'A:A.lt' + assert (A() <= D()) == 'A:A.le' + assert (A() > D()) == 'A:A.gt' + assert (A() >= D()) == 'A:A.ge' + # + assert (D() == A()) == 'D:A.eq' + assert (D() != A()) == 'D:A.ne' + assert (D() < A()) == 'D:A.lt' + assert (D() <= A()) == 'D:A.le' + assert (D() > A()) == 'D:A.gt' + assert (D() >= A()) == 'D:A.ge' + class AppTestOldStyleClassStrDict(object): def setup_class(cls): diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -7,7 +7,7 @@ class W_UnicodeBuilder(Wrappable): def __init__(self, space, size): - if size == -1: + if size < 0: self.builder = UnicodeBuilder() else: self.builder = UnicodeBuilder(size) @@ -47,4 +47,4 @@ append_slice = interp2app(W_UnicodeBuilder.descr_append_slice), build = interp2app(W_UnicodeBuilder.descr_build), ) -W_UnicodeBuilder.typedef.acceptable_as_base_class = False \ No newline at end of file +W_UnicodeBuilder.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -687,11 +687,15 @@ # support for the "string escape" codec # This is a bytes-to bytes transformation - at unwrap_spec(errors='str_or_None') -def escape_encode(space, w_string, errors='strict'): - w_repr = space.repr(w_string) - w_result = space.getslice(w_repr, space.wrap(1), space.wrap(-1)) - return space.newtuple([w_result, space.len(w_string)]) + at unwrap_spec(data=str, errors='str_or_None') +def escape_encode(space, data, 
errors='strict'): + from pypy.objspace.std.stringobject import string_escape_encode + result = string_escape_encode(data, quote="'") + start = 1 + end = len(result) - 1 + assert end >= 0 + w_result = space.wrap(result[start:end]) + return space.newtuple([w_result, space.wrap(len(data))]) @unwrap_spec(data=str, errors='str_or_None') def escape_decode(space, data, errors='strict'): diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -102,7 +102,6 @@ def test_indexerror(self): test = "\\" # trailing backslash - raises (ValueError, test.decode,'string-escape') def test_charmap_decode(self): @@ -292,6 +291,10 @@ assert '\\0f'.decode('string_escape') == chr(0) + 'f' assert '\\08'.decode('string_escape') == chr(0) + '8' + def test_escape_encode(self): + assert '"'.encode('string_escape') == '"' + assert "'".encode('string_escape') == "\\'" + def test_decode_utf8_different_case(self): constant = u"a" assert constant.encode("utf-8") == constant.encode("UTF-8") diff --git a/pypy/module/_continuation/__init__.py b/pypy/module/_continuation/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/__init__.py @@ -0,0 +1,40 @@ +from pypy.interpreter.mixedmodule import MixedModule + + +class Module(MixedModule): + """This module exposes 'one-shot continuation containers'. + +A 'continulet' object from this module is a container that stores a +one-shot continuation. It is similar in purpose to the 'f_back' +attribute of frames, which points to where execution should continue +after this frame finishes. The difference is that it will be changed +(often repeatedly) before the frame actually returns. + +To make a continulet object, call 'continulet' with a callable and +optional extra arguments. 
Later, the first time you switch() to the +continulet, the callable is invoked with the same continulet object as +the extra first argument. + +At this point, the one-shot continuation stored in the continulet points +to the caller of switch(). When switch() is called again, this one-shot +continuation is exchanged with the current one; it means that the caller +of switch() is suspended, its continuation stored in the container, and +the old continuation from the continulet object is resumed. + +Continulets are internally implemented using stacklets. Stacklets +are a bit more primitive (they are really one-shot continuations), but +that idea only works in C, not in Python, notably because of exceptions. + +The most primitive API is actually 'permute()', which just permutes the +one-shot continuation stored in two (or more) continulets. +""" + + appleveldefs = { + 'error': 'app_continuation.error', + 'generator': 'app_continuation.generator', + } + + interpleveldefs = { + 'continulet': 'interp_continuation.W_Continulet', + 'permute': 'interp_continuation.permute', + } diff --git a/pypy/module/_continuation/app_continuation.py b/pypy/module/_continuation/app_continuation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/app_continuation.py @@ -0,0 +1,35 @@ + +class error(Exception): + "Usage error of the _continuation module." 
+ + +import _continuation + + +class generator(object): + + def __init__(self, callable): + self.__func__ = callable + + def __get__(self, obj, type=None): + return generator(self.__func__.__get__(obj, type)) + + def __call__(self, *args, **kwds): + return genlet(self.__func__, *args, **kwds) + + +class genlet(_continuation.continulet): + + def __iter__(self): + return self + + def next(self, value=None): + res = self.switch(value) + if self.is_pending(): + return res + else: + if res is not None: + raise TypeError("_continuation.generator must return None") + raise StopIteration + + send = next diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/interp_continuation.py @@ -0,0 +1,245 @@ +from pypy.rlib.rstacklet import StackletThread +from pypy.rlib import jit +from pypy.interpreter.error import OperationError +from pypy.interpreter.executioncontext import ExecutionContext +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.gateway import interp2app + + +class W_Continulet(Wrappable): + sthread = None + + def __init__(self, space): + self.space = space + # states: + # - not init'ed: self.sthread == None + # - normal: self.sthread != None, not is_empty_handle(self.h) + # - finished: self.sthread != None, is_empty_handle(self.h) + + def check_sthread(self): + ec = self.space.getexecutioncontext() + if ec.stacklet_thread is not self.sthread: + start_state.clear() + raise geterror(self.space, "inter-thread support is missing") + return ec + + def descr_init(self, w_callable, __args__): + if self.sthread is not None: + raise geterror(self.space, "continulet already __init__ialized") + start_state.origin = self + start_state.w_callable = w_callable + start_state.args = __args__ + self.sthread = build_sthread(self.space) + try: + self.h = self.sthread.new(new_stacklet_callback) 
+ if self.sthread.is_empty_handle(self.h): # early return + raise MemoryError + except MemoryError: + self.sthread = None + start_state.clear() + raise getmemoryerror(self.space) + + def switch(self, w_to): + to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) + if to is not None: + if self is to: # double-switch to myself: no-op + return get_result() + if to.sthread is None: + start_state.clear() + raise geterror(self.space, "continulet not initialized yet") + if self.sthread is None: + start_state.clear() + raise geterror(self.space, "continulet not initialized yet") + ec = self.check_sthread() + saved_topframeref = ec.topframeref + # + start_state.origin = self + if to is None: + # simple switch: going to self.h + start_state.destination = self + else: + # double switch: the final destination is to.h + start_state.destination = to + # + h = start_state.destination.h + sthread = self.sthread + if sthread.is_empty_handle(h): + start_state.clear() + raise geterror(self.space, "continulet already finished") + # + try: + do_switch(sthread, h) + except MemoryError: + start_state.clear() + raise getmemoryerror(self.space) + # + ec = sthread.ec + ec.topframeref = saved_topframeref + return get_result() + + def descr_switch(self, w_value=None, w_to=None): + start_state.w_value = w_value + return self.switch(w_to) + + def descr_throw(self, w_type, w_val=None, w_tb=None, w_to=None): + from pypy.interpreter.pytraceback import check_traceback + space = self.space + # + msg = "throw() third argument must be a traceback object" + if space.is_w(w_tb, space.w_None): + tb = None + else: + tb = check_traceback(space, w_tb, msg) + # + operr = OperationError(w_type, w_val, tb) + operr.normalize_exception(space) + start_state.w_value = None + start_state.propagate_exception = operr + return self.switch(w_to) + + def descr_is_pending(self): + valid = (self.sthread is not None + and not self.sthread.is_empty_handle(self.h)) + return self.space.newbool(valid) + + +def 
W_Continulet___new__(space, w_subtype, __args__): + r = space.allocate_instance(W_Continulet, w_subtype) + r.__init__(space) + return space.wrap(r) + + +W_Continulet.typedef = TypeDef( + 'continulet', + __module__ = '_continuation', + __new__ = interp2app(W_Continulet___new__), + __init__ = interp2app(W_Continulet.descr_init), + switch = interp2app(W_Continulet.descr_switch), + throw = interp2app(W_Continulet.descr_throw), + is_pending = interp2app(W_Continulet.descr_is_pending), + ) + + +# ____________________________________________________________ + + +class State: + def __init__(self, space): + self.space = space + w_module = space.getbuiltinmodule('_continuation') + self.w_error = space.getattr(w_module, space.wrap('error')) + self.w_memoryerror = OperationError(space.w_MemoryError, space.w_None) + +def geterror(space, message): + cs = space.fromcache(State) + return OperationError(cs.w_error, space.wrap(message)) + +def getmemoryerror(space): + cs = space.fromcache(State) + return cs.w_memoryerror + +# ____________________________________________________________ + + +class SThread(StackletThread): + + def __init__(self, space, ec): + StackletThread.__init__(self, space.config) + self.space = space + self.ec = ec + +ExecutionContext.stacklet_thread = None + +# ____________________________________________________________ + + +class StartState: # xxx a single global to pass around the function to start + def clear(self): + self.origin = None + self.destination = None + self.w_callable = None + self.args = None + self.w_value = None + self.propagate_exception = None +start_state = StartState() +start_state.clear() + + +def new_stacklet_callback(h, arg): + self = start_state.origin + w_callable = start_state.w_callable + args = start_state.args + start_state.clear() + try: + do_switch(self.sthread, h) + except MemoryError: + return h # oups! 
do an early return in this case + # + space = self.space + try: + ec = self.sthread.ec + ec.topframeref = jit.vref_None + + if start_state.propagate_exception is not None: + raise start_state.propagate_exception # just propagate it further + if start_state.w_value is not space.w_None: + raise OperationError(space.w_TypeError, space.wrap( + "can't send non-None value to a just-started continulet")) + + args = args.prepend(self.space.wrap(self)) + w_result = space.call_args(w_callable, args) + except Exception, e: + start_state.propagate_exception = e + else: + start_state.w_value = w_result + start_state.origin = self + start_state.destination = self + return self.h + + +def do_switch(sthread, h): + h = sthread.switch(h) + origin = start_state.origin + self = start_state.destination + start_state.origin = None + start_state.destination = None + self.h, origin.h = origin.h, h + +def get_result(): + if start_state.propagate_exception: + e = start_state.propagate_exception + start_state.propagate_exception = None + raise e + w_value = start_state.w_value + start_state.w_value = None + return w_value + +def build_sthread(space): + ec = space.getexecutioncontext() + sthread = ec.stacklet_thread + if not sthread: + sthread = ec.stacklet_thread = SThread(space, ec) + return sthread + +# ____________________________________________________________ + +def permute(space, args_w): + sthread = build_sthread(space) + # + contlist = [] + for w_cont in args_w: + cont = space.interp_w(W_Continulet, w_cont) + if cont.sthread is not sthread: + if cont.sthread is None: + raise geterror(space, "got a non-initialized continulet") + else: + raise geterror(space, "inter-thread support is missing") + elif sthread.is_empty_handle(cont.h): + raise geterror(space, "got an already-finished continulet") + contlist.append(cont) + # + if len(contlist) > 1: + other = contlist[-1].h + for cont in contlist: + other, cont.h = cont.h, other diff --git a/pypy/module/_continuation/test/__init__.py 
b/pypy/module/_continuation/test/__init__.py new file mode 100644 diff --git a/pypy/module/_continuation/test/support.py b/pypy/module/_continuation/test/support.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/test/support.py @@ -0,0 +1,12 @@ +import py +from pypy.conftest import gettestobjspace +from pypy.rpython.tool.rffi_platform import CompilationError + + +class BaseAppTest: + def setup_class(cls): + try: + import pypy.rlib.rstacklet + except CompilationError, e: + py.test.skip("cannot import rstacklet: %s" % e) + cls.space = gettestobjspace(usemodules=['_continuation']) diff --git a/pypy/module/_continuation/test/test_generator.py b/pypy/module/_continuation/test/test_generator.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/test/test_generator.py @@ -0,0 +1,70 @@ +from pypy.module._continuation.test.support import BaseAppTest + + +class AppTestGenerator(BaseAppTest): + + def test_simple(self): + from _continuation import generator + # + @generator + def f(gen, n): + gen.switch(n+1) + f2(gen, n+2) + gen.switch(n+3) + # + def f2(gen, m): + gen.switch(m*2) + # + g = f(10) + res = g.next() + assert res == 11 + res = g.next() + assert res == 24 + res = g.next() + assert res == 13 + raises(StopIteration, g.next) + + def test_iterator(self): + from _continuation import generator + # + @generator + def f(gen, n): + gen.switch(n+1) + f2(gen, n+2) + gen.switch(n+3) + # + def f2(gen, m): + gen.switch(m*2) + # + res = list(f(10)) + assert res == [11, 24, 13] + g = f(20) + assert iter(g) is g + + def test_bound_method(self): + from _continuation import generator + # + class A(object): + def __init__(self, m): + self.m = m + # + @generator + def f(self, gen, n): + gen.switch(n - self.m) + # + a = A(10) + res = list(a.f(25)) + assert res == [15] + + def test_must_return_None(self): + from _continuation import generator + # + @generator + def f(gen, n): + gen.switch(n+1) + return "foo" + # + g = f(10) + res = g.next() + assert res 
== 11 + raises(TypeError, g.next) diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -0,0 +1,635 @@ +import os +from pypy.module._continuation.test.support import BaseAppTest + + +class AppTestStacklet(BaseAppTest): + def setup_class(cls): + BaseAppTest.setup_class.im_func(cls) + cls.w_translated = cls.space.wrap( + os.path.join(os.path.dirname(__file__), + 'test_translated.py')) + + def test_new_empty(self): + from _continuation import continulet + # + def empty_callback(c): + pass + # + c = continulet(empty_callback) + assert type(c) is continulet + + def test_call_empty(self): + from _continuation import continulet + # + def empty_callback(c1): + assert c1 is c + seen.append(1) + return 42 + # + seen = [] + c = continulet(empty_callback) + res = c.switch() + assert res == 42 + assert seen == [1] + + def test_no_double_init(self): + from _continuation import continulet, error + # + def empty_callback(c1): + pass + # + c = continulet(empty_callback) + raises(error, c.__init__, empty_callback) + + def test_no_init_after_started(self): + from _continuation import continulet, error + # + def empty_callback(c1): + raises(error, c1.__init__, empty_callback) + return 42 + # + c = continulet(empty_callback) + res = c.switch() + assert res == 42 + + def test_no_init_after_finished(self): + from _continuation import continulet, error + # + def empty_callback(c1): + return 42 + # + c = continulet(empty_callback) + res = c.switch() + assert res == 42 + raises(error, c.__init__, empty_callback) + + def test_propagate_exception(self): + from _continuation import continulet + # + def empty_callback(c1): + assert c1 is c + seen.append(42) + raise ValueError + # + seen = [] + c = continulet(empty_callback) + raises(ValueError, c.switch) + assert seen == [42] + + def test_callback_with_arguments(self): + from _continuation import 
continulet + # + def empty_callback(c1, *args, **kwds): + seen.append(c1) + seen.append(args) + seen.append(kwds) + return 42 + # + seen = [] + c = continulet(empty_callback, 42, 43, foo=44, bar=45) + res = c.switch() + assert res == 42 + assert seen == [c, (42, 43), {'foo': 44, 'bar': 45}] + + def test_switch(self): + from _continuation import continulet + # + def switchbackonce_callback(c): + seen.append(1) + res = c.switch('a') + assert res == 'b' + seen.append(3) + return 'c' + # + seen = [] + c = continulet(switchbackonce_callback) + seen.append(0) + res = c.switch() + assert res == 'a' + seen.append(2) + res = c.switch('b') + assert res == 'c' + assert seen == [0, 1, 2, 3] + + def test_initial_switch_must_give_None(self): + from _continuation import continulet + # + def empty_callback(c): + return 'ok' + # + c = continulet(empty_callback) + res = c.switch(None) + assert res == 'ok' + # + c = continulet(empty_callback) + raises(TypeError, c.switch, 'foo') # "can't send non-None value" + + def test_continuation_error(self): + from _continuation import continulet, error + # + def empty_callback(c): + return 42 + # + c = continulet(empty_callback) + c.switch() + e = raises(error, c.switch) + assert str(e.value) == "continulet already finished" + + def test_not_initialized_yet(self): + from _continuation import continulet, error + c = continulet.__new__(continulet) + e = raises(error, c.switch) + assert str(e.value) == "continulet not initialized yet" + + def test_go_depth2(self): + from _continuation import continulet + # + def depth2(c): + seen.append(3) + return 4 + # + def depth1(c): + seen.append(1) + c2 = continulet(depth2) + seen.append(2) + res = c2.switch() + seen.append(res) + return 5 + # + seen = [] + c = continulet(depth1) + seen.append(0) + res = c.switch() + seen.append(res) + assert seen == [0, 1, 2, 3, 4, 5] + + def test_exception_depth2(self): + from _continuation import continulet + # + def depth2(c): + seen.append(2) + raise ValueError + # + 
def depth1(c): + seen.append(1) + try: + continulet(depth2).switch() + except ValueError: + seen.append(3) + return 4 + # + seen = [] + c = continulet(depth1) + res = c.switch() + seen.append(res) + assert seen == [1, 2, 3, 4] + + def test_exception_with_switch(self): + from _continuation import continulet + # + def depth1(c): + seen.append(1) + c.switch() + seen.append(3) + raise ValueError + # + seen = [] + c = continulet(depth1) + seen.append(0) + c.switch() + seen.append(2) + raises(ValueError, c.switch) + assert seen == [0, 1, 2, 3] + + def test_is_pending(self): + from _continuation import continulet + # + def switchbackonce_callback(c): + assert c.is_pending() + res = c.switch('a') + assert res == 'b' + assert c.is_pending() + return 'c' + # + c = continulet.__new__(continulet) + assert not c.is_pending() + c.__init__(switchbackonce_callback) + assert c.is_pending() + res = c.switch() + assert res == 'a' + assert c.is_pending() + res = c.switch('b') + assert res == 'c' + assert not c.is_pending() + + def test_switch_alternate(self): + from _continuation import continulet + # + def func_lower(c): + res = c.switch('a') + assert res == 'b' + res = c.switch('c') + assert res == 'd' + return 'e' + # + def func_upper(c): + res = c.switch('A') + assert res == 'B' + res = c.switch('C') + assert res == 'D' + return 'E' + # + c_lower = continulet(func_lower) + c_upper = continulet(func_upper) + res = c_lower.switch() + assert res == 'a' + res = c_upper.switch() + assert res == 'A' + res = c_lower.switch('b') + assert res == 'c' + res = c_upper.switch('B') + assert res == 'C' + res = c_lower.switch('d') + assert res == 'e' + res = c_upper.switch('D') + assert res == 'E' + + def test_exception_with_switch_depth2(self): + from _continuation import continulet + # + def depth2(c): + seen.append(4) + c.switch() + seen.append(6) + raise ValueError + # + def depth1(c): + seen.append(1) + c.switch() + seen.append(3) + c2 = continulet(depth2) + c2.switch() + seen.append(5) + 
raises(ValueError, c2.switch) + assert not c2.is_pending() + seen.append(7) + assert c.is_pending() + raise KeyError + # + seen = [] + c = continulet(depth1) + c.switch() + seen.append(2) + raises(KeyError, c.switch) + assert not c.is_pending() + assert seen == [1, 2, 3, 4, 5, 6, 7] + + def test_random_switching(self): + from _continuation import continulet + # + def t1(c1): + return c1.switch() + def s1(c1, n): + assert n == 123 + c2 = t1(c1) + return c1.switch('a') + 1 + # + def s2(c2, c1): + res = c1.switch(c2) + assert res == 'a' + return c2.switch('b') + 2 + # + def f(): + c1 = continulet(s1, 123) + c2 = continulet(s2, c1) + c1.switch() + res = c2.switch() + assert res == 'b' + res = c1.switch(1000) + assert res == 1001 + return c2.switch(2000) + # + res = f() + assert res == 2002 + + def test_f_back_is_None_for_now(self): + import sys + from _continuation import continulet + # + def g(c): + c.switch(sys._getframe(0)) + c.switch(sys._getframe(0).f_back) + c.switch(sys._getframe(1)) + c.switch(sys._getframe(1).f_back) + c.switch(sys._getframe(2)) + def f(c): + g(c) + # + c = continulet(f) + f1 = c.switch() + assert f1.f_code.co_name == 'g' + f2 = c.switch() + assert f2.f_code.co_name == 'f' + f3 = c.switch() + assert f3.f_code.co_name == 'f' + f4 = c.switch() + assert f4 is None + raises(ValueError, c.switch) # "call stack is not deep enough" + + def test_traceback_is_complete(self): + import sys + from _continuation import continulet + # + def g(): + raise KeyError + def f(c): + g() + # + def do(c): + c.switch() + # + c = continulet(f) + try: + do(c) + except KeyError: + tb = sys.exc_info()[2] + else: + raise AssertionError("should have raised!") + # + assert tb.tb_next.tb_frame.f_code.co_name == 'do' + assert tb.tb_next.tb_next.tb_frame.f_code.co_name == 'f' + assert tb.tb_next.tb_next.tb_next.tb_frame.f_code.co_name == 'g' + assert tb.tb_next.tb_next.tb_next.tb_next is None + + def test_switch2_simple(self): + from _continuation import continulet + # + def 
f1(c1): + res = c1.switch('started 1') + assert res == 'a' + res = c1.switch('b', to=c2) + assert res == 'c' + return 42 + def f2(c2): + res = c2.switch('started 2') + assert res == 'b' + res = c2.switch('c', to=c1) + not_reachable + # + c1 = continulet(f1) + c2 = continulet(f2) + res = c1.switch() + assert res == 'started 1' + res = c2.switch() + assert res == 'started 2' + res = c1.switch('a') + assert res == 42 + + def test_switch2_pingpong(self): + from _continuation import continulet + # + def f1(c1): + res = c1.switch('started 1') + assert res == 'go' + for i in range(10): + res = c1.switch(i, to=c2) + assert res == 100 + i + return 42 + def f2(c2): + res = c2.switch('started 2') + for i in range(10): + assert res == i + res = c2.switch(100 + i, to=c1) + not_reachable + # + c1 = continulet(f1) + c2 = continulet(f2) + res = c1.switch() + assert res == 'started 1' + res = c2.switch() + assert res == 'started 2' + res = c1.switch('go') + assert res == 42 + + def test_switch2_more_complex(self): + from _continuation import continulet + # + def f1(c1): + res = c1.switch(to=c2) + assert res == 'a' + res = c1.switch('b', to=c2) + assert res == 'c' + return 41 + def f2(c2): + res = c2.switch('a', to=c1) + assert res == 'b' + return 42 + # + c1 = continulet(f1) + c2 = continulet(f2) + res = c1.switch() + assert res == 42 + assert not c2.is_pending() # finished by returning 42 + res = c1.switch('c') + assert res == 41 + + def test_switch2_no_op(self): + from _continuation import continulet + # + def f1(c1): + res = c1.switch('a', to=c1) + assert res == 'a' + return 42 + # + c1 = continulet(f1) + res = c1.switch() + assert res == 42 + + def test_switch2_immediately_away(self): + from _continuation import continulet + # + def f1(c1): + print 'in f1' + return 'm' + # + def f2(c2): + res = c2.switch('z') + print 'got there!' 
+ assert res == 'a' + return None + # + c1 = continulet(f1) + c2 = continulet(f2) + res = c2.switch() + assert res == 'z' + assert c1.is_pending() + assert c2.is_pending() + print 'calling!' + res = c1.switch('a', to=c2) + print 'back' + assert res == 'm' + + def test_switch2_immediately_away_corner_case(self): + from _continuation import continulet + # + def f1(c1): + this_is_never_seen + # + def f2(c2): + res = c2.switch('z') + assert res is None + return 'b' # this goes back into the caller, which is f1, + # but f1 didn't start yet, so a None-value value + # has nowhere to go to... + c1 = continulet(f1) + c2 = continulet(f2) + res = c2.switch() + assert res == 'z' + raises(TypeError, c1.switch, to=c2) # "can't send non-None value" + + def test_switch2_not_initialized_yet(self): + from _continuation import continulet, error + # + def f1(c1): + not_reachable + # + c1 = continulet(f1) + c2 = continulet.__new__(continulet) + e = raises(error, c1.switch, to=c2) + assert str(e.value) == "continulet not initialized yet" + + def test_switch2_already_finished(self): + from _continuation import continulet, error + # + def f1(c1): + not_reachable + def empty_callback(c): + return 42 + # + c1 = continulet(f1) + c2 = continulet(empty_callback) + c2.switch() + e = raises(error, c1.switch, to=c2) + assert str(e.value) == "continulet already finished" + + def test_throw(self): + import sys + from _continuation import continulet + # + def f1(c1): + try: + c1.switch() + except KeyError: + res = "got keyerror" + try: + c1.switch(res) + except IndexError, e: + pass + try: + c1.switch(e) + except IndexError, e2: + pass + try: + c1.switch(e2) + except IndexError: + c1.throw(*sys.exc_info()) + should_never_reach_here + # + c1 = continulet(f1) + c1.switch() + res = c1.throw(KeyError) + assert res == "got keyerror" + class FooError(IndexError): + pass + foo = FooError() + res = c1.throw(foo) + assert res is foo + res = c1.throw(IndexError, foo) + assert res is foo + # + def main(): + 
def do_raise(): + raise foo + try: + do_raise() + except IndexError: + tb = sys.exc_info()[2] + try: + c1.throw(IndexError, foo, tb) + except IndexError: + tb = sys.exc_info()[2] + return tb + # + tb = main() + assert tb.tb_frame.f_code.co_name == 'main' + assert tb.tb_next.tb_frame.f_code.co_name == 'f1' + assert tb.tb_next.tb_next.tb_frame.f_code.co_name == 'main' + assert tb.tb_next.tb_next.tb_next.tb_frame.f_code.co_name == 'do_raise' + assert tb.tb_next.tb_next.tb_next.tb_next is None + + def test_throw_to_starting(self): + from _continuation import continulet + # + def f1(c1): + not_reached + # + c1 = continulet(f1) + raises(IndexError, c1.throw, IndexError) + + def test_throw2_simple(self): + from _continuation import continulet + # + def f1(c1): + not_reached + def f2(c2): + try: + c2.switch("ready") + except IndexError: + raise ValueError + # + c1 = continulet(f1) + c2 = continulet(f2) + res = c2.switch() + assert res == "ready" + assert c1.is_pending() + assert c2.is_pending() + raises(ValueError, c1.throw, IndexError, to=c2) + assert not c1.is_pending() + assert not c2.is_pending() + + def test_throw2_no_op(self): + from _continuation import continulet + # + def f1(c1): + raises(ValueError, c1.throw, ValueError, to=c1) + return "ok" + # + c1 = continulet(f1) + res = c1.switch() + assert res == "ok" + + def test_permute(self): + from _continuation import continulet, permute + # + def f1(c1): + res = c1.switch() + assert res == "ok" + return "done" + # + def f2(c2): + permute(c1, c2) + return "ok" + # + c1 = continulet(f1) + c2 = continulet(f2) + c1.switch() + res = c2.switch() + assert res == "done" + + def test_various_depths(self): + skip("may fail on top of CPython") + # run it from test_translated, but not while being actually translated + d = {} + execfile(self.translated, d) + d['set_fast_mode']() + d['test_various_depths']() diff --git a/pypy/module/_continuation/test/test_translated.py b/pypy/module/_continuation/test/test_translated.py new file 
mode 100644 --- /dev/null +++ b/pypy/module/_continuation/test/test_translated.py @@ -0,0 +1,132 @@ +import py +try: + import _continuation +except ImportError: + py.test.skip("to run on top of a translated pypy-c") + +import sys, random + +# ____________________________________________________________ + +STATUS_MAX = 50000 +CONTINULETS = 50 + +def set_fast_mode(): + global STATUS_MAX, CONTINULETS + STATUS_MAX = 100 + CONTINULETS = 5 + +# ____________________________________________________________ + +class Done(Exception): + pass + + +class Runner(object): + + def __init__(self): + self.foobar = 12345 + self.conts = {} # {continulet: parent-or-None} + self.contlist = [] + + def run_test(self): + self.start_continulets() + self.n = 0 + try: + while True: + self.do_switch(src=None) + assert self.target is None + except Done: + self.check_traceback(sys.exc_info()[2]) + + def do_switch(self, src): + assert src not in self.conts.values() + c = random.choice(self.contlist) + self.target = self.conts[c] + self.conts[c] = src + c.switch() + assert self.target is src + + def run_continulet(self, c, i): + while True: + assert self.target is c + assert self.contlist[i] is c + self.do_switch(c) + assert self.foobar == 12345 + self.n += 1 + if self.n >= STATUS_MAX: + raise Done + + def start_continulets(self, i=0): + c = _continuation.continulet(self.run_continulet, i) + self.contlist.append(c) + if i < CONTINULETS: + self.start_continulets(i + 1) + # ^^^ start each continulet with a different base stack + self.conts[c] = c # initially (i.e. 
not started) there are all loops + + def check_traceback(self, tb): + found = [] + tb = tb.tb_next + while tb: + if tb.tb_frame.f_code.co_name != 'do_switch': + assert tb.tb_frame.f_code.co_name == 'run_continulet', ( + "got %r" % (tb.tb_frame.f_code.co_name,)) + found.append(tb.tb_frame.f_locals['c']) + tb = tb.tb_next + found.reverse() + # + expected = [] + c = self.target + while c is not None: + expected.append(c) + c = self.conts[c] + # + assert found == expected, "%r == %r" % (found, expected) + +# ____________________________________________________________ + +class AppTestWrapper: + def setup_class(cls): + "Run test_various_depths() when we are run with 'pypy py.test -A'." + from pypy.conftest import option + if not option.runappdirect: + py.test.skip("meant only for -A run") + + def test_single_threaded(self): + for i in range(20): + yield Runner().run_test, + + def test_multi_threaded(self): + for i in range(5): + yield multithreaded_test, + +class ThreadTest(object): + def __init__(self, lock): + self.lock = lock + self.ok = False + lock.acquire() + def run(self): + try: + Runner().run_test() + self.ok = True + finally: + self.lock.release() + +def multithreaded_test(): + try: + import thread + except ImportError: + py.test.skip("no threads") + ts = [ThreadTest(thread.allocate_lock()) for i in range(5)] + for t in ts: + thread.start_new_thread(t.run, ()) + for t in ts: + t.lock.acquire() + for t in ts: + assert t.ok + +# ____________________________________________________________ + +if __name__ == '__main__': + Runner().run_test() diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -351,6 +351,7 @@ self.decompressor = W_BZ2Decompressor(space) self.readlength = r_longlong(0) self.buffer = "" + self.pos = 0 self.finished = False if buffering < 1024: buffering = 1024 # minimum amount of compressed data read at once @@ -385,6 +386,7 @@ self.stream.seek(0, 0) 
self.decompressor = W_BZ2Decompressor(self.space) self.readlength = r_longlong(0) + self.pos = 0 self.buffer = "" self.finished = False else: @@ -410,15 +412,19 @@ self.space.wrap("compressed file ended before the logical end-of-the-stream was detected")) result = self.space.str_w(w_result) self.readlength += len(result) - result = self.buffer + result + if len(self.buffer) != self.pos: + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:] + result self.buffer = '' + self.pos = 0 return result def read(self, n): # XXX not nice if n <= 0: return '' - while not self.buffer: + while self.pos == len(self.buffer): if self.finished: return "" moredata = self.stream.read(max(self.buffering, n)) @@ -433,17 +439,25 @@ return "" raise self.buffer = self.space.str_w(w_read) - if len(self.buffer) >= n: - result = self.buffer[:n] - self.buffer = self.buffer[n:] + self.pos = 0 + if len(self.buffer) - self.pos >= n: + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:pos + n] + self.pos += n else: - result = self.buffer + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:] + self.pos = 0 self.buffer = "" self.readlength += len(result) return result def peek(self): - return self.buffer + pos = self.pos + assert pos >= 0 + return self.buffer[pos:] def try_to_find_file_descriptor(self): return self.stream.try_to_find_file_descriptor() diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -274,14 +274,14 @@ pass del bz2f # delete from this frame, which is captured in the traceback - def test_read_chunk10(self): + def test_read_chunk9(self): from bz2 import BZ2File self.create_temp_file() bz2f = BZ2File(self.temppath) text_read = "" while True: - data = bz2f.read(10) + data = bz2f.read(9) # 9 doesn't divide evenly into data length if not data: break text_read = "%s%s" % (text_read, data) diff --git 
a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -57,7 +57,7 @@ code = space.interp_w(PyCode, w_code) w_globals = from_ref(space, py_frame.c_f_globals) - frame = space.FrameClass(space, code, w_globals, closure=None) + frame = space.FrameClass(space, code, w_globals, outer_func=None) frame.f_lineno = py_frame.c_f_lineno w_obj = space.wrap(frame) track_reference(space, py_obj, w_obj) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,43 +1,46 @@ - from pypy.interpreter.mixedmodule import MixedModule + class Module(MixedModule): - applevel_name = 'numpy' interpleveldefs = { 'array': 'interp_numarray.SingleDimArray', 'dtype': 'interp_dtype.W_Dtype', + 'ufunc': 'interp_ufuncs.W_Ufunc', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', 'fromstring': 'interp_support.fromstring', + } - # ufuncs - 'abs': 'interp_ufuncs.absolute', - 'absolute': 'interp_ufuncs.absolute', - 'add': 'interp_ufuncs.add', - 'copysign': 'interp_ufuncs.copysign', - 'divide': 'interp_ufuncs.divide', - 'exp': 'interp_ufuncs.exp', - 'fabs': 'interp_ufuncs.fabs', - 'floor': 'interp_ufuncs.floor', - 'maximum': 'interp_ufuncs.maximum', - 'minimum': 'interp_ufuncs.minimum', - 'multiply': 'interp_ufuncs.multiply', - 'negative': 'interp_ufuncs.negative', - 'reciprocal': 'interp_ufuncs.reciprocal', - 'sign': 'interp_ufuncs.sign', - 'subtract': 'interp_ufuncs.subtract', - 'sin': 'interp_ufuncs.sin', - 'cos': 'interp_ufuncs.cos', - 'tan': 'interp_ufuncs.tan', - 'arcsin': 'interp_ufuncs.arcsin', - 'arccos': 'interp_ufuncs.arccos', - 'arctan': 'interp_ufuncs.arctan', - 'equal': 'interp_ufuncs.equal', - } + # ufuncs + for exposed, impl in [ + ("abs", "absolute"), + ("absolute", "absolute"), + ("add", "add"), + ("arccos", "arccos"), + 
("arcsin", "arcsin"), + ("arctan", "arctan"), + ("copysign", "copysign"), + ("cos", "cos"), + ("divide", "divide"), + ("exp", "exp"), + ("fabs", "fabs"), + ("floor", "floor"), + ("maximum", "maximum"), + ("minimum", "minimum"), + ("multiply", "multiply"), + ("negative", "negative"), + ("reciprocal", "reciprocal"), + ("sign", "sign"), + ("sin", "sin"), + ("subtract", "subtract"), + ("tan", "tan"), + ("equal", "equal") + ]: + interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl appleveldefs = { 'average': 'app_numpy.average', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -20,6 +20,7 @@ class FakeSpace(object): w_ValueError = None + w_TypeError = None def __init__(self): """NOT_RPYTHON""" diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -53,7 +53,9 @@ VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype): +def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype, + expected_size=None): + class Box(BaseBox): def __init__(self, val): self.val = val @@ -113,6 +115,8 @@ W_LowLevelDtype.aliases = aliases W_LowLevelDtype.applevel_types = applevel_types W_LowLevelDtype.num_bytes = rffi.sizeof(T) + if expected_size is not None: + assert W_LowLevelDtype.num_bytes == expected_size return W_LowLevelDtype @@ -263,6 +267,9 @@ class IntegerArithmeticDtype(ArithmaticTypeMixin): _mixin_ = True + def unwrap(self, space, w_item): + return self.adapt_val(space.int_w(space.int(w_item))) + def for_computation(self, v): return widen(v) @@ -290,7 +297,7 @@ T = lltype.Bool, valtype = bool, ) -class W_BoolDtype(W_BoolDtype): +class W_BoolDtype(IntegerArithmeticDtype, W_BoolDtype): 
def unwrap(self, space, w_item): return self.adapt_val(space.is_true(w_item)) @@ -301,20 +308,27 @@ def for_computation(self, v): return int(v) - @binop - def add(self, v1, v2): - return bool(v1 + v2) - W_Int8Dtype = create_low_level_dtype( num = 1, kind = SIGNEDLTR, name = "int8", aliases = ["int8"], applevel_types = [], T = rffi.SIGNEDCHAR, valtype = rffi.SIGNEDCHAR._type, + expected_size = 1, ) class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) + pass + +W_Int16Dtype = create_low_level_dtype( + num = 3, kind = SIGNEDLTR, name = "int16", + aliases = ["int16"], + applevel_types = [], + T = rffi.SHORT, + valtype = rffi.SHORT._type, + expected_size = 2, +) +class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): + pass W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", @@ -322,10 +336,10 @@ applevel_types = [], T = rffi.INT, valtype = rffi.INT._type, + expected_size = 4, ) class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) + pass W_Int64Dtype = create_low_level_dtype( num = 9, kind = SIGNEDLTR, name = "int64", @@ -333,10 +347,10 @@ applevel_types = ["long"], T = rffi.LONGLONG, valtype = rffi.LONGLONG._type, + expected_size = 8, ) class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) + pass W_Float64Dtype = create_low_level_dtype( num = 12, kind = FLOATINGLTR, name = "float64", @@ -344,6 +358,7 @@ applevel_types = ["float"], T = lltype.Float, valtype = float, + expected_size = 8, ) class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): def unwrap(self, space, w_item): @@ -354,7 +369,7 @@ ALL_DTYPES = [ W_BoolDtype, - W_Int8Dtype, W_Int32Dtype, W_Int64Dtype, + W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, W_Float64Dtype ] @@ -384,3 +399,4 
@@ kind = interp_attrproperty("kind", cls=W_Dtype), shape = GetSetProperty(W_Dtype.descr_get_shape), ) +W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -53,26 +53,26 @@ i += 1 return arr - def _unaryop_impl(w_ufunc): + def _unaryop_impl(ufunc_name): def impl(self, space): - return w_ufunc(space, self) - return func_with_new_name(impl, "unaryop_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) - descr_pos = _unaryop_impl(interp_ufuncs.positive) - descr_neg = _unaryop_impl(interp_ufuncs.negative) - descr_abs = _unaryop_impl(interp_ufuncs.absolute) + descr_pos = _unaryop_impl("positive") + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") - def _binop_impl(w_ufunc): + def _binop_impl(ufunc_name): def impl(self, space, w_other): - return w_ufunc(space, self, w_other) - return func_with_new_name(impl, "binop_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) - descr_add = _binop_impl(interp_ufuncs.add) - descr_sub = _binop_impl(interp_ufuncs.subtract) - descr_mul = _binop_impl(interp_ufuncs.multiply) - descr_div = _binop_impl(interp_ufuncs.divide) - descr_pow = _binop_impl(interp_ufuncs.power) - descr_mod = _binop_impl(interp_ufuncs.mod) + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_pow = _binop_impl("power") + descr_mod = _binop_impl("mod") descr_eq = _binop_impl(interp_ufuncs.equal) descr_ne = _binop_impl(interp_ufuncs.not_equal) @@ -81,69 +81,31 @@ descr_gt = _binop_impl(interp_ufuncs.greater) 
descr_ge = _binop_impl(interp_ufuncs.greater_equal) - def _binop_right_impl(w_ufunc): + def _binop_right_impl(ufunc_name): def impl(self, space, w_other): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return w_ufunc(space, w_other, self) - return func_with_new_name(impl, "binop_right_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) - descr_radd = _binop_right_impl(interp_ufuncs.add) - descr_rsub = _binop_right_impl(interp_ufuncs.subtract) - descr_rmul = _binop_right_impl(interp_ufuncs.multiply) - descr_rdiv = _binop_right_impl(interp_ufuncs.divide) - descr_rpow = _binop_right_impl(interp_ufuncs.power) - descr_rmod = _binop_right_impl(interp_ufuncs.mod) + descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") + descr_rmul = _binop_right_impl("multiply") + descr_rdiv = _binop_right_impl("divide") + descr_rpow = _binop_right_impl("power") + descr_rmod = _binop_right_impl("mod") - def _reduce_sum_prod_impl(op_name, init): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'self', 'result', 'res_dtype']) + def _reduce_ufunc_impl(ufunc_name): + def impl(self, space): + return getattr(interp_ufuncs.get(space), ufunc_name).descr_reduce(space, self) + return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) - def loop(self, res_dtype, result, size): - i = 0 - while i < size: - reduce_driver.jit_merge_point(signature=self.signature, - self=self, res_dtype=res_dtype, - size=size, i=i, result=result) - result = getattr(res_dtype, op_name)( - result, - self.eval(i).convert_to(res_dtype) - ) - i += 1 - return result - - def impl(self, space): - dtype = interp_ufuncs.find_unaryop_result_dtype( - space, self.find_dtype(), promote_to_largest=True - ) - result = dtype.adapt_val(init) - return loop(self, dtype, result, 
self.find_size()).wrap(space) - return func_with_new_name(impl, "reduce_%s_impl" % op_name) - - def _reduce_max_min_impl(op_name): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'self', 'result', 'dtype']) - def loop(self, result, size): - i = 1 - dtype = self.find_dtype() - while i < size: - reduce_driver.jit_merge_point(signature=self.signature, - self=self, dtype=dtype, - size=size, i=i, result=result) - result = getattr(dtype, op_name)(result, self.eval(i)) - i += 1 - return result - - def impl(self, space): - size = self.find_size() - if size == 0: - raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) - return loop(self, self.eval(0), size).wrap(space) - return func_with_new_name(impl, "reduce_%s_impl" % op_name) + descr_sum = _reduce_ufunc_impl("add") + descr_prod = _reduce_ufunc_impl("multiply") + descr_max = _reduce_ufunc_impl("maximum") + descr_min = _reduce_ufunc_impl("minimum") def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver(greens=['signature'], @@ -199,10 +161,6 @@ def descr_any(self, space): return space.wrap(self._any()) - descr_sum = _reduce_sum_prod_impl("add", 0) - descr_prod = _reduce_sum_prod_impl("mul", 1) - descr_max = _reduce_max_min_impl("max") - descr_min = _reduce_max_min_impl("min") descr_argmax = _reduce_argmax_argmin_impl("max") descr_argmin = _reduce_argmax_argmin_impl("min") @@ -255,7 +213,7 @@ res = "array([" + ", ".join(concrete._getnums(False)) + "]" dtype = concrete.find_dtype() if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)): + dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or not self.find_size(): res += ", dtype=" + dtype.name res += ")" return space.wrap(res) @@ -266,7 +224,15 @@ return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") def descr_getitem(self, space, w_idx): - # TODO: indexing by tuples + # TODO: indexing by 
arrays and lists + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length == 0: + return space.wrap(self) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: # Single index @@ -280,8 +246,19 @@ return space.wrap(res) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by tuples and lists + # TODO: indexing by arrays and lists self.invalidated() + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if length == 0: + w_idx = space.newslice(space.wrap(0), + space.wrap(self.find_size()), + space.wrap(1)) + else: + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: @@ -487,7 +464,8 @@ return self.parent.setitem_w(space, self.calc_index(item), w_value) def setitem(self, item, value): - return self.parent.setitem(self.calc_index(item), value) + # This is currently not possible to be called from anywhere. 
+ raise NotImplementedError def descr_len(self, space): return space.wrap(self.find_size()) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,65 +1,171 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_dtype, signature +from pypy.rlib import jit from pypy.tool.sourcetools import func_with_new_name -def ufunc(func=None, promote_to_float=False): - if func is None: - return lambda func: ufunc(func, promote_to_float) - call_sig = signature.Call1(func) - def impl(space, w_obj): +reduce_driver = jit.JitDriver( + greens = ["signature"], + reds = ["i", "size", "self", "dtype", "value", "obj"] +) + +class W_Ufunc(Wrappable): + _attrs_ = ["name", "promote_to_float", "promote_bools", "bool_result", "identity"] + + def __init__(self, name, promote_to_float, promote_bools, bool_result, identity): + self.name = name + self.promote_to_float = promote_to_float + self.promote_bools = promote_bools + self.bool_result = bool_result + self.identity = identity + + def descr_repr(self, space): + return space.wrap("" % self.name) + + def descr_get_identity(self, space): + if self.identity is None: + return space.w_None + return self.identity.wrap(space) + + def descr_call(self, space, __args__): + try: + args_w = __args__.fixedunpack(self.argcount) + except ValueError, e: + raise OperationError(space.w_TypeError, space.wrap(str(e))) + return self.call(space, args_w) + + def descr_reduce(self, space, w_obj): + from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + + if self.argcount != 2: + raise OperationError(space.w_ValueError, space.wrap("reduce only " + "supported for binary 
functions")) + + assert isinstance(self, W_Ufunc2) + obj = convert_to_array(space, w_obj) + if isinstance(obj, Scalar): + raise OperationError(space.w_TypeError, space.wrap("cannot reduce " + "on a scalar")) + + size = obj.find_size() + dtype = find_unaryop_result_dtype( + space, obj.find_dtype(), + promote_to_largest=True + ) + start = 0 + if self.identity is None: + if size == 0: + raise operationerrfmt(space.w_ValueError, "zero-size array to " + "%s.reduce without identity", self.name) + value = obj.eval(0).convert_to(dtype) + start += 1 + else: + value = self.identity.convert_to(dtype) + new_sig = signature.Signature.find_sig([ + self.reduce_signature, obj.signature + ]) + return self.reduce(new_sig, start, value, obj, dtype, size).wrap(space) + + def reduce(self, signature, start, value, obj, dtype, size): + i = start + while i < size: + reduce_driver.jit_merge_point(signature=signature, self=self, + value=value, obj=obj, i=i, + dtype=dtype, size=size) + value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) + i += 1 + return value + +class W_Ufunc1(W_Ufunc): + argcount = 1 + + def __init__(self, func, name, promote_to_float=False, promote_bools=False, + identity=None): + + W_Ufunc.__init__(self, name, promote_to_float, promote_bools, False, identity) + self.func = func + self.signature = signature.Call1(func) + + def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call1, convert_to_array, Scalar) + [w_obj] = args_w w_obj = convert_to_array(space, w_obj) res_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), - promote_to_float=promote_to_float, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) - new_sig = signature.Signature.find_sig([call_sig, w_obj.signature]) + new_sig = 
signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res - return func_with_new_name(impl, "%s_dispatcher" % func.__name__) -def ufunc2(func=None, promote_to_float=False, bool_result=False): - if func is None: - return lambda func: ufunc2(func, promote_to_float, bool_result) +class W_Ufunc2(W_Ufunc): + argcount = 2 - call_sig = signature.Call2(func) - def impl(space, w_lhs, w_rhs): + def __init__(self, func, name, promote_to_float=False, promote_bools=False, + bool_result=False, identity=None): + + W_Ufunc.__init__(self, name, promote_to_float, promote_bools, bool_result, identity) + self.func = func + self.signature = signature.Call2(func) + self.reduce_signature = signature.BaseSignature() + + def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar) + [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) calc_dtype = find_binop_result_dtype(space, w_lhs.find_dtype(), w_rhs.find_dtype(), - promote_to_float=promote_to_float, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, ) # Some operations return bool regardless of input type - if bool_result: + if self.bool_result: res_dtype = space.fromcache(interp_dtype.W_BoolDtype) else: res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): lhs = w_lhs.value.convert_to(calc_dtype) rhs = w_rhs.value.convert_to(calc_dtype) - interm_res = func(calc_dtype, lhs, rhs) + interm_res = self.func(calc_dtype, lhs, rhs) return interm_res.convert_to(res_dtype).wrap(space) + return self.func(res_dtype, w_lhs.value, w_rhs.value).wrap(space) new_sig = signature.Signature.find_sig([ - call_sig, w_lhs.signature, w_rhs.signature + self.signature, w_lhs.signature, w_rhs.signature ]) - w_res = Call2(new_sig, res_dtype, calc_dtype, w_lhs, w_rhs) + w_res = Call2(new_sig, res_dtype, res_dtype, 
calc_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res - return func_with_new_name(impl, "%s_dispatcher" % func.__name__) -def find_binop_result_dtype(space, dt1, dt2, promote_bools=False, promote_to_float=False): + +W_Ufunc.typedef = TypeDef("ufunc", + __module__ = "numpy", + + __call__ = interp2app(W_Ufunc.descr_call), + __repr__ = interp2app(W_Ufunc.descr_repr), + + identity = GetSetProperty(W_Ufunc.descr_get_identity), + nin = interp_attrproperty("argcount", cls=W_Ufunc), + + reduce = interp2app(W_Ufunc.descr_reduce), +) + +def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, + promote_bools=False): # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 @@ -79,7 +185,9 @@ assert False def find_unaryop_result_dtype(space, dt, promote_to_float=False, - promote_to_largest=False): + promote_bools=False, promote_to_largest=False): + if promote_bools and (dt.kind == interp_dtype.BOOLLTR): + return space.fromcache(interp_dtype.W_Int8Dtype) if promote_to_float: for bytes, dtype in interp_dtype.dtypes_by_num_bytes: if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes >= dt.num_bytes: @@ -109,60 +217,72 @@ return space.fromcache(interp_dtype.W_Float64Dtype) -def ufunc_dtype_caller(ufunc_name, op_name, argcount, **kwargs): +def ufunc_dtype_caller(ufunc_name, op_name, argcount): if argcount == 1: - @ufunc(**kwargs) def impl(res_dtype, value): return getattr(res_dtype, op_name)(value) elif argcount == 2: - @ufunc2(**kwargs) def impl(res_dtype, lvalue, rvalue): return getattr(res_dtype, op_name)(lvalue, rvalue) return func_with_new_name(impl, ufunc_name) -for ufunc_def in [ - ("add", "add", 2), - ("subtract", "sub", 2), - ("multiply", "mul", 2), - ("divide", "div", 2), - ("mod", "mod", 2), - ("power", "pow", 2), +class UfuncState(object): + def __init__(self, space): + "NOT_RPYTHON" + for ufunc_def in [ + ("add", "add", 2, {"identity": 0}), + ("subtract", "sub", 2), + ("multiply", "mul", 2, 
{"identity": 1}), + ("divide", "div", 2, {"promote_bools": True}), + ("mod", "mod", 2, {"promote_bools": True}), + ("power", "pow", 2, {"promote_bools": True}), - ("maximum", "max", 2), - ("minimum", "min", 2), + ("maximum", "max", 2), + ("minimum", "min", 2), - ("equal", "eq", 2, {"bool_result": True}), - ("not_equal", "ne", 2, {"bool_result": True}), - ("less", "lt", 2, {"bool_result": True}), - ("less_equal", "le", 2, {"bool_result": True}), - ("greater", "gt", 2, {"bool_result": True}), - ("greater_equal", "ge", 2, {"bool_result": True}), + ("equal", "eq", 2, {"bool_result": True}), + ("not_equal", "ne", 2, {"bool_result": True}), + ("less", "lt", 2, {"bool_result": True}), + ("less_equal", "le", 2, {"bool_result": True}), + ("greater", "gt", 2, {"bool_result": True}), + ("greater_equal", "ge", 2, {"bool_result": True}), - ("copysign", "copysign", 2, {"promote_to_float": True}), + ("copysign", "copysign", 2, {"promote_to_float": True}), - ("positive", "pos", 1), - ("negative", "neg", 1), - ("absolute", "abs", 1), - ("sign", "sign", 1), - ("reciprocal", "reciprocal", 1), + ("positive", "pos", 1), + ("negative", "neg", 1), + ("absolute", "abs", 1), + ("sign", "sign", 1, {"promote_bools": True}), + ("reciprocal", "reciprocal", 1), - ("fabs", "fabs", 1, {"promote_to_float": True}), - ("floor", "floor", 1, {"promote_to_float": True}), - ("exp", "exp", 1, {"promote_to_float": True}), + ("fabs", "fabs", 1, {"promote_to_float": True}), + ("floor", "floor", 1, {"promote_to_float": True}), + ("exp", "exp", 1, {"promote_to_float": True}), - ("sin", "sin", 1, {"promote_to_float": True}), - ("cos", "cos", 1, {"promote_to_float": True}), - ("tan", "tan", 1, {"promote_to_float": True}), - ("arcsin", "arcsin", 1, {"promote_to_float": True}), - ("arccos", "arccos", 1, {"promote_to_float": True}), - ("arctan", "arctan", 1, {"promote_to_float": True}), -]: - ufunc_name = ufunc_def[0] - op_name = ufunc_def[1] - argcount = ufunc_def[2] - try: - extra_kwargs = ufunc_def[3] - except 
IndexError: - extra_kwargs = {} + ("sin", "sin", 1, {"promote_to_float": True}), + ("cos", "cos", 1, {"promote_to_float": True}), + ("tan", "tan", 1, {"promote_to_float": True}), + ("arcsin", "arcsin", 1, {"promote_to_float": True}), + ("arccos", "arccos", 1, {"promote_to_float": True}), + ("arctan", "arctan", 1, {"promote_to_float": True}), + ]: + self.add_ufunc(space, *ufunc_def) - globals()[ufunc_name] = ufunc_dtype_caller(ufunc_name, op_name, argcount, **extra_kwargs) + def add_ufunc(self, space, ufunc_name, op_name, argcount, extra_kwargs=None): + if extra_kwargs is None: + extra_kwargs = {} + + identity = extra_kwargs.get("identity") + if identity is not None: + identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity) + extra_kwargs["identity"] = identity + + func = ufunc_dtype_caller(ufunc_name, op_name, argcount) + if argcount == 1: + ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs) + elif argcount == 2: + ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs) + setattr(self, ufunc_name, ufunc) + +def get(space): + return space.fromcache(UfuncState) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -82,14 +82,30 @@ assert a[1] == 1 def test_add_int8(self): - from numpy import array + from numpy import array, dtype a = array(range(5), dtype="int8") b = a + a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == i * 2 + + def test_add_int16(self): + from numpy import array, dtype + + a = array(range(5), dtype="int16") + b = a + a + assert b.dtype is dtype("int16") for i in range(5): assert b[i] == i * 2 def test_shape(self): from numpy import dtype - assert dtype(long).shape == () \ No newline at end of file + assert dtype(long).shape == () + + def test_cant_subclass(self): + from numpy import dtype + + # You can't subclass dtype + raises(TypeError, type, "Foo", (dtype,), {}) 
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -52,10 +52,14 @@ from numpy import array, zeros a = array(range(5), float) assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" + a = array([], float) + assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" a = array(range(5), long) assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([], long) + assert repr(a) == "array([], dtype=int64)" a = array([True, False, True, False], "?") assert repr(a) == "array([True, False, True, False], dtype=bool)" @@ -84,6 +88,9 @@ a = array(range(5), dtype="int8") assert str(a) == "[0 1 2 3 4]" + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + def test_str_slice(self): from numpy import array, zeros a = array(range(5), float) @@ -102,6 +109,16 @@ assert a[-1] == 8 raises(IndexError, "a[-6]") + def test_getitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)]") + for i in xrange(5): + assert a[(i,)] == i + b = a[()] + for i in xrange(5): + assert a[i] == b[i] + def test_setitem(self): from numpy import array a = array(range(5)) @@ -110,6 +127,17 @@ raises(IndexError, "a[5] = 0.0") raises(IndexError, "a[-6] = 3.0") + def test_setitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)] = [0,1]") + for i in xrange(5): + a[(i,)] = i+1 + assert a[i] == i+1 + a[()] = range(5) + for i in xrange(5): + assert a[i] == i + def test_setslice_array(self): from numpy import array a = array(range(5)) @@ -236,12 +264,19 @@ assert b[i] == i - 5 def test_mul(self): - from numpy import array + from numpy import array, dtype a = array(range(5)) b = a * a for i in range(5): assert b[i] == i * i + a = array(range(5), dtype=bool) + b = a * a + assert b.dtype is dtype(bool) + 
assert b[0] is False + for i in range(1, 5): + assert b[i] is True + def test_mul_constant(self): from numpy import array a = array(range(5)) @@ -250,12 +285,18 @@ assert b[i] == i * 5 def test_div(self): - from numpy import array + from numpy import array, dtype a = array(range(1, 6)) b = a / a for i in range(5): assert b[i] == 1 + a = array(range(1, 6), dtype=bool) + b = a / a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == 1 + def test_div_other(self): from numpy import array a = array(range(5)) @@ -301,6 +342,12 @@ for i in range(5): assert b[i] == 0 + a = array(range(1, 6), float) + b = (a + 1) % a + assert b[0] == 0 + for i in range(1, 5): + assert b[i] == 1 + def test_mod_other(self): from numpy import array a = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -3,6 +3,32 @@ class AppTestUfuncs(BaseNumpyAppTest): + def test_ufunc_instance(self): + from numpy import add, ufunc + + assert isinstance(add, ufunc) + assert repr(add) == "" + assert repr(ufunc) == "" + + def test_ufunc_attrs(self): + from numpy import add, multiply, sin + + assert add.identity == 0 + assert multiply.identity == 1 + assert sin.identity is None + + assert add.nin == 2 + assert multiply.nin == 2 + assert sin.nin == 1 + + def test_wrong_arguments(self): + from numpy import add, sin + + raises(TypeError, add, 1) + raises(TypeError, add, 1, 2, 3) + raises(TypeError, sin, 1, 2) + raises(TypeError, sin) + def test_single_item(self): from numpy import negative, sign, minimum @@ -112,7 +138,7 @@ x = maximum(2, 3) assert x == 3 - assert type(x) is int + assert isinstance(x, (int, long)) def test_multiply(self): from numpy import array, multiply @@ -124,7 +150,7 @@ assert c[i] == a[i] * b[i] def test_sign(self): - from numpy import array, sign + from numpy import array, sign, dtype reference = [-1.0, 0.0, 
0.0, 1.0] a = array([-5.0, -0.0, 0.0, 6.0]) @@ -137,6 +163,11 @@ for i in range(10): assert a[i] == ref[i] + a = sign(array([True, False], dtype=bool)) + assert a.dtype == dtype("int8") + assert a[0] == 1 + assert a[1] == 0 + def test_reciporocal(self): from numpy import array, reciprocal @@ -275,3 +306,17 @@ assert equal(3.0, 3.5) is False assert equal(3.0, 3) is True assert equal(3.0, 4) is False + + def test_reduce_errors(self): + from numpy import sin, add + + raises(ValueError, sin.reduce, [1, 2, 3]) + raises(TypeError, add.reduce, 1) + + def test_reduce(self): + from numpy import add, maximum + + assert add.reduce([1, 2, 3]) == 6 + assert maximum.reduce([1]) == 1 + assert maximum.reduce([1, 2, 3]) == 3 + raises(ValueError, maximum.reduce, []) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -19,7 +19,7 @@ def test_add(self): def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v = interp_ufuncs.add(self.space, ar, ar) + v = interp_ufuncs.get(self.space).add.call(self.space, [ar, ar]) return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) @@ -31,9 +31,10 @@ def test_floatadd(self): def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v = interp_ufuncs.add(self.space, - ar, - scalar_w(self.space, self.float64_dtype, self.space.wrap(4.5)) + v = interp_ufuncs.get(self.space).add.call(self.space, [ + ar, + scalar_w(self.space, self.float64_dtype, self.space.wrap(4.5)) + ], ) assert isinstance(v, BaseArray) return v.get_concrete().eval(3).val @@ -89,14 +90,21 @@ def test_max(self): space = self.space float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i, dtype=NonConstant(float64_dtype)) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(i, dtype=dtype) j = 0 while 
j < i: ar.get_concrete().setitem(j, float64_dtype.box(float(j))) j += 1 - return ar.descr_add(space, ar).descr_max(space).floatval + v = ar.descr_add(space, ar).descr_max(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -108,14 +116,21 @@ def test_min(self): space = self.space float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i, dtype=NonConstant(float64_dtype)) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(i, dtype=dtype) j = 0 while j < i: ar.get_concrete().setitem(j, float64_dtype.box(float(j))) j += 1 - return ar.descr_add(space, ar).descr_min(space).floatval + v = ar.descr_add(space, ar).descr_min(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -180,9 +195,9 @@ def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v1 = interp_ufuncs.add(space, ar, scalar_w(space, self.float64_dtype, space.wrap(4.5))) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, scalar_w(space, self.float64_dtype, space.wrap(4.5))]) assert isinstance(v1, BaseArray) - v2 = interp_ufuncs.multiply(space, v1, scalar_w(space, self.float64_dtype, space.wrap(4.5))) + v2 = interp_ufuncs.get(self.space).multiply.call(space, [v1, scalar_w(space, self.float64_dtype, space.wrap(4.5))]) v1.force_if_needed() assert isinstance(v2, BaseArray) return v2.get_concrete().eval(3).val @@ -200,8 +215,8 @@ space = self.space def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v1 = interp_ufuncs.add(space, ar, ar) - v2 = interp_ufuncs.negative(space, v1) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) return 
v2.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) @@ -216,13 +231,13 @@ def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v1 = interp_ufuncs.add(space, ar, ar) - v2 = interp_ufuncs.negative(space, v1) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) v2.get_concrete() for i in xrange(5): - v1 = interp_ufuncs.multiply(space, ar, ar) - v2 = interp_ufuncs.negative(space, v1) + v1 = interp_ufuncs.get(self.space).multiply.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) v2.get_concrete() self.meta_interp(f, [5], listops=True, backendopt=True) @@ -237,7 +252,7 @@ SingleDimSlice.signature, ar.signature ]) s = SingleDimSlice(0, step*i, step, i, ar, new_sig) - v = interp_ufuncs.add(self.space, s, s) + v = interp_ufuncs.get(self.space).add.call(self.space, [s, s]) return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) @@ -259,7 +274,7 @@ SingleDimSlice.signature, s1.signature ]) s2 = SingleDimSlice(0, step2*i, step2, i, ar, new_sig) - v = interp_ufuncs.add(self.space, s1, s2) + v = interp_ufuncs.get(self.space).add.call(self.space, [s1, s2]) return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -161,6 +161,8 @@ interpleveldefs['mknod'] = 'interp_posix.mknod' if hasattr(os, 'nice'): interpleveldefs['nice'] = 'interp_posix.nice' + if hasattr(os, 'getlogin'): + interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ 
b/pypy/module/posix/interp_posix.py @@ -464,6 +464,15 @@ space.wrap("strerror() argument out of range")) return space.wrap(text) +def getlogin(space): + """Return the currently logged in user.""" + try: + cur = os.getlogin() + except OSError, e: + raise wrap_oserror(space, e) + else: + return space.wrap(cur) + # ____________________________________________________________ def getstatfields(space): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -805,6 +805,16 @@ data = f.read() assert data == "who cares?" + try: + os.getlogin() + except (AttributeError, OSError): + pass + else: + def test_getlogin(self): + assert isinstance(self.posix.getlogin(), str) + # How else could we test that getlogin is properly + # working? + def test_tmpfile(self): os = self.posix f = os.tmpfile() diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -9,6 +9,7 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.translator.platform import platform import sys import py @@ -19,7 +20,9 @@ libname = 'expat' eci = ExternalCompilationInfo( libraries=[libname], + library_dirs=platform.preprocess_library_dirs([]), includes=['expat.h'], + include_dirs=platform.preprocess_include_dirs([]), ) eci = rffi_platform.configure_external_library( diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -21,9 +21,11 @@ PyFrame._virtualizable2_ = ['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', 'last_exception', 'lastblock', 'is_being_profiled', + 'w_globals', ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] diff --git 
a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -67,24 +67,14 @@ assert log.opnames(ops) == ["guard_value", "getfield_gc", "guard_value", "getfield_gc", "guard_value", - "getfield_gc", "guard_nonnull_class"] - # LOAD_GLOBAL of OFFSET but in different function partially folded - # away - # XXX could be improved + "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') - assert log.opnames(ops) == ["guard_value", "getfield_gc", "guard_value"] + assert log.opnames(ops) == ["guard_not_invalidated"] # - # two LOAD_GLOBAL of f, the second is folded away ops = entry_bridge.ops_by_id('call', opcode='LOAD_GLOBAL') - assert log.opnames(ops) == ["getfield_gc", "guard_nonnull_class"] + assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p29 = getfield_gc(ConstPtr(ptr28), descr=) - guard_nonnull_class(p29, ConstClass(Function), descr=...) - p33 = getfield_gc(p29, descr=) - guard_value(p33, ConstPtr(ptr34), descr=...) - p35 = getfield_gc(p29, descr=) - p36 = getfield_gc(p29, descr=) p38 = call(ConstClass(getexecutioncontext), descr=) p39 = getfield_gc(p38, descr=) i40 = force_token() @@ -100,19 +90,16 @@ # ----------------------------- loop, = log.loops_by_id('call') assert loop.match(""" - i12 = int_lt(i5, i6) - guard_true(i12, descr=...) + guard_not_invalidated(descr=...) + i9 = int_lt(i5, i6) + guard_true(i9, descr=...) + i10 = force_token() + i12 = int_add(i5, 1) i13 = force_token() - i15 = int_add(i5, 1) - i16 = int_add_ovf(i15, i7) - guard_no_overflow(descr=...) - i18 = force_token() - i20 = int_add_ovf(i16, 1) - guard_no_overflow(descr=...) - i21 = int_add_ovf(i20, i7) + i15 = int_add_ovf(i12, 1) guard_no_overflow(descr=...) 
--TICK-- - jump(p0, p1, p2, p3, p4, i21, i6, i7, p8, p9, p10, p11, descr=) + jump(p0, p1, p2, p3, p4, i15, i6, p7, p8, descr=) """) def test_method_call(self): @@ -187,7 +174,7 @@ guard_no_overflow(descr=...) i18 = force_token() --TICK-- - jump(p0, p1, p2, p3, p4, i8, p7, i17, p8, i9, i17, p10, p11, p12, descr=) + jump(..., descr=) """) def test_default_and_kw(self): @@ -409,3 +396,70 @@ --TICK-- jump(..., descr=) """) + + def test_global_closure_has_constant_cells(self): + log = self.run(""" + def make_adder(n): + def add(x): + return x + n + return add + add5 = make_adder(5) + def main(): + i = 0 + while i < 5000: + i = add5(i) # ID: call + """, []) + loop, = log.loops_by_id('call', is_entry_bridge=True) + assert loop.match(""" + guard_value(i6, 1, descr=...) + guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) + guard_value(i4, 0, descr=...) + guard_value(p3, ConstPtr(ptr14), descr=...) + i15 = getfield_gc_pure(p8, descr=) + i17 = int_lt(i15, 5000) + guard_true(i17, descr=...) + p18 = getfield_gc(p0, descr=) + guard_value(p18, ConstPtr(ptr19), descr=...) + p20 = getfield_gc(p18, descr=) + guard_value(p20, ConstPtr(ptr21), descr=...) + guard_not_invalidated(descr=...) + # most importantly, there is no getarrayitem_gc here + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) + i25 = force_token() + p26 = getfield_gc(p23, descr=) + guard_isnull(p26, descr=...) + i27 = getfield_gc(p23, descr=) + i28 = int_is_zero(i27) + guard_true(i28, descr=...) + p30 = getfield_gc(ConstPtr(ptr29), descr=) + guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) + i32 = getfield_gc_pure(p30, descr=) + i33 = int_add_ovf(i15, i32) + guard_no_overflow(descr=...) 
+ --TICK-- + jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + """) + + def test_local_closure_is_virtual(self): + log = self.run(""" + def main(): + i = 0 + while i < 5000: + def add(): + return i + 1 + i = add() # ID: call + """, []) + loop, = log.loops_by_id('call') + assert loop.match(""" + i8 = getfield_gc_pure(p6, descr=) + i10 = int_lt(i8, 5000) + guard_true(i10, descr=...) + i11 = force_token() + i13 = int_add(i8, 1) + --TICK-- + p22 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -20,11 +20,9 @@ guard_value(p10, ConstPtr(ptr11), descr=...) p12 = getfield_gc(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) - p15 = getfield_gc(ConstPtr(ptr14), descr=) - guard_isnull(p15, descr=...) guard_not_invalidated(descr=...) p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) p22 = getfield_gc(ConstPtr(ptr21), descr=) guard_nonnull(p22, descr=...) - """) \ No newline at end of file + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -52,7 +52,7 @@ i10 = int_add_ovf(i5, i7) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, i10, i6, p7, i7, p8, descr=) + jump(p0, p1, p2, p3, p4, i10, i6, i7, p8, descr=) """) def test_getattr_with_dynamic_attribute(self): @@ -151,6 +151,7 @@ assert loop.match_by_id('loadattr', ''' guard_not_invalidated(descr=...) + i16 = arraylen_gc(p10, descr=) i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) guard_no_exception(descr=...) 
i21 = int_and(i19, _) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -47,6 +47,7 @@ assert loop.match(""" i2 = int_lt(i0, i1) guard_true(i2, descr=...) + guard_not_invalidated(descr=...) f1 = cast_int_to_float(i0) i3 = float_eq(f1, inf) i4 = float_eq(f1, -inf) @@ -60,4 +61,33 @@ i7 = int_add(i0, f1) --TICK-- jump(..., descr=) + """) + + def test_fmod(self): + def main(n): + import math + + s = 0 + while n > 0: + s += math.fmod(n, 2.0) + n -= 1 + return s + log = self.run(main, [500]) + assert log.result == main(500) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i1 = int_gt(i0, 0) + guard_true(i1, descr=...) + guard_not_invalidated(descr=...) + f1 = cast_int_to_float(i0) + i2 = float_eq(f1, inf) + i3 = float_eq(f1, -inf) + i4 = int_or(i2, i3) + i5 = int_is_true(i4) + guard_false(i5, descr=...) + f2 = call(ConstClass(fmod), f1, 2.0, descr=) + f3 = float_add(f0, f2) + i6 = int_sub(i0, 1) + --TICK-- + jump(..., descr=) """) \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -234,3 +234,18 @@ return total # self.run_and_check(main, []) + + + def test_global(self): + log = self.run(""" + i = 0 + globalinc = 1 + def main(n): + global i + while i < n: + l = globalinc # ID: globalread + i += l + """, [1000]) + + loop, = log.loops_by_id("globalread", is_entry_bridge=True) + assert len(loop.ops_by_id("globalread")) == 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -34,9 +34,9 @@ i25 = unicodegetitem(p13, i19) p27 = newstr(1) 
strsetitem(p27, 0, i23) - p30 = call(ConstClass(ll_str2unicode__rpy_stringPtr), p27, descr=) + p30 = call(ConstClass(ll_str2unicode__rpy_stringPtr), p27, descr=...) guard_no_exception(descr=...) - i32 = call(ConstClass(_ll_2_str_eq_checknull_char__rpy_unicodePtr_UniChar), p30, i25, descr=) + i32 = call(ConstClass(_ll_2_str_eq_checknull_char__rpy_unicodePtr_UniChar), p30, i25, descr=...) guard_true(i32, descr=...) i34 = int_add(i6, 1) --TICK-- @@ -105,5 +105,5 @@ i58 = int_add_ovf(i6, i57) guard_no_overflow(descr=...) --TICK-- - jump(p0, p1, p2, p3, p4, p5, i58, i7, i8, p9, p10, descr=) + jump(p0, p1, p2, p3, p4, p5, i58, i7, descr=) """) diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,7 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc.startswith('gcc'): +elif platform.cc is not None and platform.cc.startswith('gcc'): out = platform.execute(platform.cc, '--version').out match = re.search(' (\d+\.\d+(\.\d+)*)', out) if match: diff --git a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c --- a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c +++ b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c @@ -481,6 +481,16 @@ int a, b, c, d, e, f, g, h; } S8I; + + +typedef int (*CALLBACK_RECT)(RECT rect); + +EXPORT(int) call_callback_with_rect(CALLBACK_RECT cb, RECT rect) +{ + return cb(rect); +} + + EXPORT(S8I) ret_8i_func(S8I inp) { inp.a *= 2; diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py @@ -150,7 +150,6 @@ class TestMoreCallbacks(BaseCTypesTestChecker): def test_callback_with_struct_argument(self): - py.test.skip("callbacks with struct 
arguments not implemented yet") class RECT(Structure): _fields_ = [("left", c_int), ("top", c_int), ("right", c_int), ("bottom", c_int)] @@ -167,6 +166,28 @@ assert res == 1111 + def test_callback_from_c_with_struct_argument(self): + import conftest + _ctypes_test = str(conftest.sofile) + dll = CDLL(_ctypes_test) + + class RECT(Structure): + _fields_ = [("left", c_long), ("top", c_long), + ("right", c_long), ("bottom", c_long)] + + proto = CFUNCTYPE(c_int, RECT) + def callback(point): + return point.left+point.top+point.right+point.bottom + + cbp = proto(callback) + rect = RECT(1000,100,10,1) + + call_callback_with_rect = dll.call_callback_with_rect + call_callback_with_rect.restype = c_int + call_callback_with_rect.argtypes = [proto, RECT] + res = call_callback_with_rect(cbp, rect) + assert res == 1111 + def test_callback_unsupported_return_struct(self): class RECT(Structure): _fields_ = [("left", c_int), ("top", c_int), diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -0,0 +1,233 @@ +from pypy.conftest import gettestobjspace + + +class AppTestGreenlet: + def setup_class(cls): + cls.space = gettestobjspace(usemodules=['_continuation']) + + def test_simple(self): + from greenlet import greenlet + lst = [] + def f(): + lst.append(1) + greenlet.getcurrent().parent.switch() + lst.append(3) + g = greenlet(f) + lst.append(0) + g.switch() + lst.append(2) + g.switch() + lst.append(4) + assert lst == range(5) + + def test_parent(self): + from greenlet import greenlet + gmain = greenlet.getcurrent() + assert gmain.parent is None + g = greenlet(lambda: None) + assert g.parent is gmain + + def test_pass_around(self): + from greenlet import greenlet + seen = [] + def f(x, y): + seen.append((x, y)) + seen.append(greenlet.getcurrent().parent.switch()) + seen.append(greenlet.getcurrent().parent.switch(42)) + return 44, 'z' + g = greenlet(f) + 
seen.append(g.switch(40, 'x')) + seen.append(g.switch(41, 'y')) + seen.append(g.switch(43)) + # + def f2(): + return 45 + g = greenlet(f2) + seen.append(g.switch()) + # + def f3(): + pass + g = greenlet(f3) + seen.append(g.switch()) + # + assert seen == [(40, 'x'), (), (41, 'y'), 42, 43, (44, 'z'), 45, None] + + def test_exception_simple(self): + from greenlet import greenlet + # + def fmain(): + raise ValueError + # + g1 = greenlet(fmain) + raises(ValueError, g1.switch) + + def test_dead(self): + from greenlet import greenlet + # + def fmain(): + assert g1 and not g1.dead + # + g1 = greenlet(fmain) + assert not g1 and not g1.dead + g1.switch() + assert not g1 and g1.dead + # + gmain = greenlet.getcurrent() + assert gmain and not gmain.dead + + def test_GreenletExit(self): + from greenlet import greenlet, GreenletExit + # + def fmain(*args): + raise GreenletExit(*args) + # + g1 = greenlet(fmain) + res = g1.switch('foo', 'bar') + assert isinstance(res, GreenletExit) and res.args == ('foo', 'bar') + + def test_throw_1(self): + from greenlet import greenlet + gmain = greenlet.getcurrent() + # + def f(): + try: + gmain.switch() + except ValueError: + return "ok" + # + g = greenlet(f) + g.switch() + res = g.throw(ValueError) + assert res == "ok" + + def test_throw_2(self): + from greenlet import greenlet + gmain = greenlet.getcurrent() + # + def f(): + gmain.throw(ValueError) + # + g = greenlet(f) + raises(ValueError, g.switch) + + def test_throw_3(self): + from greenlet import greenlet + gmain = greenlet.getcurrent() + raises(ValueError, gmain.throw, ValueError) + + def test_throw_4(self): + from greenlet import greenlet + gmain = greenlet.getcurrent() + # + def f1(): + g2.throw(ValueError) + # + def f2(): + try: + gmain.switch() + except ValueError: + return "ok" + # + g1 = greenlet(f1) + g2 = greenlet(f2) + g2.switch() + res = g1.switch() + assert res == "ok" + + def test_nondefault_parent(self): + from greenlet import greenlet + # + def f1(): + g2 = greenlet(f2) + 
res = g2.switch() + assert res == "from 2" + return "from 1" + # + def f2(): + return "from 2" + # + g1 = greenlet(f1) + res = g1.switch() + assert res == "from 1" + + def test_change_parent(self): + from greenlet import greenlet + # + def f1(): + res = g2.switch() + assert res == "from 2" + return "from 1" + # + def f2(): + return "from 2" + # + g1 = greenlet(f1) + g2 = greenlet(f2) + g2.parent = g1 + res = g1.switch() + assert res == "from 1" + + def test_raises_through_parent_chain(self): + from greenlet import greenlet + # + def f1(): + raises(IndexError, g2.switch) + raise ValueError + # + def f2(): + raise IndexError + # + g1 = greenlet(f1) + g2 = greenlet(f2) + g2.parent = g1 + raises(ValueError, g1.switch) + + def test_switch_to_dead_1(self): + from greenlet import greenlet + # + def f1(): + return "ok" + # + g1 = greenlet(f1) + res = g1.switch() + assert res == "ok" + res = g1.switch("goes to gmain instead") + assert res == "goes to gmain instead" + + def test_switch_to_dead_2(self): + from greenlet import greenlet + # + def f1(): + g2 = greenlet(f2) + return g2.switch() + # + def f2(): + return "ok" + # + g1 = greenlet(f1) + res = g1.switch() + assert res == "ok" + res = g1.switch("goes to gmain instead") + assert res == "goes to gmain instead" + + def test_switch_to_dead_3(self): + from greenlet import greenlet + gmain = greenlet.getcurrent() + # + def f1(): + res = g2.switch() + assert res == "ok" + res = gmain.switch("next step") + assert res == "goes to f1 instead" + return "all ok" + # + def f2(): + return "ok" + # + g1 = greenlet(f1) + g2 = greenlet(f2) + g2.parent = g1 + res = g1.switch() + assert res == "next step" + res = g2.switch("goes to f1 instead") + assert res == "all ok" diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -15,11 +15,6 @@ # * The start-up data (the app-level callable and arguments) is # stored in the global bootstrapper 
object. # -# * The GC is notified that a new thread is about to start; in the -# framework GC with shadow stacks, this allocates a fresh new shadow -# stack (but doesn't use it yet). See gc_thread_prepare(). This -# has no effect in asmgcc. -# # * The new thread is launched at RPython level using an rffi call # to the C function RPyThreadStart() defined in # translator/c/src/thread*.h. This RPython thread will invoke the @@ -33,8 +28,8 @@ # operation is called (this is all done by gil.after_external_call(), # called from the rffi-generated wrapper). The gc_thread_run() # operation will automatically notice that the current thread id was -# not seen before, and start using the freshly prepared shadow stack. -# Again, this has no effect in asmgcc. +# not seen before, and (in shadowstack) it will allocate and use a +# fresh new stack. Again, this has no effect in asmgcc. # # * Only then does bootstrap() really run. The first thing it does # is grab the start-up information (app-level callable and args) @@ -180,7 +175,7 @@ bootstrapper.acquire(space, w_callable, args) try: try: - thread.gc_thread_prepare() + thread.gc_thread_prepare() # (this has no effect any more) ident = thread.start_new_thread(bootstrapper.bootstrap, ()) except Exception, e: bootstrapper.release() # normally called by the new thread diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -724,13 +724,22 @@ w_left_src, w_left_impl = space.lookup_in_type_where(w_typ1, left) w_first = w_obj1 w_second = w_obj2 - - if _same_class_w(space, w_obj1, w_obj2, w_typ1, w_typ2): + # + if left == right and _same_class_w(space, w_obj1, w_obj2, + w_typ1, w_typ2): + # for __eq__ and __ne__, if the objects have the same + # (old-style or new-style) class, then don't try the + # opposite method, which is the same one. 
w_right_impl = None else: - w_right_src, w_right_impl = space.lookup_in_type_where(w_typ2, right) - # XXX see binop_impl - if space.is_true(space.issubtype(w_typ2, w_typ1)): + # in all other cases, try the opposite method. + w_right_src, w_right_impl = space.lookup_in_type_where(w_typ2,right) + if space.is_w(w_typ1, w_typ2): + # if the type is the same, *or* if both are old-style classes, + # then don't reverse: try left first, right next. + pass + elif space.is_true(space.issubtype(w_typ2, w_typ1)): + # for new-style classes, if typ2 is a subclass of typ1. w_obj1, w_obj2 = w_obj2, w_obj1 w_left_impl, w_right_impl = w_right_impl, w_left_impl diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -184,7 +184,7 @@ class FlowExecutionContext(ExecutionContext): - def __init__(self, space, code, globals, constargs={}, closure=None, + def __init__(self, space, code, globals, constargs={}, outer_func=None, name=None): ExecutionContext.__init__(self, space) self.code = code @@ -193,11 +193,11 @@ self.crnt_offset = -1 self.crnt_frame = None - if closure is None: + if outer_func and outer_func.closure: + self.closure = [nestedscope.Cell(Constant(value)) + for value in outer_func.closure] + else: self.closure = None - else: - self.closure = [nestedscope.Cell(Constant(value)) - for value in closure] frame = self.create_frame() formalargcount = code.getformalargcount() arg_list = [Variable() for i in range(formalargcount)] @@ -216,7 +216,7 @@ # while ignoring any operation like the creation of the locals dict self.recorder = [] frame = FlowSpaceFrame(self.space, self.code, - self.w_globals, self.closure) + self.w_globals, self) frame.last_instr = 0 return frame diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -252,9 +252,9 @@ raise TypeError("%r is a generator" % 
(func,)) code = PyCode._from_code(self, code) if func.func_closure is None: - closure = None + cl = None else: - closure = [extract_cell_content(c) for c in func.func_closure] + cl = [extract_cell_content(c) for c in func.func_closure] # CallableFactory.pycall may add class_ to functions that are methods name = func.func_name class_ = getattr(func, 'class_', None) @@ -262,8 +262,10 @@ name = '%s.%s' % (class_.__name__, name) for c in "<>&!": name = name.replace(c, '_') + class outerfunc: # hack + closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, closure, name) + constargs, outerfunc, name) graph = ec.graph graph.func = func # attach a signature and defaults to the graph diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -1,50 +1,57 @@ -""" A very simple cell dict implementation. The dictionary maps keys to cell. -This ensures that the function (dict, key) -> cell is pure. By itself, this -optimization is not helping at all, but in conjunction with the JIT it can -speed up global lookups a lot.""" +""" A very simple cell dict implementation using a version tag. The dictionary +maps keys to objects. If a specific key is changed a lot, a level of +indirection is introduced to make the version tag change less often. 
+""" +from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import IteratorImplementation from pypy.objspace.std.dictmultiobject import DictStrategy, _never_equal_to_string from pypy.objspace.std.dictmultiobject import ObjectDictStrategy from pypy.rlib import jit, rerased -class ModuleCell(object): +class VersionTag(object): + pass + +class ModuleCell(W_Root): def __init__(self, w_value=None): self.w_value = w_value - def invalidate(self): - w_value = self.w_value - self.w_value = None - return w_value - def __repr__(self): return "" % (self.w_value, ) +def unwrap_cell(w_value): + if isinstance(w_value, ModuleCell): + return w_value.w_value + return w_value + class ModuleDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("modulecell") erase = staticmethod(erase) unerase = staticmethod(unerase) + _immutable_fields_ = ["version?"] + def __init__(self, space): self.space = space + self.version = VersionTag() def get_empty_storage(self): return self.erase({}) - def getcell(self, w_dict, key, makenew): - if makenew or jit.we_are_jitted(): - # when we are jitting, we always go through the pure function - # below, to ensure that we have no residual dict lookup - w_dict = jit.promote(w_dict) - self = jit.promote(self) - return self._getcell_makenew(w_dict, key) + def mutated(self): + self.version = VersionTag() + + def getdictvalue_no_unwrapping(self, w_dict, key): + # NB: it's important to promote self here, so that self.version is a + # no-op due to the quasi-immutable field + self = jit.promote(self) + return self._getdictvalue_no_unwrapping_pure(self.version, w_dict, key) + + @jit.elidable_promote('0,1,2') + def _getdictvalue_no_unwrapping_pure(self, version, w_dict, key): return self.unerase(w_dict.dstorage).get(key, None) - @jit.elidable - def _getcell_makenew(self, w_dict, key): - return self.unerase(w_dict.dstorage).setdefault(key, ModuleCell()) - def setitem(self, w_dict, w_key, w_value): space = self.space if 
space.is_w(space.type(w_key), space.w_str): @@ -54,15 +61,24 @@ w_dict.setitem(w_key, w_value) def setitem_str(self, w_dict, key, w_value): - self.getcell(w_dict, key, True).w_value = w_value + cell = self.getdictvalue_no_unwrapping(w_dict, key) + if isinstance(cell, ModuleCell): + cell.w_value = w_value + return + if cell is not None: + w_value = ModuleCell(w_value) + self.mutated() + self.unerase(w_dict.dstorage)[key] = w_value def setdefault(self, w_dict, w_key, w_default): space = self.space if space.is_w(space.type(w_key), space.w_str): - cell = self.getcell(w_dict, space.str_w(w_key), True) - if cell.w_value is None: - cell.w_value = w_default - return cell.w_value + key = space.str_w(w_key) + w_result = self.getitem_str(w_dict, key) + if w_result is not None: + return w_result + self.setitem_str(w_dict, key, w_default) + return w_default else: self.switch_to_object_strategy(w_dict) return w_dict.setdefault(w_key, w_default) @@ -72,14 +88,13 @@ w_key_type = space.type(w_key) if space.is_w(w_key_type, space.w_str): key = space.str_w(w_key) - cell = self.getcell(w_dict, key, False) - if cell is None or cell.w_value is None: - raise KeyError - # note that we don't remove the cell from self.content, to make - # sure that a key that was found at any point in the dict, still - # maps to the same cell later (even if this cell no longer - # represents a key) - cell.invalidate() + dict_w = self.unerase(w_dict.dstorage) + try: + del dict_w[key] + except KeyError: + raise + else: + self.mutated() elif _never_equal_to_string(space, w_key_type): raise KeyError else: @@ -87,12 +102,7 @@ w_dict.delitem(w_key) def length(self, w_dict): - # inefficient, but do we care? 
- res = 0 - for cell in self.unerase(w_dict.dstorage).itervalues(): - if cell.w_value is not None: - res += 1 - return res + return len(self.unerase(w_dict.dstorage)) def getitem(self, w_dict, w_key): space = self.space @@ -107,11 +117,8 @@ return w_dict.getitem(w_key) def getitem_str(self, w_dict, key): - res = self.getcell(w_dict, key, False) - if res is None: - return None - # note that even if the res.w_value is None, the next line is fine - return res.w_value + w_res = self.getdictvalue_no_unwrapping(w_dict, key) + return unwrap_cell(w_res) def iter(self, w_dict): return ModuleDictIteratorImplementation(self.space, self, w_dict) @@ -119,44 +126,34 @@ def keys(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.wrap(key) for key, cell in iterator() - if cell.w_value is not None] + return [space.wrap(key) for key, cell in iterator()] def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues - return [cell.w_value for cell in iterator() - if cell.w_value is not None] + return [unwrap_cell(cell) for cell in iterator()] def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([space.wrap(key), cell.w_value]) - for (key, cell) in iterator() - if cell.w_value is not None] + return [space.newtuple([space.wrap(key), unwrap_cell(cell)]) + for key, cell in iterator()] def clear(self, w_dict): - iterator = self.unerase(w_dict.dstorage).iteritems - for k, cell in iterator(): - cell.invalidate() + iterator = self.unerase(w_dict.dstorage).clear() + self.mutated() def popitem(self, w_dict): - # This is O(n) if called repeatadly, you probably shouldn't be on a - # Module's dict though - for k, cell in self.unerase(w_dict.dstorage).iteritems(): - if cell.w_value is not None: - w_value = cell.w_value - cell.invalidate() - return self.space.wrap(k), w_value - else: - raise KeyError + d = self.unerase(w_dict.dstorage) + key, w_value = d.popitem() + 
self.mutated() + return self.space.wrap(key), unwrap_cell(w_value) def switch_to_object_strategy(self, w_dict): d = self.unerase(w_dict.dstorage) strategy = self.space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - if cell.w_value is not None: - d_new[self.space.wrap(key)] = cell.w_value + d_new[self.space.wrap(key)] = unwrap_cell(cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) @@ -168,7 +165,6 @@ def next_entry(self): for key, cell in self.iterator: - if cell.w_value is not None: - return (self.space.wrap(key), cell.w_value) + return (self.space.wrap(key), unwrap_cell(cell)) else: return None, None diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -38,7 +38,9 @@ if space.config.objspace.std.withcelldict and module: from pypy.objspace.std.celldict import ModuleDictStrategy assert w_type is None - strategy = space.fromcache(ModuleDictStrategy) + # every module needs its own strategy, because the strategy stores + # the version tag + strategy = ModuleDictStrategy(space) elif instance or strdict or module: assert w_type is None diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -142,7 +142,7 @@ def funcrun(self, func, args): frame = func.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self.signature() scope_w = args.parse_obj(None, func.name, sig, func.defs_w) frame.setfastscope(scope_w) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -129,12 +129,12 @@ ec._py_repr = None return ec - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): from pypy.objspace.std.fake import 
CPythonFakeCode, CPythonFakeFrame if not we_are_translated() and isinstance(code, CPythonFakeCode): return CPythonFakeFrame(self, code, w_globals) else: - return ObjSpace.createframe(self, code, w_globals, closure) + return ObjSpace.createframe(self, code, w_globals, outer_func) def gettypefor(self, cls): return self.gettypeobject(cls.typedef) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -913,12 +913,16 @@ def repr__String(space, w_str): s = w_str._value - buf = StringBuilder(len(s) + 2) - quote = "'" if quote in s and '"' not in s: quote = '"' + return space.wrap(string_escape_encode(s, quote)) + +def string_escape_encode(s, quote): + + buf = StringBuilder(len(s) + 2) + buf.append(quote) startslice = 0 @@ -959,7 +963,7 @@ buf.append(quote) - return space.wrap(buf.build()) + return buf.build() DEFAULT_NOOP_TABLE = ''.join([chr(i) for i in range(256)]) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -2,42 +2,110 @@ from pypy.conftest import gettestobjspace, option from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.celldict import ModuleCell, ModuleDictStrategy -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, \ + BaseTestRDictImplementation, BaseTestDevolvedDictImplementation from pypy.interpreter import gateway space = FakeSpace() class TestCellDict(object): - def test_basic_property(self): + def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() d = W_DictMultiObject(space, strategy, storage) - # replace getcell with getcell from strategy - def f(key, makenew): - return strategy.getcell(d, key, makenew) - d.getcell = f + v1 = 
strategy.version + d.setitem("a", 1) + v2 = strategy.version + assert v1 is not v2 + assert d.getitem("a") == 1 + assert d.strategy.getdictvalue_no_unwrapping(d, "a") == 1 - d.setitem("a", 1) - assert d.getcell("a", False) is d.getcell("a", False) - acell = d.getcell("a", False) - d.setitem("b", 2) - assert d.getcell("b", False) is d.getcell("b", False) - assert d.getcell("c", True) is d.getcell("c", True) + d.setitem("a", 2) + v3 = strategy.version + assert v2 is not v3 + assert d.getitem("a") == 2 + assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 2 - assert d.getitem("a") == 1 - assert d.getitem("b") == 2 + d.setitem("a", 3) + v4 = strategy.version + assert v3 is v4 + assert d.getitem("a") == 3 + assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 3 d.delitem("a") - py.test.raises(KeyError, d.delitem, "a") + v5 = strategy.version + assert v5 is not v4 assert d.getitem("a") is None - assert d.getcell("a", False) is acell - assert d.length() == 1 + assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None - d.clear() - assert d.getitem("a") is None - assert d.getcell("a", False) is acell - assert d.length() == 0 +class AppTestModuleDict(object): + def setup_class(cls): + cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) + cls.w_runappdirect = cls.space.wrap(option.runappdirect) + + def w_impl_used(self, obj): + if self.runappdirect: + skip("__repr__ doesn't work on appdirect") + import __pypy__ + assert "ModuleDictStrategy" in __pypy__.internal_repr(obj) + + def test_check_module_uses_module_dict(self): + m = type(__builtins__)("abc") + self.impl_used(m.__dict__) + + def test_key_not_there(self): + d = type(__builtins__)("abc").__dict__ + raises(KeyError, "d['def']") + + def test_fallback_evil_key(self): + class F(object): + def __hash__(self): + return hash("s") + def __eq__(self, other): + return other == "s" + d = type(__builtins__)("abc").__dict__ + d["s"] = 12 + assert d["s"] == 12 + assert d[F()] == d["s"] + + d 
= type(__builtins__)("abc").__dict__ + x = d.setdefault("s", 12) + assert x == 12 + x = d.setdefault(F(), 12) + assert x == 12 + + d = type(__builtins__)("abc").__dict__ + x = d.setdefault(F(), 12) + assert x == 12 + + d = type(__builtins__)("abc").__dict__ + d["s"] = 12 + del d[F()] + + assert "s" not in d + assert F() not in d + + +class TestModuleDictImplementation(BaseTestRDictImplementation): + StrategyClass = ModuleDictStrategy + +class TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation): + StrategyClass = ModuleDictStrategy + + string = "int" + string2 = "isinstance" + + +class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = ModuleDictStrategy + +class TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation): + StrategyClass = ModuleDictStrategy + + string = "int" + string2 = "isinstance" + class AppTestCellDict(object): OPTIONS = {"objspace.std.withcelldict": True} @@ -67,4 +135,4 @@ d["a"] = 3 del d["a"] d[object()] = 5 - assert d.values() == [5] \ No newline at end of file + assert d.values() == [5] diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -5,7 +5,6 @@ W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \ StringDictStrategy, ObjectDictStrategy -from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.conftest import gettestobjspace from pypy.conftest import option @@ -731,52 +730,6 @@ set([('a', 1), ('b', 2), ('d', 4), ('e', 5)])) -class AppTestModuleDict(object): - def setup_class(cls): - cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) - if option.runappdirect: - py.test.skip("__repr__ doesn't work on appdirect") - - def w_impl_used(self, obj): - import __pypy__ - assert "ModuleDictStrategy" in __pypy__.internal_repr(obj) - - def 
test_check_module_uses_module_dict(self): - m = type(__builtins__)("abc") - self.impl_used(m.__dict__) - - def test_key_not_there(self): - d = type(__builtins__)("abc").__dict__ - raises(KeyError, "d['def']") - - def test_fallback_evil_key(self): - class F(object): - def __hash__(self): - return hash("s") - def __eq__(self, other): - return other == "s" - d = type(__builtins__)("abc").__dict__ - d["s"] = 12 - assert d["s"] == 12 - assert d[F()] == d["s"] - - d = type(__builtins__)("abc").__dict__ - x = d.setdefault("s", 12) - assert x == 12 - x = d.setdefault(F(), 12) - assert x == 12 - - d = type(__builtins__)("abc").__dict__ - x = d.setdefault(F(), 12) - assert x == 12 - - d = type(__builtins__)("abc").__dict__ - d["s"] = 12 - del d[F()] - - assert "s" not in d - assert F() not in d - class AppTestStrategies(object): def setup_class(cls): if option.runappdirect: @@ -1071,16 +1024,6 @@ ## ImplementionClass = MeasuringDictImplementation ## DevolvedClass = MeasuringDictImplementation -class TestModuleDictImplementation(BaseTestRDictImplementation): - StrategyClass = ModuleDictStrategy - -class TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" - - class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): BaseTestRDictImplementation.fill_impl(self) @@ -1092,15 +1035,6 @@ class TestDevolvedStrDictImplementation(BaseTestDevolvedDictImplementation): StrategyClass = StringDictStrategy -class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = ModuleDictStrategy - -class TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" - def test_module_uses_strdict(): fakespace = FakeSpace() diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py 
--- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -89,6 +89,9 @@ assert not self.not_forced(r) r.sort() assert r == range(1, 100) + [999] + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_pop(self): r = range(10) diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -377,7 +377,26 @@ setattr(P, "__weakref__", 0) + def test_subclass_addition(self): + # the __radd__ is never called (compare with the next test) + l = [] + class A(object): + def __add__(self, other): + l.append(self.__class__) + l.append(other.__class__) + return 123 + def __radd__(self, other): + # should never be called! + return 456 + class B(A): + pass + res1 = A() + B() + res2 = B() + A() + assert res1 == res2 == 123 + assert l == [A, B, B, A] + def test_subclass_comparison(self): + # the __eq__ *is* called with reversed arguments l = [] class A(object): def __eq__(self, other): @@ -395,7 +414,27 @@ A() == B() A() < B() - assert l == [B, A, A, B] + B() < A() + assert l == [B, A, A, B, B, A] + + def test_subclass_comparison_more(self): + # similarly, __gt__(b,a) is called instead of __lt__(a,b) + l = [] + class A(object): + def __lt__(self, other): + l.append(self.__class__) + l.append(other.__class__) + return '<' + def __gt__(self, other): + l.append(self.__class__) + l.append(other.__class__) + return '>' + class B(A): + pass + res1 = A() < B() + res2 = B() < A() + assert res1 == '>' and res2 == '<' + assert l == [B, A, B, A] def test_rich_comparison(self): # Old-style @@ -434,6 +473,84 @@ assert not(C(1) == D(2)) assert not(D(1) == C(2)) + def test_partial_ordering(self): + class A(object): + def __lt__(self, other): + return self + a1 = A() + a2 = A() + assert (a1 < a2) is a1 + assert (a1 > a2) is a2 + + def test_eq_order(self): + class A(object): + def __eq__(self, 
other): return self.__class__.__name__+':A.eq' + def __ne__(self, other): return self.__class__.__name__+':A.ne' + def __lt__(self, other): return self.__class__.__name__+':A.lt' + def __le__(self, other): return self.__class__.__name__+':A.le' + def __gt__(self, other): return self.__class__.__name__+':A.gt' + def __ge__(self, other): return self.__class__.__name__+':A.ge' + class B(object): + def __eq__(self, other): return self.__class__.__name__+':B.eq' + def __ne__(self, other): return self.__class__.__name__+':B.ne' + def __lt__(self, other): return self.__class__.__name__+':B.lt' + def __le__(self, other): return self.__class__.__name__+':B.le' + def __gt__(self, other): return self.__class__.__name__+':B.gt' + def __ge__(self, other): return self.__class__.__name__+':B.ge' + # + assert (A() == B()) == 'A:A.eq' + assert (A() != B()) == 'A:A.ne' + assert (A() < B()) == 'A:A.lt' + assert (A() <= B()) == 'A:A.le' + assert (A() > B()) == 'A:A.gt' + assert (A() >= B()) == 'A:A.ge' + # + assert (B() == A()) == 'B:B.eq' + assert (B() != A()) == 'B:B.ne' + assert (B() < A()) == 'B:B.lt' + assert (B() <= A()) == 'B:B.le' + assert (B() > A()) == 'B:B.gt' + assert (B() >= A()) == 'B:B.ge' + # + class C(A): + def __eq__(self, other): return self.__class__.__name__+':C.eq' + def __ne__(self, other): return self.__class__.__name__+':C.ne' + def __lt__(self, other): return self.__class__.__name__+':C.lt' + def __le__(self, other): return self.__class__.__name__+':C.le' + def __gt__(self, other): return self.__class__.__name__+':C.gt' + def __ge__(self, other): return self.__class__.__name__+':C.ge' + # + assert (A() == C()) == 'C:C.eq' + assert (A() != C()) == 'C:C.ne' + assert (A() < C()) == 'C:C.gt' + assert (A() <= C()) == 'C:C.ge' + assert (A() > C()) == 'C:C.lt' + assert (A() >= C()) == 'C:C.le' + # + assert (C() == A()) == 'C:C.eq' + assert (C() != A()) == 'C:C.ne' + assert (C() < A()) == 'C:C.lt' + assert (C() <= A()) == 'C:C.le' + assert (C() > A()) == 'C:C.gt' + 
assert (C() >= A()) == 'C:C.ge' + # + class D(A): + pass + # + assert (A() == D()) == 'D:A.eq' + assert (A() != D()) == 'D:A.ne' + assert (A() < D()) == 'D:A.gt' + assert (A() <= D()) == 'D:A.ge' + assert (A() > D()) == 'D:A.lt' + assert (A() >= D()) == 'D:A.le' + # + assert (D() == A()) == 'D:A.eq' + assert (D() != A()) == 'D:A.ne' + assert (D() < A()) == 'D:A.lt' + assert (D() <= A()) == 'D:A.le' + assert (D() > A()) == 'D:A.gt' + assert (D() >= A()) == 'D:A.ge' + def test_addition(self): # Old-style class A: diff --git a/pypy/pytest-A-stackless.cfg b/pypy/pytest-A-stackless.cfg deleted file mode 100644 --- a/pypy/pytest-A-stackless.cfg +++ /dev/null @@ -1,10 +0,0 @@ -# run for some directories a file at a time - -def collect_one_testdir(testdirs, reldir, tests): - if (reldir.startswith('module/_stackless/') or - reldir.startswith('lib')): - testdirs.extend(tests) - else: - testdirs.append(reldir) - - diff --git a/pypy/rlib/_rffi_stacklet.py b/pypy/rlib/_rffi_stacklet.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/_rffi_stacklet.py @@ -0,0 +1,49 @@ +import py +from pypy.tool.autopath import pypydir +from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.rpython.tool import rffi_platform + + +cdir = py.path.local(pypydir) / 'translator' / 'c' + + +eci = ExternalCompilationInfo( + include_dirs = [cdir], + includes = ['src/stacklet/stacklet.h'], + separate_module_sources = ['#include "src/stacklet/stacklet.c"\n'], +) +rffi_platform.verify_eci(eci.convert_sources_to_files()) + +def llexternal(name, args, result, **kwds): + return rffi.llexternal(name, args, result, compilation_info=eci, + _nowrapper=True, **kwds) + +# ----- types ----- + +handle = rffi.COpaquePtr(typedef='stacklet_handle', compilation_info=eci) +thread_handle = rffi.COpaquePtr(typedef='stacklet_thread_handle', + compilation_info=eci) +run_fn = lltype.Ptr(lltype.FuncType([handle, llmemory.Address], handle)) + +# 
----- constants ----- + +null_handle = lltype.nullptr(handle.TO) + +def is_empty_handle(h): + return rffi.cast(lltype.Signed, h) == -1 + +# ----- functions ----- + +newthread = llexternal('stacklet_newthread', [], thread_handle) +deletethread = llexternal('stacklet_deletethread',[thread_handle], lltype.Void) + +new = llexternal('stacklet_new', [thread_handle, run_fn, llmemory.Address], + handle, random_effects_on_gcobjs=True) +switch = llexternal('stacklet_switch', [thread_handle, handle], handle, + random_effects_on_gcobjs=True) +destroy = llexternal('stacklet_destroy', [thread_handle, handle], lltype.Void) + +_translate_pointer = llexternal("_stacklet_translate_pointer", + [llmemory.Address, llmemory.Address], + llmemory.Address) diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -489,10 +489,10 @@ getnameinfo = external('getnameinfo', [sockaddr_ptr, socklen_t, CCHARP, size_t, CCHARP, size_t, rffi.INT], rffi.INT) -htonl = external('htonl', [rffi.UINT], rffi.UINT) -htons = external('htons', [rffi.USHORT], rffi.USHORT) -ntohl = external('ntohl', [rffi.UINT], rffi.UINT) -ntohs = external('ntohs', [rffi.USHORT], rffi.USHORT) +htonl = external('htonl', [rffi.UINT], rffi.UINT, threadsafe=False) +htons = external('htons', [rffi.USHORT], rffi.USHORT, threadsafe=False) +ntohl = external('ntohl', [rffi.UINT], rffi.UINT, threadsafe=False) +ntohs = external('ntohs', [rffi.USHORT], rffi.USHORT, threadsafe=False) if _POSIX: inet_aton = external('inet_aton', [CCHARP, lltype.Ptr(in_addr)], diff --git a/pypy/rlib/_stacklet_asmgcc.py b/pypy/rlib/_stacklet_asmgcc.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/_stacklet_asmgcc.py @@ -0,0 +1,277 @@ +from pypy.rlib import _rffi_stacklet as _c +from pypy.rlib.debug import ll_assert +from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.rpython.lltypesystem.lloperation import llop +from pypy.rpython.annlowlevel import llhelper 
+ + +_asmstackrootwalker = None # BIG HACK: monkey-patched by asmgcroot.py +_stackletrootwalker = None + +def get_stackletrootwalker(): + # lazily called, to make the following imports lazy + global _stackletrootwalker + if _stackletrootwalker is not None: + return _stackletrootwalker + + from pypy.rpython.memory.gctransform.asmgcroot import ( + WALKFRAME, CALLEE_SAVED_REGS, INDEX_OF_EBP, sizeofaddr) + + assert _asmstackrootwalker is not None, "should have been monkey-patched" + basewalker = _asmstackrootwalker + + class StackletRootWalker(object): + _alloc_flavor_ = "raw" + + enumerating = False + + def setup(self, obj): + # initialization: read the SUSPSTACK object + p = llmemory.cast_adr_to_ptr(obj, lltype.Ptr(SUSPSTACK)) + if not p.handle: + return False + self.context = llmemory.cast_ptr_to_adr(p.handle) + anchor = p.anchor + del p + self.curframe = lltype.malloc(WALKFRAME, flavor='raw') + self.otherframe = lltype.malloc(WALKFRAME, flavor='raw') + self.fill_initial_frame(self.curframe, anchor) + return True + + def fill_initial_frame(self, curframe, initialframedata): + # Copy&paste :-( + initialframedata += 2*sizeofaddr + reg = 0 + while reg < CALLEE_SAVED_REGS: + curframe.regs_stored_at[reg] = initialframedata+reg*sizeofaddr + reg += 1 + retaddraddr = initialframedata + CALLEE_SAVED_REGS * sizeofaddr + retaddraddr = self.translateptr(retaddraddr) + curframe.frame_address = retaddraddr.address[0] + + def teardown(self): + lltype.free(self.curframe, flavor='raw') + lltype.free(self.otherframe, flavor='raw') + self.context = llmemory.NULL + return llmemory.NULL + + def next(self, obj, prev): + # + # Pointers to the stack can be "translated" or not: + # + # * Non-translated pointers point to where the data would be + # if the stack was installed and running. + # + # * Translated pointers correspond to where the data + # is now really in memory. 
+ # + # Note that 'curframe' contains non-translated pointers, and + # of course the stack itself is full of non-translated pointers. + # + while True: + if not self.enumerating: + if not prev: + if not self.setup(obj): # one-time initialization + return llmemory.NULL + prev = obj # random value, but non-NULL + callee = self.curframe + retaddraddr = self.translateptr(callee.frame_address) + retaddr = retaddraddr.address[0] + basewalker.locate_caller_based_on_retaddr(retaddr) + self.enumerating = True + # + # not really a loop, but kept this way for similarity + # with asmgcroot: + callee = self.curframe + ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP] + ebp_in_caller = self.translateptr(ebp_in_caller) + ebp_in_caller = ebp_in_caller.address[0] + while True: + location = basewalker._shape_decompressor.next() + if location == 0: + break + addr = basewalker.getlocation(callee, ebp_in_caller, + location) + # yield the translated addr of the next GCREF in the stack + return self.translateptr(addr) + # + self.enumerating = False + caller = self.otherframe + reg = CALLEE_SAVED_REGS - 1 + while reg >= 0: + location = basewalker._shape_decompressor.next() + addr = basewalker.getlocation(callee, ebp_in_caller, + location) + caller.regs_stored_at[reg] = addr # non-translated + reg -= 1 + + location = basewalker._shape_decompressor.next() + caller.frame_address = basewalker.getlocation(callee, + ebp_in_caller, + location) + # ^^^ non-translated + if caller.frame_address == llmemory.NULL: + return self.teardown() # completely done with this stack + # + self.otherframe = callee + self.curframe = caller + # loop back + + def translateptr(self, addr): + return _c._translate_pointer(self.context, addr) + + _stackletrootwalker = StackletRootWalker() + return _stackletrootwalker +get_stackletrootwalker._annspecialcase_ = 'specialize:memo' + + +def customtrace(obj, prev): + stackletrootwalker = get_stackletrootwalker() + return stackletrootwalker.next(obj, prev) + + +SUSPSTACK = 
lltype.GcStruct('SuspStack', + ('handle', _c.handle), + ('anchor', llmemory.Address), + rtti=True) +NULL_SUSPSTACK = lltype.nullptr(SUSPSTACK) +CUSTOMTRACEFUNC = lltype.FuncType([llmemory.Address, llmemory.Address], + llmemory.Address) +customtraceptr = llhelper(lltype.Ptr(CUSTOMTRACEFUNC), customtrace) +lltype.attachRuntimeTypeInfo(SUSPSTACK, customtraceptr=customtraceptr) + +ASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference()) +ASM_FRAMEDATA_HEAD_PTR.TO.become(lltype.Struct('ASM_FRAMEDATA_HEAD', + ('prev', ASM_FRAMEDATA_HEAD_PTR), + ('next', ASM_FRAMEDATA_HEAD_PTR) + )) +alternateanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO, + immortal=True) +alternateanchor.prev = alternateanchor +alternateanchor.next = alternateanchor + +FUNCNOARG_P = lltype.Ptr(lltype.FuncType([], _c.handle)) +pypy_asm_stackwalk2 = rffi.llexternal('pypy_asm_stackwalk', + [FUNCNOARG_P, + ASM_FRAMEDATA_HEAD_PTR], + _c.handle, sandboxsafe=True, + _nowrapper=True) + + +def _new_callback(): + # Here, we just closed the stack. Get the stack anchor, store + # it in the gcrootfinder.suspstack.anchor, and create a new + # stacklet with stacklet_new(). If this call fails, then we + # are just returning NULL. + _stack_just_closed() + return _c.new(gcrootfinder.thrd, llhelper(_c.run_fn, _new_runfn), + llmemory.NULL) + +def _stack_just_closed(): + # Immediately unlink the new stackanchor from the doubly-linked + # chained list. When returning from pypy_asm_stackwalk2, the + # assembler code will try to unlink it again, which should be + # a no-op given that the doubly-linked list is empty. + stackanchor = llmemory.cast_ptr_to_adr(alternateanchor.next) + gcrootfinder.suspstack.anchor = stackanchor + alternateanchor.prev = alternateanchor + alternateanchor.next = alternateanchor + +def _new_runfn(h, _): + # Here, we are in a fresh new stacklet. 
+ llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + # + # There is a fresh suspstack object waiting on the gcrootfinder, + # so populate it with data that represents the parent suspended + # stacklet and detach the suspstack object from gcrootfinder. + suspstack = gcrootfinder.attach_handle_on_suspstack(h) + # + # Call the main function provided by the (RPython) user. + suspstack = gcrootfinder.runfn(suspstack, gcrootfinder.arg) + # + # Here, suspstack points to the target stacklet to which we want + # to jump to next. Read the 'handle' and forget about the + # suspstack object. + return _consume_suspstack(suspstack) + +def _consume_suspstack(suspstack): + h = suspstack.handle + ll_assert(bool(h), "_consume_suspstack: null handle") + suspstack.handle = _c.null_handle + return h + +def _switch_callback(): + # Here, we just closed the stack. Get the stack anchor, store + # it in the gcrootfinder.suspstack.anchor, and switch to this + # suspstack with stacklet_switch(). If this call fails, then we + # are just returning NULL. + oldanchor = gcrootfinder.suspstack.anchor + _stack_just_closed() + h = _consume_suspstack(gcrootfinder.suspstack) + # + # gcrootfinder.suspstack.anchor is left with the anchor of the + # previous place (i.e. before the call to switch()). 
+ h2 = _c.switch(gcrootfinder.thrd, h) + # + if not h2: # MemoryError: restore + gcrootfinder.suspstack.anchor = oldanchor + gcrootfinder.suspstack.handle = h + return h2 + + +class StackletGcRootFinder(object): + suspstack = NULL_SUSPSTACK + + def new(self, thrd, callback, arg): + self.thrd = thrd._thrd + self.runfn = callback + self.arg = arg + # make a fresh new clean SUSPSTACK + newsuspstack = lltype.malloc(SUSPSTACK) + newsuspstack.handle = _c.null_handle + self.suspstack = newsuspstack + # Invoke '_new_callback' by closing the stack + h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _new_callback), + alternateanchor) + return self.get_result_suspstack(h) + + def switch(self, thrd, suspstack): + self.thrd = thrd._thrd + self.suspstack = suspstack + h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _switch_callback), + alternateanchor) + return self.get_result_suspstack(h) + + def attach_handle_on_suspstack(self, handle): + s = self.suspstack + self.suspstack = NULL_SUSPSTACK + ll_assert(bool(s.anchor), "s.anchor should not be null") + s.handle = handle + llop.gc_assume_young_pointers(lltype.Void, llmemory.cast_ptr_to_adr(s)) + return s + + def get_result_suspstack(self, h): + # + # Return from a new() or a switch(): 'h' is a handle, possibly + # an empty one, that says from where we switched to. + if not h: + raise MemoryError + elif _c.is_empty_handle(h): + return NULL_SUSPSTACK + else: + # This is a return that gave us a real handle. Store it. 
+ return self.attach_handle_on_suspstack(h) + + def destroy(self, thrd, suspstack): + h = suspstack.handle + suspstack.handle = _c.null_handle + _c.destroy(thrd._thrd, h) + + def is_empty_handle(self, suspstack): + return not suspstack + + def get_null_handle(self): + return NULL_SUSPSTACK + + +gcrootfinder = StackletGcRootFinder() diff --git a/pypy/rlib/_stacklet_n_a.py b/pypy/rlib/_stacklet_n_a.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/_stacklet_n_a.py @@ -0,0 +1,31 @@ +from pypy.rlib import _rffi_stacklet as _c +from pypy.rpython.annlowlevel import llhelper +from pypy.tool.staticmethods import StaticMethods + + +class StackletGcRootFinder: + __metaclass__ = StaticMethods + + def new(thrd, callback, arg): + h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg) + if not h: + raise MemoryError + return h + new._annspecialcase_ = 'specialize:arg(1)' + + def switch(thrd, h): + h = _c.switch(thrd._thrd, h) + if not h: + raise MemoryError + return h + + def destroy(thrd, h): + _c.destroy(thrd._thrd, h) + + is_empty_handle = _c.is_empty_handle + + def get_null_handle(): + return _c.null_handle + + +gcrootfinder = StackletGcRootFinder # class object diff --git a/pypy/rlib/_stacklet_shadowstack.py b/pypy/rlib/_stacklet_shadowstack.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/_stacklet_shadowstack.py @@ -0,0 +1,110 @@ +from pypy.rlib import _rffi_stacklet as _c +from pypy.rlib.debug import ll_assert +from pypy.rpython.annlowlevel import llhelper +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem.lloperation import llop +from pypy.tool.staticmethods import StaticMethods + + +NULL_SUSPSTACK = lltype.nullptr(llmemory.GCREF.TO) + + +def _new_callback(h, arg): + # We still have the old shadowstack active at this point; save it + # away, and start a fresh new one + oldsuspstack = gcrootfinder.oldsuspstack + h = llmemory.cast_ptr_to_adr(h) + llop.gc_save_current_state_away(lltype.Void, + oldsuspstack, h) + 
llop.gc_start_fresh_new_state(lltype.Void) + gcrootfinder.oldsuspstack = NULL_SUSPSTACK + # + newsuspstack = gcrootfinder.callback(oldsuspstack, arg) + # + # Finishing this stacklet. + gcrootfinder.oldsuspstack = NULL_SUSPSTACK + gcrootfinder.newsuspstack = newsuspstack + h = llop.gc_shadowstackref_context(llmemory.Address, newsuspstack) + return llmemory.cast_adr_to_ptr(h, _c.handle) + +def prepare_old_suspstack(): + if not gcrootfinder.oldsuspstack: # else reuse the one still there + _allocate_old_suspstack() + +def _allocate_old_suspstack(): + suspstack = llop.gc_shadowstackref_new(llmemory.GCREF) + gcrootfinder.oldsuspstack = suspstack +_allocate_old_suspstack._dont_inline_ = True + +def get_result_suspstack(h): + # Now we are in the target, after the switch() or the new(). + # Note that this whole module was carefully written in such a way as + # not to invoke pushing/popping things off the shadowstack at + # unexpected moments... + oldsuspstack = gcrootfinder.oldsuspstack + newsuspstack = gcrootfinder.newsuspstack + gcrootfinder.oldsuspstack = NULL_SUSPSTACK + gcrootfinder.newsuspstack = NULL_SUSPSTACK + if not h: + raise MemoryError + # We still have the old shadowstack active at this point; save it + # away, and restore the new one + if oldsuspstack: + ll_assert(not _c.is_empty_handle(h),"unexpected empty stacklet handle") + h = llmemory.cast_ptr_to_adr(h) + llop.gc_save_current_state_away(lltype.Void, oldsuspstack, h) + else: + ll_assert(_c.is_empty_handle(h),"unexpected non-empty stacklet handle") + llop.gc_forget_current_state(lltype.Void) + # + llop.gc_restore_state_from(lltype.Void, newsuspstack) + # + # From this point on, 'newsuspstack' is consumed and done, its + # shadow stack installed as the current one. It should not be + # used any more. For performance, we avoid it being deallocated + # by letting it be reused on the next switch. + gcrootfinder.oldsuspstack = newsuspstack + # Return. 
+ return oldsuspstack + + +class StackletGcRootFinder: + __metaclass__ = StaticMethods + + def new(thrd, callback, arg): + gcrootfinder.callback = callback + thread_handle = thrd._thrd + prepare_old_suspstack() + h = _c.new(thread_handle, llhelper(_c.run_fn, _new_callback), arg) + return get_result_suspstack(h) + new._dont_inline_ = True + + def switch(thrd, suspstack): + # suspstack has a handle to target, i.e. where to switch to + ll_assert(suspstack != gcrootfinder.oldsuspstack, + "stacklet: invalid use") + gcrootfinder.newsuspstack = suspstack + thread_handle = thrd._thrd + h = llop.gc_shadowstackref_context(llmemory.Address, suspstack) + h = llmemory.cast_adr_to_ptr(h, _c.handle) + prepare_old_suspstack() + h = _c.switch(thread_handle, h) + return get_result_suspstack(h) + switch._dont_inline_ = True + + def destroy(thrd, suspstack): + h = llop.gc_shadowstackref_context(llmemory.Address, suspstack) + h = llmemory.cast_adr_to_ptr(h, _c.handle) + llop.gc_shadowstackref_destroy(lltype.Void, suspstack) + _c.destroy(thrd._thrd, h) + + def is_empty_handle(suspstack): + return not suspstack + + def get_null_handle(): + return NULL_SUSPSTACK + + +gcrootfinder = StackletGcRootFinder() +gcrootfinder.oldsuspstack = NULL_SUSPSTACK +gcrootfinder.newsuspstack = NULL_SUSPSTACK diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -286,10 +286,10 @@ FFI_OK = cConfig.FFI_OK FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF -FFI_DEFAULT_ABI = rffi.cast(rffi.USHORT, cConfig.FFI_DEFAULT_ABI) +FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI if _WIN32: - FFI_STDCALL = rffi.cast(rffi.USHORT, cConfig.FFI_STDCALL) -FFI_TYPE_STRUCT = rffi.cast(rffi.USHORT, cConfig.FFI_TYPE_STRUCT) + FFI_STDCALL = cConfig.FFI_STDCALL +FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT FFI_CIFP = rffi.COpaquePtr('ffi_cif', compilation_info=eci) FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure) @@ -319,7 +319,7 @@ which the 'ffistruct' member is a regular FFI_TYPE. 
""" tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types)+1, flavor='raw') - tpe.ffistruct.c_type = FFI_TYPE_STRUCT + tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT) tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size) tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment) tpe.ffistruct.c_elements = rffi.cast(FFI_TYPE_PP, @@ -402,12 +402,20 @@ closureHeap = ClosureHeap() -FUNCFLAG_STDCALL = 0 -FUNCFLAG_CDECL = 1 # for WINAPI calls +FUNCFLAG_STDCALL = 0 # on Windows: for WINAPI calls +FUNCFLAG_CDECL = 1 # on Windows: for __cdecl calls FUNCFLAG_PYTHONAPI = 4 FUNCFLAG_USE_ERRNO = 8 FUNCFLAG_USE_LASTERROR = 16 +def get_call_conv(flags, from_jit): + if _WIN32 and (flags & FUNCFLAG_CDECL == 0): + return FFI_STDCALL + else: + return FFI_DEFAULT_ABI +get_call_conv._annspecialcase_ = 'specialize:arg(1)' # hack :-/ + + class AbstractFuncPtr(object): ll_cif = lltype.nullptr(FFI_CIFP.TO) ll_argtypes = lltype.nullptr(FFI_TYPE_PP.TO) @@ -427,21 +435,17 @@ self.ll_cif = lltype.malloc(FFI_CIFP.TO, flavor='raw', track_allocation=False) # freed by the __del__ - if _WIN32 and (flags & FUNCFLAG_CDECL == 0): - cc = FFI_STDCALL - else: - cc = FFI_DEFAULT_ABI - if _MSVC: # This little trick works correctly with MSVC. 
# It returns small structures in registers - if r_uint(restype.c_type) == FFI_TYPE_STRUCT: + if intmask(restype.c_type) == FFI_TYPE_STRUCT: if restype.c_size <= 4: restype = ffi_type_sint32 elif restype.c_size <= 8: restype = ffi_type_sint64 - res = c_ffi_prep_cif(self.ll_cif, cc, + res = c_ffi_prep_cif(self.ll_cif, + rffi.cast(rffi.USHORT, get_call_conv(flags,False)), rffi.cast(rffi.UINT, argnum), restype, self.ll_argtypes) if not res == FFI_OK: diff --git a/pypy/rlib/debug.py b/pypy/rlib/debug.py --- a/pypy/rlib/debug.py +++ b/pypy/rlib/debug.py @@ -26,6 +26,7 @@ llop.debug_print_traceback(lltype.Void) llop.debug_fatalerror(lltype.Void, msg) fatalerror._dont_inline_ = True +fatalerror._annspecialcase_ = 'specialize:arg(1)' class DebugLog(list): diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -75,7 +75,7 @@ @staticmethod @jit.elidable def is_struct(ffi_type): - return intmask(ffi_type.c_type) == intmask(FFI_TYPE_STRUCT) + return intmask(ffi_type.c_type) == FFI_TYPE_STRUCT types._import() diff --git a/pypy/rlib/parsing/makepackrat.py b/pypy/rlib/parsing/makepackrat.py --- a/pypy/rlib/parsing/makepackrat.py +++ b/pypy/rlib/parsing/makepackrat.py @@ -251,9 +251,11 @@ return "ErrorInformation(%s, %s)" % (self.pos, self.expected) def get_line_column(self, source): - uptoerror = source[:self.pos] + pos = self.pos + assert pos >= 0 + uptoerror = source[:pos] lineno = uptoerror.count("\n") - columnno = self.pos - uptoerror.rfind("\n") + columnno = pos - uptoerror.rfind("\n") return lineno, columnno def nice_error_message(self, filename='', source=""): diff --git a/pypy/rlib/parsing/tree.py b/pypy/rlib/parsing/tree.py --- a/pypy/rlib/parsing/tree.py +++ b/pypy/rlib/parsing/tree.py @@ -6,9 +6,16 @@ content = ["digraph G{"] content.extend(self.dot()) content.append("}") - p = py.test.ensuretemp("automaton").join("temp.dot") + try: + p = py.test.ensuretemp("automaton").join("temp.dot") + remove = False + except 
AttributeError: # pytest lacks ensuretemp, make a normal one + p = py.path.local.mkdtemp().join('automaton.dot') + remove = True p.write("\n".join(content)) graphclient.display_dot_file(str(p)) + if remove: + p.dirpath().remove() class Symbol(Node): diff --git a/pypy/rlib/rcoroutine.py b/pypy/rlib/rcoroutine.py --- a/pypy/rlib/rcoroutine.py +++ b/pypy/rlib/rcoroutine.py @@ -29,6 +29,11 @@ The type of a switch is determined by the target's costate. """ +import py; py.test.skip("fixme: rewrite using rlib.rstacklet") +# XXX ^^^ the reason it is not done is that pypy.rlib.rcoroutine +# plus pypy/module/_stackless look like faaaaaar too much code +# to me :-( + from pypy.rlib.rstack import yield_current_frame_to_caller from pypy.rlib.objectmodel import we_are_translated diff --git a/pypy/rlib/rgc.py b/pypy/rlib/rgc.py --- a/pypy/rlib/rgc.py +++ b/pypy/rlib/rgc.py @@ -15,132 +15,8 @@ pass # ____________________________________________________________ -# Framework GC features - -class GcPool(object): - pass - -def gc_swap_pool(newpool): - """Set newpool as the current pool (create one if newpool is None). - All malloc'ed objects are put into the current pool;this is a - way to separate objects depending on when they were allocated. - """ - raise NotImplementedError("only works in stacklessgc translated versions") - -def gc_clone(gcobject, pool): - """Recursively clone the gcobject and everything it points to, - directly or indirectly -- but stops at objects that are not - in the specified pool. Pool can be None to mean the current one. - A new pool is built to contain the copies. Return (newobject, newpool). - """ - raise NotImplementedError("only works in stacklessgc translated versions") - -# ____________________________________________________________ # Annotation and specialization -class GcPoolEntry(ExtRegistryEntry): - "Link GcPool to its Repr." 
- _type_ = GcPool - - def get_repr(self, rtyper, s_pool): - config = rtyper.getconfig() - # if the gc policy doesn't support allocation pools, lltype - # pools as Void. - if config.translation.gc != 'marksweep': - from pypy.annotation.model import s_None - return rtyper.getrepr(s_None) - else: - from pypy.rpython.rmodel import SimplePointerRepr - from pypy.rpython.memory.gc.marksweep import X_POOL_PTR - return SimplePointerRepr(X_POOL_PTR) - - -class SwapPoolFnEntry(ExtRegistryEntry): - "Annotation and specialization of gc_swap_pool()." - _about_ = gc_swap_pool - - def compute_result_annotation(self, s_newpool): - from pypy.annotation import model as annmodel - return annmodel.SomeExternalObject(GcPool) - - def specialize_call(self, hop): - from pypy.annotation import model as annmodel - s_pool_ptr = annmodel.SomeExternalObject(GcPool) - r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr) - - opname = 'gc_x_swap_pool' - config = hop.rtyper.getconfig() - if config.translation.gc != 'marksweep': - # when the gc policy doesn't support pools, just return - # the argument (which is lltyped as Void anyway) - opname = 'same_as' - - s_pool_ptr = annmodel.SomeExternalObject(GcPool) - r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr) - vlist = hop.inputargs(r_pool_ptr) - return hop.genop(opname, vlist, resulttype = r_pool_ptr) - -def _raise(): - raise RuntimeError - -class CloneFnEntry(ExtRegistryEntry): - "Annotation and specialization of gc_clone()." 
- _about_ = gc_clone - - def compute_result_annotation(self, s_gcobject, s_pool): - from pypy.annotation import model as annmodel - return annmodel.SomeTuple([s_gcobject, - annmodel.SomeExternalObject(GcPool)]) - - def specialize_call(self, hop): - from pypy.rpython.error import TyperError - from pypy.rpython.lltypesystem import rtuple - from pypy.annotation import model as annmodel - from pypy.rpython.memory.gc.marksweep import X_CLONE, X_CLONE_PTR - - config = hop.rtyper.getconfig() - if config.translation.gc != 'marksweep': - # if the gc policy does not support allocation pools, - # gc_clone always raises RuntimeError - hop.exception_is_here() - hop.gendirectcall(_raise) - s_pool_ptr = annmodel.SomeExternalObject(GcPool) - r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr) - r_tuple = hop.r_result - v_gcobject, v_pool = hop.inputargs(hop.args_r[0], r_pool_ptr) - return rtuple.newtuple(hop.llops, r_tuple, [v_gcobject, v_pool]) - - r_gcobject = hop.args_r[0] - if (not isinstance(r_gcobject.lowleveltype, lltype.Ptr) or - r_gcobject.lowleveltype.TO._gckind != 'gc'): - raise TyperError("gc_clone() can only clone a dynamically " - "allocated object;\ngot %r" % (r_gcobject,)) - s_pool_ptr = annmodel.SomeExternalObject(GcPool) - r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr) - r_tuple = hop.r_result - - c_CLONE = hop.inputconst(lltype.Void, X_CLONE) - c_flags = hop.inputconst(lltype.Void, {'flavor': 'gc'}) - c_gcobjectptr = hop.inputconst(lltype.Void, "gcobjectptr") - c_pool = hop.inputconst(lltype.Void, "pool") - - v_gcobject, v_pool = hop.inputargs(hop.args_r[0], r_pool_ptr) - v_gcobjectptr = hop.genop('cast_opaque_ptr', [v_gcobject], - resulttype = llmemory.GCREF) - v_clonedata = hop.genop('malloc', [c_CLONE, c_flags], - resulttype = X_CLONE_PTR) - hop.genop('setfield', [v_clonedata, c_gcobjectptr, v_gcobjectptr]) - hop.genop('setfield', [v_clonedata, c_pool, v_pool]) - hop.exception_is_here() - hop.genop('gc_x_clone', [v_clonedata]) - v_gcobjectptr = hop.genop('getfield', 
[v_clonedata, c_gcobjectptr], - resulttype = llmemory.GCREF) - v_pool = hop.genop('getfield', [v_clonedata, c_pool], - resulttype = r_pool_ptr) - v_gcobject = hop.genop('cast_opaque_ptr', [v_gcobjectptr], - resulttype = r_tuple.items_r[0]) - return rtuple.newtuple(hop.llops, r_tuple, [v_gcobject, v_pool]) - # Support for collection. class CollectEntry(ExtRegistryEntry): diff --git a/pypy/rlib/rstack.py b/pypy/rlib/rstack.py --- a/pypy/rlib/rstack.py +++ b/pypy/rlib/rstack.py @@ -14,25 +14,6 @@ from pypy.rpython.controllerentry import Controller, SomeControlledInstance from pypy.translator.tool.cbuild import ExternalCompilationInfo -def stack_unwind(): - if we_are_translated(): - return llop.stack_unwind(lltype.Void) - raise RuntimeError("cannot unwind stack in non-translated versions") - - -def stack_capture(): - if we_are_translated(): - ptr = llop.stack_capture(OPAQUE_STATE_HEADER_PTR) - return frame_stack_top_controller.box(ptr) - raise RuntimeError("cannot unwind stack in non-translated versions") - - -def stack_frames_depth(): - if we_are_translated(): - return llop.stack_frames_depth(lltype.Signed) - else: - return len(inspect.stack()) - # ____________________________________________________________ compilation_info = ExternalCompilationInfo(includes=['src/stack.h']) @@ -88,78 +69,6 @@ @rgc.no_collect def stack_check_slowpath(current): if ord(_stack_too_big_slowpath(current)): - # Now we are sure that the stack is really too big. Note that the - # stack_unwind implementation is different depending on if stackless - # is enabled. If it is it unwinds the stack, otherwise it simply - # raises a RuntimeError. 
- stack_unwind() + from pypy.rlib.rstackovf import _StackOverflow + raise _StackOverflow stack_check_slowpath._dont_inline_ = True - -# ____________________________________________________________ - -def yield_current_frame_to_caller(): - raise NotImplementedError("only works in translated versions") - - -class frame_stack_top(object): - def switch(self): - raise NotImplementedError("only works in translated versions") - - -class BoundSwitchOfFrameStackTop(object): pass -class BoundSwitchOfFrameStackTopController(Controller): - knowntype = BoundSwitchOfFrameStackTop - def call(self, real_object): - from pypy.rpython.lltypesystem.lloperation import llop - ptr = llop.stack_switch(OPAQUE_STATE_HEADER_PTR, real_object) - return frame_stack_top_controller.box(ptr) - - -class FrameStackTopController(Controller): - knowntype = frame_stack_top - can_be_None = True - - def is_true(self, real_object): - return bool(real_object) - - def get_switch(self, real_object): - return bound_switch_of_frame_stack_top_controller.box(real_object) - - def convert(self, obj): - assert obj is None - return lltype.nullptr(OPAQUE_STATE_HEADER_PTR.TO) - -frame_stack_top_controller = FrameStackTopController() -bound_switch_of_frame_stack_top_controller = BoundSwitchOfFrameStackTopController() -OPAQUE_STATE_HEADER = lltype.GcOpaqueType("OPAQUE_STATE_HEADER", hints={"render_structure": True}) -OPAQUE_STATE_HEADER_PTR = lltype.Ptr(OPAQUE_STATE_HEADER) - - - -class FrameStackTopReturningFnEntry(ExtRegistryEntry): - def compute_result_annotation(self): - from pypy.annotation import model as annmodel - return SomeControlledInstance(annmodel.lltype_to_annotation(OPAQUE_STATE_HEADER_PTR), frame_stack_top_controller) - - -class YieldCurrentFrameToCallerFnEntry(FrameStackTopReturningFnEntry): - _about_ = yield_current_frame_to_caller - - def specialize_call(self, hop): - var = hop.genop("yield_current_frame_to_caller", [], hop.r_result.lowleveltype) - return var - - -# 
____________________________________________________________ - -def get_stack_depth_limit(): - if we_are_translated(): - from pypy.rpython.lltypesystem.lloperation import llop - return llop.get_stack_depth_limit(lltype.Signed) - raise RuntimeError("no stack depth limit in non-translated versions") - -def set_stack_depth_limit(limit): - if we_are_translated(): - from pypy.rpython.lltypesystem.lloperation import llop - return llop.set_stack_depth_limit(lltype.Void, limit) - raise RuntimeError("no stack depth limit in non-translated versions") diff --git a/pypy/rlib/rstacklet.py b/pypy/rlib/rstacklet.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/rstacklet.py @@ -0,0 +1,58 @@ +from pypy.rlib import _rffi_stacklet as _c +from pypy.rpython.lltypesystem import lltype, llmemory + + +class StackletThread(object): + + def __init__(self, config): + self._gcrootfinder = _getgcrootfinder(config) + self._thrd = _c.newthread() + if not self._thrd: + raise MemoryError + self._thrd_deleter = StackletThreadDeleter(self._thrd) + + def new(self, callback, arg=llmemory.NULL): + return self._gcrootfinder.new(self, callback, arg) + new._annspecialcase_ = 'specialize:arg(1)' + + def switch(self, stacklet): + return self._gcrootfinder.switch(self, stacklet) + + def destroy(self, stacklet): + self._gcrootfinder.destroy(self, stacklet) + + def is_empty_handle(self, stacklet): + # note that "being an empty handle" and being equal to + # "get_null_handle()" may be the same, or not; don't rely on it + return self._gcrootfinder.is_empty_handle(stacklet) + + def get_null_handle(self): + return self._gcrootfinder.get_null_handle() + + +class StackletThreadDeleter(object): + # quick hack: the __del__ is on another object, so that + # if the main StackletThread ends up in random circular + # references, on pypy deletethread() is only called + # when all that circular reference mess is gone. 
+ def __init__(self, thrd): + self._thrd = thrd + def __del__(self): + thrd = self._thrd + if thrd: + self._thrd = lltype.nullptr(_c.thread_handle.TO) + _c.deletethread(thrd) + +# ____________________________________________________________ + +def _getgcrootfinder(config): + if (config is None or + config.translation.gc in ('ref', 'boehm', 'none')): # for tests + gcrootfinder = 'n/a' + else: + gcrootfinder = config.translation.gcrootfinder + gcrootfinder = gcrootfinder.replace('/', '_') + module = __import__('pypy.rlib._stacklet_%s' % gcrootfinder, + None, None, ['__doc__']) + return module.gcrootfinder +_getgcrootfinder._annspecialcase_ = 'specialize:memo' diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -1403,7 +1403,7 @@ s, pos, pos + unicode_bytes) result.append(res) continue - result.append(unichr(t)) + result.append(UNICHR(t)) pos += unicode_bytes return result.build(), pos diff --git a/pypy/rlib/streamio.py b/pypy/rlib/streamio.py --- a/pypy/rlib/streamio.py +++ b/pypy/rlib/streamio.py @@ -496,29 +496,24 @@ if bufsize == -1: # Get default from the class bufsize = self.bufsize self.bufsize = bufsize # buffer size (hint only) - self.lines = [] # ready-made lines (sans "\n") - self.buf = "" # raw data (may contain "\n") - # Invariant: readahead == "\n".join(self.lines + [self.buf]) - # self.lines contains no "\n" - # self.buf may contain "\n" + self.buf = "" # raw data + self.pos = 0 def flush_buffers(self): - if self.lines or self.buf: + if self.buf: try: self.do_seek(self.tell(), 0) except MyNotImplementedError: pass else: - self.lines = [] self.buf = "" + self.pos = 0 def tell(self): - bytes = self.do_tell() # This may fail - offset = len(self.buf) - for line in self.lines: - offset += len(line) + 1 - assert bytes >= offset #, (locals(), self.__dict__) - return bytes - offset + tellpos = self.do_tell() # This may fail + offset = len(self.buf) - self.pos + assert tellpos >= offset #, 
(locals(), self.__dict__) + return tellpos - offset def seek(self, offset, whence): # This may fail on the do_seek() or do_tell() call. @@ -526,32 +521,25 @@ # Nor on a seek to the very end. if whence == 0: self.do_seek(offset, 0) - self.lines = [] self.buf = "" + self.pos = 0 return if whence == 1: + currentsize = len(self.buf) - self.pos if offset < 0: - self.do_seek(self.tell() + offset, 0) - self.lines = [] - self.buf = "" + if self.pos + offset >= 0: + self.pos += offset + else: + self.do_seek(self.tell() + offset, 0) + self.pos = 0 + self.buf = "" return - while self.lines: - line = self.lines[-1] - if offset <= len(line): - intoffset = intmask(offset) - assert intoffset >= 0 - self.lines[-1] = line[intoffset:] - return - offset -= len(self.lines[-1]) - 1 - self.lines.pop() - assert not self.lines - if offset <= len(self.buf): - intoffset = intmask(offset) - assert intoffset >= 0 - self.buf = self.buf[intoffset:] + elif offset <= currentsize: + self.pos += offset return - offset -= len(self.buf) self.buf = "" + self.pos = 0 + offset -= currentsize try: self.do_seek(offset, 1) except MyNotImplementedError: @@ -564,18 +552,18 @@ except MyNotImplementedError: pass else: - self.lines = [] + self.pos = 0 self.buf = "" return # Skip relative to EOF by reading and saving only just as # much as needed intoffset = offset2int(offset) - self.lines.reverse() - data = "\n".join(self.lines + [self.buf]) - total = len(data) - buffers = [data] - self.lines = [] + pos = self.pos + assert pos >= 0 + buffers = [self.buf[pos:]] + total = len(buffers[0]) self.buf = "" + self.pos = 0 while 1: data = self.do_read(self.bufsize) if not data: @@ -589,157 +577,101 @@ if cutoff < 0: raise StreamError("cannot seek back") if buffers: + assert cutoff >= 0 buffers[0] = buffers[0][cutoff:] self.buf = "".join(buffers) - self.lines = [] return + raise StreamError("whence should be 0, 1 or 2") def readall(self): - self.lines.reverse() - self.lines.append(self.buf) - more = 
["\n".join(self.lines)] - self.lines = [] + pos = self.pos + assert pos >= 0 + chunks = [self.buf[pos:]] self.buf = "" + self.pos = 0 bufsize = self.bufsize while 1: data = self.do_read(bufsize) if not data: break - more.append(data) + chunks.append(data) bufsize = min(bufsize*2, self.bigsize) - return "".join(more) + return "".join(chunks) - def read(self, n): + def read(self, n=-1): assert isinstance(n, int) - assert n >= 0 - if self.lines: - # See if this can be satisfied from self.lines[0] - line = self.lines[-1] - if len(line) >= n: - self.lines[-1] = line[n:] - return line[:n] - - # See if this can be satisfied *without exhausting* self.lines - k = 0 - i = 0 - lgt = len(self.lines) - for linenum in range(lgt-1,-1,-1): - line = self.lines[linenum] - k += len(line) - if k >= n: - lines = self.lines[linenum + 1:] - data = self.lines[linenum] - cutoff = len(data) - (k-n) - assert cutoff >= 0 - lines.reverse() - lines.append(data[:cutoff]) - del self.lines[linenum:] - self.lines.append(data[cutoff:]) - return "\n".join(lines) - k += 1 - - # See if this can be satisfied from self.lines plus self.buf - if k + len(self.buf) >= n: - lines = self.lines - lines.reverse() - self.lines = [] - cutoff = n - k - assert cutoff >= 0 - lines.append(self.buf[:cutoff]) - self.buf = self.buf[cutoff:] - return "\n".join(lines) - + if n < 0: + return self.readall() + currentsize = len(self.buf) - self.pos + start = self.pos + assert start >= 0 + if n <= currentsize: + stop = start + n + assert stop >= 0 + result = self.buf[start:stop] + self.pos += n + return result else: - # See if this can be satisfied from self.buf - data = self.buf - k = len(data) - if k >= n: - cutoff = len(data) - (k-n) - assert cutoff >= 0 - assert len(data) >= cutoff - self.buf = data[cutoff:] - return data[:cutoff] - - lines = self.lines - lines.reverse() - self.lines = [] - lines.append(self.buf) - self.buf = "" - data = "\n".join(lines) - more = [data] - k = len(data) - while k < n: - data = 
self.do_read(max(self.bufsize, n-k)) - k += len(data) - more.append(data) - if not data: - break - cutoff = len(data) - (k-n) - assert cutoff >= 0 - if len(data) <= cutoff: - self.buf = "" - else: - self.buf = data[cutoff:] - more[-1] = data[:cutoff] - return "".join(more) - - # read_next_bunch is generally this, version below is slightly faster - #def _read_next_bunch(self): - # self.lines = self.buf.split("\n") - # self.buf = self.lines.pop() - # self.lines.reverse() - - def _read_next_bunch(self): - numlines = self.buf.count("\n") - self.lines = [None] * numlines - last = -1 - num = numlines - 1 - while True: - start = last + 1 - assert start >= 0 - next = self.buf.find("\n", start) - if next == -1: - if last != -1: - self.buf = self.buf[start:] - break - assert next >= 0 - self.lines[num] = self.buf[start:next] - last = next - num -= 1 + chunks = [self.buf[start:]] + while 1: + self.buf = self.do_read(self.bufsize) + if not self.buf: + self.pos = 0 + break + currentsize += len(self.buf) + if currentsize >= n: + self.pos = len(self.buf) - (currentsize - n) + stop = self.pos + assert stop >= 0 + chunks.append(self.buf[:stop]) + break + chunks.append(self.buf) + return ''.join(chunks) def readline(self): - if self.lines: - return self.lines.pop() + "\n" - - # This block is needed because read() can leave self.buf - # containing newlines - self._read_next_bunch() - if self.lines: - return self.lines.pop() + "\n" - - if self.buf: - buf = [self.buf] - else: - buf = [] + pos = self.pos + assert pos >= 0 + i = self.buf.find("\n", pos) + start = self.pos + assert start >= 0 + if i >= 0: # new line found + i += 1 + result = self.buf[start:i] + self.pos = i + return result + temp = self.buf[start:] + # read one buffer and most of the time a new line will be found + self.buf = self.do_read(self.bufsize) + i = self.buf.find("\n") + if i >= 0: # new line found + i += 1 + result = temp + self.buf[:i] + self.pos = i + return result + if not self.buf: + self.pos = 0 + return 
temp + # need to keep getting data until we find a new line + chunks = [temp, self.buf] while 1: self.buf = self.do_read(self.bufsize) - self._read_next_bunch() - if self.lines: - buf.append(self.lines.pop()) - buf.append("\n") + if not self.buf: + self.pos = 0 break - if not self.buf: + i = self.buf.find("\n") + if i >= 0: + i += 1 + chunks.append(self.buf[:i]) + self.pos = i break - buf.append(self.buf) - - return "".join(buf) + chunks.append(self.buf) + return "".join(chunks) def peek(self): - if self.lines: - return self.lines[-1] + "\n" - else: - return self.buf + pos = self.pos + assert pos >= 0 + return self.buf[pos:] From noreply at buildbot.pypy.org Fri Sep 2 14:46:02 2011 From: noreply at buildbot.pypy.org (snus_mumrik) Date: Fri, 2 Sep 2011 14:46:02 +0200 (CEST) Subject: [pypy-commit] pypy numpy-comparison: post-merge fixes Message-ID: <20110902124602.440EA8204C@wyvern.cs.uni-duesseldorf.de> Author: Ilya Osadchiy Branch: numpy-comparison Changeset: r47016:35292645eb01 Date: 2011-09-02 11:29 +0300 http://bitbucket.org/pypy/pypy/changeset/35292645eb01/ Log: post-merge fixes diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -74,12 +74,12 @@ descr_pow = _binop_impl("power") descr_mod = _binop_impl("mod") - descr_eq = _binop_impl(interp_ufuncs.equal) - descr_ne = _binop_impl(interp_ufuncs.not_equal) - descr_lt = _binop_impl(interp_ufuncs.less) - descr_le = _binop_impl(interp_ufuncs.less_equal) - descr_gt = _binop_impl(interp_ufuncs.greater) - descr_ge = _binop_impl(interp_ufuncs.greater_equal) + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") def _binop_right_impl(ufunc_name): def impl(self, space, w_other): diff --git 
a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -141,12 +141,11 @@ rhs = w_rhs.value.convert_to(calc_dtype) interm_res = self.func(calc_dtype, lhs, rhs) return interm_res.convert_to(res_dtype).wrap(space) - return self.func(res_dtype, w_lhs.value, w_rhs.value).wrap(space) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature ]) - w_res = Call2(new_sig, res_dtype, res_dtype, calc_dtype, w_lhs, w_rhs) + w_res = Call2(new_sig, res_dtype, calc_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res From noreply at buildbot.pypy.org Fri Sep 2 14:46:03 2011 From: noreply at buildbot.pypy.org (snus_mumrik) Date: Fri, 2 Sep 2011 14:46:03 +0200 (CEST) Subject: [pypy-commit] pypy numpy-comparison: Translation fix Message-ID: <20110902124603.7CE818204C@wyvern.cs.uni-duesseldorf.de> Author: Ilya Osadchiy Branch: numpy-comparison Changeset: r47017:b151eb30e871 Date: 2011-09-02 13:51 +0300 http://bitbucket.org/pypy/pypy/changeset/b151eb30e871/ Log: Translation fix diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -132,10 +132,10 @@ def bool_binop(func): @functools.wraps(func) def impl(self, v1, v2): - return self.box(func(self, + return self.box(bool(func(self, self.for_computation(self.unbox(v1)), self.for_computation(self.unbox(v2)), - )) + ))) return impl def unaryop(func): @@ -198,8 +198,8 @@ def bool(self, v): return bool(self.for_computation(self.unbox(v))) -# def ne(self, v1, v2): -# return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2)) + def ne_w(self, v1, v2): + return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2)) class FloatArithmeticDtype(ArithmaticTypeMixin): diff --git 
a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -121,7 +121,7 @@ size=size, i=i, result=result, cur_best=cur_best) new_best = getattr(dtype, op_name)(cur_best, self.eval(i)) - if dtype.unbox(dtype.ne(new_best, cur_best)): + if dtype.ne_w(new_best, cur_best): result = i cur_best = new_best i += 1 From noreply at buildbot.pypy.org Fri Sep 2 14:46:04 2011 From: noreply at buildbot.pypy.org (snus_mumrik) Date: Fri, 2 Sep 2011 14:46:04 +0200 (CEST) Subject: [pypy-commit] pypy numpy-comparison: Add ufuncs to appleveldefs Message-ID: <20110902124604.B25C88204C@wyvern.cs.uni-duesseldorf.de> Author: Ilya Osadchiy Branch: numpy-comparison Changeset: r47018:da8ca4cb9579 Date: 2011-09-02 14:13 +0300 http://bitbucket.org/pypy/pypy/changeset/da8ca4cb9579/ Log: Add ufuncs to appleveldefs diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -38,7 +38,13 @@ ("sin", "sin"), ("subtract", "subtract"), ("tan", "tan"), - ("equal", "equal") + ("equal", "equal"), + ("equal", "equal"), + ("not_equal", "not_equal"), + ("less", "less"), + ("less_equal", "less_equal"), + ("greater", "greater"), + ("greater_equal", "greater_equal"), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -561,6 +561,7 @@ from numpy import array, dtype a = array(range(5)) b = array(range(5), dtype=float) + c = array(reversed(range(5))) for func in [ lambda x, y: x == y, lambda x, y: x != y, @@ -585,6 +586,14 @@ assert _3b.dtype is dtype(bool) for i in xrange(5): assert _3b[i] == (True if func(3, b[i]) else False) + _ac = func (a, c) + assert 
_ac.dtype is dtype(bool) + for i in xrange(5): + assert _ac[i] == (True if func(a[i], c[i]) else False) + _bc = func (b, c) + assert _bc.dtype is dtype(bool) + for i in xrange(5): + assert _bc[i] == (True if func(b[i], c[i]) else False) class AppTestSupport(object): def setup_class(cls): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -299,13 +299,35 @@ assert math.isnan(b[0]) def test_comparison(self): - from numpy import array, dtype, equal - assert equal(3, 3) is True - assert equal(3, 4) is False - assert equal(3.0, 3.0) is True - assert equal(3.0, 3.5) is False - assert equal(3.0, 3) is True - assert equal(3.0, 4) is False + from numpy import ( + equal, + not_equal, + less, + less_equal, + greater, + greater_equal, + ) + for (ufunc, func) in [ + (equal, lambda x, y: x == y), + (not_equal, lambda x, y: x != y), + (less, lambda x, y: x < y), + (less_equal, lambda x, y: x <= y), + (greater, lambda x, y: x > y), + (greater_equal, lambda x, y: x >= y), + ]: + for a, b in [ + (3, 3), + (3, 4), + (4, 3), + (3.0, 3.0), + (3.0, 3.5), + (3.5, 3.0), + (3.0, 3), + (3, 3.0), + (3.5, 3), + (3, 3.5), + ]: + assert ufunc(a, b) is (True if func(a, b) else False) def test_reduce_errors(self): from numpy import sin, add From noreply at buildbot.pypy.org Fri Sep 2 14:54:39 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Sep 2011 14:54:39 +0200 (CEST) Subject: [pypy-commit] pypy default: When a key in a celldict is set with it's current value, don't create a level of indirection, or mutate the version. This shows up for the attributes of all MixedModules (including __builtins__!). 
Message-ID: <20110902125439.8078D8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47019:a9ad422cdb38 Date: 2011-09-02 08:54 -0400 http://bitbucket.org/pypy/pypy/changeset/a9ad422cdb38/ Log: When a key in a celldict is set with it's current value, don't create a level of indirection, or mutate the version. This shows up for the attributes of all MixedModules (including __builtins__!). diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -23,6 +23,4 @@ guard_not_invalidated(descr=...) p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - p22 = getfield_gc(ConstPtr(ptr21), descr=) - guard_nonnull(p22, descr=...) - """) + """) \ No newline at end of file diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -65,6 +65,10 @@ if isinstance(cell, ModuleCell): cell.w_value = w_value return + # If the new value and the current value are the same, don't create a + # level of indirection, or mutate are version. 
+ if self.space.is_w(w_value, cell): + return if cell is not None: w_value = ModuleCell(w_value) self.mutated() diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -39,6 +39,20 @@ assert d.getitem("a") is None assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + def test_same_key_set_twice(self): + strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + d = W_DictMultiObject(space, strategy, storage) + + v1 = strategy.version + x = object() + d.setitem("a", x) + v2 = strategy.version + assert v1 is not v2 + d.setitem("a", x) + v3 = strategy.version + assert v2 is v3 + class AppTestModuleDict(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) From noreply at buildbot.pypy.org Fri Sep 2 15:13:30 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Sep 2011 15:13:30 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: next test that should pass Message-ID: <20110902131330.D86068204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47020:f3f43660f5e9 Date: 2011-09-02 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/f3f43660f5e9/ Log: next test that should pass diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -357,7 +357,7 @@ assert res == f(10, 1, 1) self.check_history(getarrayitem_gc=0, getfield_gc=0) - def test_heap_caching_pure(self): + def test_heap_caching_array_pure(self): class A(object): pass p1 = A() @@ -405,3 +405,26 @@ assert res == -7 + 7 self.check_operations_history(getfield_gc=0) return + + + def test_heap_caching_multiple_objects(self): + class Gbl(object): + pass + g = Gbl() + class 
A(object): + pass + def fn(n): + a1 = A() + g.a = a1 + a1.x = n - 2 + a2 = A() + g.a = a2 + a2.x = n - 3 + return a1.x + a2.x + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 - 5 + self.check_operations_history(getfield_gc=0) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 - 5 + self.check_operations_history(getfield_gc=0) + From noreply at buildbot.pypy.org Fri Sep 2 15:13:32 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Sep 2011 15:13:32 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: pass a slightly simpler version of this first test Message-ID: <20110902131332.2B5A08204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47021:1988d4a527b0 Date: 2011-09-02 11:59 +0200 http://bitbucket.org/pypy/pypy/changeset/1988d4a527b0/ Log: pass a slightly simpler version of this first test diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -11,7 +11,7 @@ # contains frame boxes that are not virtualizables self.nonstandard_virtualizables = {} # heap cache - # maps descrs to (from_box, to_box) tuples + # maps descrs to {from_box, to_box} dicts self.heap_cache = {} # heap array cache # maps descrs to {index: (from_box, to_box)} dicts @@ -48,13 +48,18 @@ def getfield(self, box, descr): - frombox, tobox = self.heap_cache.get(descr, (None, None)) - if box is frombox: - return tobox + d = self.heap_cache.get(descr, None) + if d: + tobox = d.get(box, None) + if tobox: + return tobox return None + def getfield_now_known(self, box, descr, fieldbox): + self.heap_cache.setdefault(descr, {})[box] = fieldbox + def setfield(self, box, descr, fieldbox): - self.heap_cache[descr] = (box, fieldbox) + self.heap_cache[descr] = {box: fieldbox} def getarrayitem(self, box, descr, indexbox): if not isinstance(indexbox, ConstInt): @@ -77,14 +82,13 @@ cache[index] = box, 
valuebox def replace_box(self, oldbox, newbox): - for descr, (frombox, tobox) in self.heap_cache.iteritems(): - change = False - if frombox is oldbox: - change = True - frombox = newbox - if tobox is oldbox: - change = True - tobox = newbox - if change: - self.heap_cache[descr] = frombox, tobox + for descr, d in self.heap_cache.iteritems(): + new_d = {} + for frombox, tobox in d.iteritems(): + if frombox is oldbox: + frombox = newbox + if tobox is oldbox: + tobox = newbox + new_d[frombox] = tobox + self.heap_cache[descr] = new_d # XXX what about self.heap_array_cache? diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -81,6 +81,22 @@ assert h.getfield(box1, descr2) is None assert h.getfield(box3, descr1) is None + def test_heapcache_fields_multiple(self): + h = HeapCache() + h.getfield_now_known(box1, descr1, box2) + h.getfield_now_known(box3, descr1, box4) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is None + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box3, descr2) is None + + h.reset() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box3, descr1) is None + assert h.getfield(box3, descr2) is None + + def test_heapcache_arrays(self): h = HeapCache() assert h.getarrayitem(box1, descr1, index1) is None @@ -166,3 +182,4 @@ assert h.getfield(box1, descr2) is None assert h.getfield(box4, descr1) is box2 assert h.getfield(box4, descr2) is box3 + diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -416,15 +416,15 @@ def fn(n): a1 = A() g.a = a1 - a1.x = n - 2 + a1.x = n a2 = A() g.a = a2 - a2.x = n - 3 - return a1.x + a2.x + a2.x = n - 1 + return a1.x + a2.x + a1.x 
+ a2.x res = self.interp_operations(fn, [7]) - assert res == 2 * 7 - 5 - self.check_operations_history(getfield_gc=0) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getfield_gc=2) res = self.interp_operations(fn, [-7]) - assert res == 2 * -7 - 5 - self.check_operations_history(getfield_gc=0) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getfield_gc=2) From noreply at buildbot.pypy.org Fri Sep 2 15:13:33 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Sep 2011 15:13:33 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: add the knowledge that two boxes with that were the result of a "new" operation Message-ID: <20110902131333.66AEC8204C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47022:604e55dccba6 Date: 2011-09-02 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/604e55dccba6/ Log: add the knowledge that two boxes with that were the result of a "new" operation can never alias each other. 
diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -8,6 +8,8 @@ def reset(self): # contains boxes where the class is already known self.known_class_boxes = {} + # store the boxes that contain newly allocated objects: + self.new_boxes = {} # contains frame boxes that are not virtualizables self.nonstandard_virtualizables = {} # heap cache @@ -46,6 +48,8 @@ def nonstandard_virtualizables_now_known(self, box): self.nonstandard_virtualizables[box] = None + def new(self, box): + self.new_boxes[box] = None def getfield(self, box, descr): d = self.heap_cache.get(descr, None) @@ -59,7 +63,15 @@ self.heap_cache.setdefault(descr, {})[box] = fieldbox def setfield(self, box, descr, fieldbox): - self.heap_cache[descr] = {box: fieldbox} + d = self.heap_cache.get(descr, None) + new_d = {box: fieldbox} + if not d or box not in self.new_boxes: + self.heap_cache[descr] = new_d + return + for frombox, tobox in d.iteritems(): + if frombox in self.new_boxes: + new_d[frombox] = tobox + self.heap_cache[descr] = new_d def getarrayitem(self, box, descr, indexbox): if not isinstance(indexbox, ConstInt): diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -374,6 +374,7 @@ cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) + self.metainterp.heapcache.new(resbox) self.metainterp.heapcache.class_now_know(resbox) return resbox diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -81,7 +81,7 @@ assert h.getfield(box1, descr2) is None assert h.getfield(box3, descr1) is None - def test_heapcache_fields_multiple(self): + def 
test_heapcache_read_fields_multiple(self): h = HeapCache() h.getfield_now_known(box1, descr1, box2) h.getfield_now_known(box3, descr1, box4) @@ -96,6 +96,31 @@ assert h.getfield(box3, descr1) is None assert h.getfield(box3, descr2) is None + def test_heapcache_write_fields_multiple(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.new(box3) + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is box2 # box1 and box3 cannot alias + def test_heapcache_arrays(self): h = HeapCache() diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -423,8 +423,8 @@ return a1.x + a2.x + a1.x + a2.x res = self.interp_operations(fn, [7]) assert res == 2 * 7 + 2 * 6 - self.check_operations_history(getfield_gc=2) + self.check_operations_history(getfield_gc=0) res = self.interp_operations(fn, [-7]) assert res == 2 * -7 + 2 * -8 - self.check_operations_history(getfield_gc=2) + self.check_operations_history(getfield_gc=0) From noreply at buildbot.pypy.org Fri Sep 2 15:25:59 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 2 Sep 2011 15:25:59 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: add some helpful comments Message-ID: <20110902132559.8652C8204C@wyvern.cs.uni-duesseldorf.de> Author: Carl 
Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47023:3dbbdca31951 Date: 2011-09-02 15:25 +0200 http://bitbucket.org/pypy/pypy/changeset/3dbbdca31951/ Log: add some helpful comments diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -63,12 +63,20 @@ self.heap_cache.setdefault(descr, {})[box] = fieldbox def setfield(self, box, descr, fieldbox): + # slightly subtle logic here d = self.heap_cache.get(descr, None) new_d = {box: fieldbox} + # a write to an arbitrary box, all other boxes can alias this one if not d or box not in self.new_boxes: + # therefore we throw away the cache self.heap_cache[descr] = new_d return + # the object we are writing to is freshly allocated + # only remove some boxes from the cache for frombox, tobox in d.iteritems(): + # the other box is *also* freshly allocated + # therefore frombox and box *must* contain different objects + # thus we can keep it in the cache if frombox in self.new_boxes: new_d[frombox] = tobox self.heap_cache[descr] = new_d From noreply at buildbot.pypy.org Fri Sep 2 15:46:41 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Sep 2011 15:46:41 +0200 (CEST) Subject: [pypy-commit] pypy default: minor improvements Message-ID: <20110902134641.359488204C@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47024:f32bde9a07ac Date: 2011-09-02 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/f32bde9a07ac/ Log: minor improvements diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -705,6 +705,8 @@ from sqlite3.dump import _iterdump return _iterdump(self) +DML, DQL, DDL = range(3) + class Cursor(object): def __init__(self, con): if not isinstance(con, Connection): @@ -735,9 +737,9 @@ self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not 
None: - if self.statement.kind == "DDL": + if self.statement.kind == DDL: self.connection.commit() - elif self.statement.kind == "DML": + elif self.statement.kind == DML: self.connection._begin() self.statement.set_params(params) @@ -748,18 +750,18 @@ self.statement.reset() raise self.connection._get_exception(ret) - if self.statement.kind == "DQL"and ret == SQLITE_ROW: + if self.statement.kind == DQL and ret == SQLITE_ROW: self.statement._build_row_cast_map() self.statement._readahead() else: self.statement.item = None self.statement.exhausted = True - if self.statement.kind in ("DML", "DDL"): + if self.statement.kind == DML or self.statement.kind == DDL: self.statement.reset() self.rowcount = -1 - if self.statement.kind == "DML": + if self.statement.kind == DML: self.rowcount = sqlite.sqlite3_changes(self.connection.db) return self @@ -772,7 +774,7 @@ self._check_closed() self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) - if self.statement.kind == "DML": + if self.statement.kind == DML: self.connection._begin() else: raise ProgrammingError, "executemany is only for DML statements" @@ -904,11 +906,11 @@ self.sql = sql # DEBUG ONLY first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): - self.kind = "DML" + self.kind = DML elif first_word in ("SELECT", "PRAGMA"): - self.kind = "DQL" + self.kind = DQL else: - self.kind = "DDL" + self.kind = DDL self.exhausted = False self.in_use = False # @@ -923,7 +925,7 @@ if ret == SQLITE_OK and self.statement.value is None: # an empty statement, we work around that, as it's the least trouble ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char)) - self.kind = "DQL" + self.kind = DQL if ret != SQLITE_OK: raise self.con._get_exception(ret) @@ -1118,7 +1120,7 @@ self.statement = None def _get_description(self): - if self.kind == "DML": + if self.kind == DML: return None desc = 
[] for i in xrange(sqlite.sqlite3_column_count(self.statement)): From noreply at buildbot.pypy.org Fri Sep 2 16:04:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 16:04:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix (baaa*a*a*ah, took too long): IR_QUASIIMMUTABLE fields Message-ID: <20110902140434.229228204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47025:324d50d0f5c7 Date: 2011-09-02 16:02 +0200 http://bitbucket.org/pypy/pypy/changeset/324d50d0f5c7/ Log: Test and fix (baaa*a*a*ah, took too long): IR_QUASIIMMUTABLE fields must of course not be considered as fully immutable fields! diff --git a/pypy/rpython/memory/gctypelayout.py b/pypy/rpython/memory/gctypelayout.py --- a/pypy/rpython/memory/gctypelayout.py +++ b/pypy/rpython/memory/gctypelayout.py @@ -459,7 +459,7 @@ if t._hints.get('immutable'): return if 'immutable_fields' in t._hints: - skip = t._hints['immutable_fields'].fields + skip = t._hints['immutable_fields'].all_immutable_fields() for n, t2 in t._flds.iteritems(): if isinstance(t2, lltype.Ptr) and t2.TO._gckind == 'gc': if n not in skip: diff --git a/pypy/rpython/memory/test/test_gctypelayout.py b/pypy/rpython/memory/test/test_gctypelayout.py --- a/pypy/rpython/memory/test/test_gctypelayout.py +++ b/pypy/rpython/memory/test/test_gctypelayout.py @@ -4,7 +4,7 @@ from pypy.rpython.memory.gctypelayout import gc_pointers_inside from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.test.test_llinterp import get_interpreter -from pypy.rpython.rclass import IR_IMMUTABLE +from pypy.rpython.rclass import IR_IMMUTABLE, IR_QUASIIMMUTABLE from pypy.objspace.flow.model import Constant class FakeGC: @@ -102,7 +102,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': IR_IMMUTABLE}) + accessor.initialize(S3, {'x': IR_IMMUTABLE, 'y': IR_QUASIIMMUTABLE}) # s1 = 
lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -16,6 +16,13 @@ for x in fields.itervalues(): assert isinstance(x, ImmutableRanking) + def all_immutable_fields(self): + result = set() + for key, value in self.fields.iteritems(): + if value in (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY): + result.add(key) + return result + def __repr__(self): return '' % getattr(self, 'TYPE', '?') From noreply at buildbot.pypy.org Fri Sep 2 16:04:35 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Sep 2011 16:04:35 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110902140435.586B88204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47026:5fb2ba368b6b Date: 2011-09-02 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/5fb2ba368b6b/ Log: merge heads diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -705,6 +705,8 @@ from sqlite3.dump import _iterdump return _iterdump(self) +DML, DQL, DDL = range(3) + class Cursor(object): def __init__(self, con): if not isinstance(con, Connection): @@ -735,9 +737,9 @@ self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: - if self.statement.kind == "DDL": + if self.statement.kind == DDL: self.connection.commit() - elif self.statement.kind == "DML": + elif self.statement.kind == DML: self.connection._begin() self.statement.set_params(params) @@ -748,18 +750,18 @@ self.statement.reset() raise self.connection._get_exception(ret) - if self.statement.kind == "DQL"and ret == SQLITE_ROW: + if self.statement.kind == DQL and ret == SQLITE_ROW: self.statement._build_row_cast_map() self.statement._readahead() else: self.statement.item = None self.statement.exhausted = True - if self.statement.kind in ("DML", "DDL"): + if self.statement.kind == 
DML or self.statement.kind == DDL: self.statement.reset() self.rowcount = -1 - if self.statement.kind == "DML": + if self.statement.kind == DML: self.rowcount = sqlite.sqlite3_changes(self.connection.db) return self @@ -772,7 +774,7 @@ self._check_closed() self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) - if self.statement.kind == "DML": + if self.statement.kind == DML: self.connection._begin() else: raise ProgrammingError, "executemany is only for DML statements" @@ -904,11 +906,11 @@ self.sql = sql # DEBUG ONLY first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): - self.kind = "DML" + self.kind = DML elif first_word in ("SELECT", "PRAGMA"): - self.kind = "DQL" + self.kind = DQL else: - self.kind = "DDL" + self.kind = DDL self.exhausted = False self.in_use = False # @@ -923,7 +925,7 @@ if ret == SQLITE_OK and self.statement.value is None: # an empty statement, we work around that, as it's the least trouble ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char)) - self.kind = "DQL" + self.kind = DQL if ret != SQLITE_OK: raise self.con._get_exception(ret) @@ -1118,7 +1120,7 @@ self.statement = None def _get_description(self): - if self.kind == "DML": + if self.kind == DML: return None desc = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -23,6 +23,4 @@ guard_not_invalidated(descr=...) p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - p22 = getfield_gc(ConstPtr(ptr21), descr=) - guard_nonnull(p22, descr=...) 
- """) + """) \ No newline at end of file diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -65,6 +65,10 @@ if isinstance(cell, ModuleCell): cell.w_value = w_value return + # If the new value and the current value are the same, don't create a + # level of indirection, or mutate are version. + if self.space.is_w(w_value, cell): + return if cell is not None: w_value = ModuleCell(w_value) self.mutated() diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -39,6 +39,20 @@ assert d.getitem("a") is None assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + def test_same_key_set_twice(self): + strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + d = W_DictMultiObject(space, strategy, storage) + + v1 = strategy.version + x = object() + d.setitem("a", x) + v2 = strategy.version + assert v1 is not v2 + d.setitem("a", x) + v3 = strategy.version + assert v2 is v3 + class AppTestModuleDict(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) From noreply at buildbot.pypy.org Fri Sep 2 16:39:05 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 2 Sep 2011 16:39:05 +0200 (CEST) Subject: [pypy-commit] pypy default: cache the _ffiargtype; this saves a dict lookup in hot loops Message-ID: <20110902143905.CA7438204C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r47027:ba4f25da7080 Date: 2011-09-02 16:38 +0200 http://bitbucket.org/pypy/pypy/changeset/ba4f25da7080/ Log: cache the _ffiargtype; this saves a dict lookup in hot loops diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -54,7 +54,8 @@ def get_ffi_argtype(self): if self._ffiargtype: return 
self._ffiargtype - return _shape_to_ffi_type(self._ffiargshape) + self._ffiargtype = _shape_to_ffi_type(self._ffiargshape) + return self._ffiargtype def _CData_output(self, resbuffer, base=None, index=-1): #assert isinstance(resbuffer, _rawffi.ArrayInstance) From noreply at buildbot.pypy.org Fri Sep 2 17:21:32 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Sep 2011 17:21:32 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix a test I broke, by better optimizing things :) Message-ID: <20110902152132.79A6B8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47028:43ff84065e19 Date: 2011-09-02 11:21 -0400 http://bitbucket.org/pypy/pypy/changeset/43ff84065e19/ Log: Fix a test I broke, by better optimizing things :) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -181,8 +181,7 @@ assert loop.match_by_id("contains", """ guard_not_invalidated(descr=...) i11 = force_token() - i12 = int_add_ovf(i5, i7) - guard_no_overflow(descr=...) + i12 = int_add(i5, 1) """) def test_id_compare_optimization(self): From noreply at buildbot.pypy.org Fri Sep 2 19:40:33 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Fri, 2 Sep 2011 19:40:33 +0200 (CEST) Subject: [pypy-commit] pypy default: force _trace_drag_out to always be inlined. The function is only called by 2-3 other functions. This change really helps with gc-intensive scripts like using a dict as a counter (can be a 10% speed-up). Message-ID: <20110902174033.318F28204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: Changeset: r47029:0f015355e6c5 Date: 2011-09-02 11:40 -0600 http://bitbucket.org/pypy/pypy/changeset/0f015355e6c5/ Log: force _trace_drag_out to always be inlined. The function is only called by 2-3 other functions. 
This change really helps with gc- intensive scripts like using a dict as a counter (can be a 10% speed-up). diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -1461,6 +1461,7 @@ # We will fix such references to point to the copy of the young # objects when we walk 'old_objects_pointing_to_young'. self.old_objects_pointing_to_young.append(newobj) + _trace_drag_out._always_inline_ = True def _visit_young_rawmalloced_object(self, obj): # 'obj' points to a young, raw-malloced object. From noreply at buildbot.pypy.org Fri Sep 2 21:24:10 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Fri, 2 Sep 2011 21:24:10 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: (alex gaynor) add a proper Float32 dtype and drop the fake Float96 dtype. Message-ID: <20110902192410.DC98B8204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47030:84e2299f2a7c Date: 2011-09-02 13:23 -0600 http://bitbucket.org/pypy/pypy/changeset/84e2299f2a7c/ Log: (alex gaynor) add a proper Float32 dtype and drop the fake Float96 dtype. 
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -7,7 +7,7 @@ from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty from pypy.module.micronumpy import signature from pypy.objspace.std.floatobject import float2string -from pypy.rlib import rfloat +from pypy.rlib import rarithmetic, rfloat from pypy.rlib.rarithmetic import LONG_BIT, widen from pypy.rlib.objectmodel import specialize, enforceargs from pypy.rlib.unroll import unrolling_iterable @@ -62,7 +62,10 @@ self.val = val def wrap(self, space): - return space.wrap(self.val) + val = self.val + if valtype is rarithmetic.r_singlefloat: + val = float(val) + return space.wrap(val) def convert_to(self, dtype): return dtype.adapt_val(self.val) @@ -178,8 +181,14 @@ class FloatArithmeticDtype(ArithmaticTypeMixin): _mixin_ = True + def unwrap(self, space, w_item): + return self.adapt_val(space.float_w(space.float(w_item))) + def for_computation(self, v): - return v + return float(v) + + def str_format(self, item): + return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION) @binop def mod(self, v1, v2): @@ -246,6 +255,9 @@ def for_computation(self, v): return widen(v) + def str_format(self, item): + return str(widen(self.unbox(item))) + @binop def mod(self, v1, v2): return v1 % v2 @@ -260,8 +272,6 @@ assert v == 0 return 0 - def str_format(self, item): - return str(widen(self.unbox(item))) W_BoolDtype = create_low_level_dtype( num = 0, kind = BOOLLTR, name = "bool", @@ -390,18 +400,14 @@ W_Float32Dtype = create_low_level_dtype( num = 11, kind = FLOATINGLTR, name = "float32", - aliases = ["f"], + aliases = ["f", "float32"], applevel_types = [], - T = lltype.Float, # SingleFloat - valtype = float, # r_singlefloat - expected_size = 8, # 4 + T = lltype.SingleFloat, + valtype = rarithmetic.r_singlefloat, + expected_size = 4, ) class 
W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def str_format(self, item): - return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) + pass W_Float64Dtype = create_low_level_dtype( num = 12, kind = FLOATINGLTR, name = "float64", @@ -412,33 +418,25 @@ expected_size = 8, ) class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) + pass - def str_format(self, item): - return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) - -W_Float96Dtype = create_low_level_dtype( - num = 13, kind = FLOATINGLTR, name = "float96", - aliases = ["g"], - applevel_types = [], - T = lltype.Float, # LongFloat - valtype = float, # r_longfloat - expected_size = 8, # 12 -) -class W_Float96Dtype(FloatArithmeticDtype, W_Float96Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.float_w(space.float(w_item))) - - def str_format(self, item): - return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION) +#W_Float96Dtype = create_low_level_dtype( +# num = 13, kind = FLOATINGLTR, name = "float96", +# aliases = ["g"], +# applevel_types = [], +# T = lltype.Float, # LongFloat +# valtype = float, # r_longfloat +# expected_size = 8, # 12 +#) +#class W_Float96Dtype(FloatArithmeticDtype, W_Float96Dtype): +# pass ALL_DTYPES = [ W_BoolDtype, W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype, W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype, W_Int64Dtype, W_UInt64Dtype, - W_Float32Dtype, W_Float64Dtype, W_Float96Dtype, + W_Float32Dtype, W_Float64Dtype, #W_Float96Dtype, ] dtypes_by_alias = unrolling_iterable([ diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -95,7 +95,7 @@ def 
test_bool_binop_types(self): from numpy import array, dtype - types = ('?','b','B','h','H','i','I','l','L','q','Q','f','d','g') + types = ('?','b','B','h','H','i','I','l','L','q','Q','f','d')#,'g') N = len(types) a = array([True], '?') for t in types: diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -310,4 +310,4 @@ assert add.reduce([1, 2, 3]) == 6 assert maximum.reduce([1]) == 1 assert maximum.reduce([1, 2, 3]) == 3 - raises(ValueError, maximum.reduce, []) \ No newline at end of file + raises(ValueError, maximum.reduce, []) From noreply at buildbot.pypy.org Fri Sep 2 21:30:16 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Fri, 2 Sep 2011 21:30:16 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: fix test_sin to deal with Float32's decreased precision Message-ID: <20110902193016.20F0A8204C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47031:6801ef32508e Date: 2011-09-02 13:29 -0600 http://bitbucket.org/pypy/pypy/changeset/6801ef32508e/ Log: fix test_sin to deal with Float32's decreased precision diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -234,7 +234,7 @@ assert b[i] == math.sin(a[i]) a = sin(array([True, False], dtype=bool)) - assert a[0] == sin(1) + assert abs(a[0] - sin(1)) < 1e-7 # a[0] will be less precise assert a[1] == 0.0 def test_cos(self): From noreply at buildbot.pypy.org Fri Sep 2 23:38:07 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 2 Sep 2011 23:38:07 +0200 (CEST) Subject: [pypy-commit] pypy default: allow inlining into more of the thread library, specifically for get_ident, which used to be rendered as a call_may_force, and is now properly elided. 
Seems to be worth about 5% on the sqlite benchmark. Message-ID: <20110902213807.ADEF18204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47032:68fbc674a800 Date: 2011-09-02 17:37 -0400 http://bitbucket.org/pypy/pypy/changeset/68fbc674a800/ Log: allow inlining into more of the thread library, specifically for get_ident, which used to be rendered as a call_may_force, and is now properly elided. Seems to be worth about 5% on the sqlite benchmark. diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -8,7 +8,8 @@ modname == '__builtin__.interp_classobj' or modname == '__builtin__.functional' or modname == '__builtin__.descriptor' or - modname == 'thread.os_local'): + modname == 'thread.os_local' or + modname == 'thread.os_thread'): return True if '.' in modname: modname, _ = modname.split('.', 1) diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -34,7 +34,9 @@ def test_thread_local(): from pypy.module.thread.os_local import Local + from pypy.module.thread.os_thread import get_ident assert pypypolicy.look_inside_function(Local.getdict.im_func) + assert pypypolicy.look_inside_function(get_ident) def test_pypy_module(): from pypy.module._collections.interp_deque import W_Deque From noreply at buildbot.pypy.org Sat Sep 3 00:23:34 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Sep 2011 00:23:34 +0200 (CEST) Subject: [pypy-commit] pypy default: remove unnecsary use of weakrefs from our sqlite3. This is worth about 25% on the sqlite benchmark. 
Message-ID: <20110902222334.AC8218204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47033:aa9eef58cf01 Date: 2011-09-02 18:23 -0400 http://bitbucket.org/pypy/pypy/changeset/aa9eef58cf01/ Log: remove unnecsary use of weakrefs from our sqlite3. This is worth about 25% on the sqlite benchmark. diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -752,7 +752,7 @@ if self.statement.kind == DQL and ret == SQLITE_ROW: self.statement._build_row_cast_map() - self.statement._readahead() + self.statement._readahead(self) else: self.statement.item = None self.statement.exhausted = True @@ -773,7 +773,7 @@ sql = sql.encode("utf-8") self._check_closed() self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) - + if self.statement.kind == DML: self.connection._begin() else: @@ -826,7 +826,7 @@ return self def __iter__(self): - return self.statement + return iter(self.fetchone, None) def _check_reset(self): if self.reset: @@ -843,7 +843,7 @@ return None try: - return self.statement.next() + return self.statement.next(self) except StopIteration: return None @@ -857,7 +857,7 @@ if size is None: size = self.arraysize lst = [] - for row in self.statement: + for row in self: lst.append(row) if len(lst) == size: break @@ -868,7 +868,7 @@ self._check_reset() if self.statement is None: return [] - return list(self.statement) + return list(self) def _getdescription(self): if self._description is None: @@ -938,7 +938,6 @@ self._build_row_cast_map() def set_cursor_and_factory(self, cur, row_factory): - self.cur = weakref.ref(cur) self.row_factory = row_factory def _build_row_cast_map(self): @@ -1041,10 +1040,7 @@ raise ProgrammingError("missing parameter '%s'" %param) self.set_param(idx, param) - def __iter__(self): - return self - - def next(self): + def next(self, cursor): self.con._check_closed() self.con._check_thread() if self.exhausted: @@ -1060,10 +1056,10 @@ 
sqlite.sqlite3_reset(self.statement) raise exc - self._readahead() + self._readahead(cursor) return item - def _readahead(self): + def _readahead(self, cursor): self.column_count = sqlite.sqlite3_column_count(self.statement) row = [] for i in xrange(self.column_count): @@ -1098,7 +1094,7 @@ row = tuple(row) if self.row_factory is not None: - row = self.row_factory(self.cur(), row) + row = self.row_factory(cursor, row) self.item = row def reset(self): From noreply at buildbot.pypy.org Sat Sep 3 00:25:59 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Sep 2011 00:25:59 +0200 (CEST) Subject: [pypy-commit] pypy default: remove completely misnamed method after the previous commit. Message-ID: <20110902222559.633CC8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47034:a8027ddfe2b2 Date: 2011-09-02 18:25 -0400 http://bitbucket.org/pypy/pypy/changeset/a8027ddfe2b2/ Log: remove completely misnamed method after the previous commit. diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -293,7 +293,7 @@ # if stat.in_use: stat = Statement(self.connection, sql) - stat.set_cursor_and_factory(cursor, row_factory) + stat.set_row_factory(row_factory) return stat @@ -914,8 +914,7 @@ self.exhausted = False self.in_use = False # - # set by set_cursor_and_factory - self.cur = None + # set by set_row_factory self.row_factory = None self.statement = c_void_p() @@ -937,7 +936,7 @@ self._build_row_cast_map() - def set_cursor_and_factory(self, cur, row_factory): + def set_row_factory(self, row_factory): self.row_factory = row_factory def _build_row_cast_map(self): From noreply at buildbot.pypy.org Sat Sep 3 09:09:26 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 09:09:26 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Update for Windows. 
Message-ID: <20110903070926.075838204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r252:4acec1aeebfe Date: 2011-09-03 09:09 +0200 http://bitbucket.org/pypy/pypy.org/changeset/4acec1aeebfe/ Log: Update for Windows. diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -150,7 +150,8 @@ official release of PyPy (with the JIT). If you really have to use CPython then note that we are talking about CPython 2.5-2.7 here, not CPython 3.x.

  • -
  • If RAM usage is a problem, then you can (for now) tweak some parameters +

  • If RAM usage is a problem (or if you are on Windows, because win32's limit +is 2 GB unless you hack a lot), then you can (for now) tweak some parameters via environment variables and command-line options. The following command takes a bit more time, but finishes with only using 3.0 GB of RAM (on Linux 64-bit; probably not much more than 1.5 GB on 32-bit). It should be diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -141,10 +141,11 @@ official release of PyPy (with the JIT). If you really have to use CPython then note that we are talking about CPython 2.5-2.7 here, not CPython 3.x. -* If RAM usage is a problem, then you can (for now) tweak some parameters +* If RAM usage is a problem (or if you are on Windows, because win32's limit + is 2 GB unless you hack a lot), then you can (for now) tweak some parameters via environment variables and command-line options. The following command takes a bit more time, but finishes with only using 3.0 GB of RAM (on - Linux 64-bit; probably not much more than 1.5 GB on 32-bit). It should be + Linux 64-bit; probably not much more than 1.6 GB on 32-bit). It should be noted that it is less than with CPython. :: PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ./translate.py -Ojit From noreply at buildbot.pypy.org Sat Sep 3 10:35:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 10:35:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for an obscure case in which prejit_optimizations() renames Message-ID: <20110903083503.B942B8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47035:e24ce043ef77 Date: 2011-09-03 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/e24ce043ef77/ Log: Fix for an obscure case in which prejit_optimizations() renames some variables in the jit_merge_point, after which the original op cannot be found in the graph any more. 
diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -130,8 +130,15 @@ results = _find_jit_marker(graphs, 'jit_merge_point') if not results: raise Exception("no jit_merge_point found!") + seen = set([graph for graph, block, pos in results]) + assert len(seen) == len(results), ( + "found several jit_merge_points in the same graph") return results +def locate_jit_merge_point(graph): + [(graph, block, pos)] = find_jit_merge_points([graph]) + return block, pos, block.operations[pos] + def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') @@ -235,7 +242,7 @@ def split_graph_and_record_jitdriver(self, graph, block, pos): op = block.operations[pos] jd = JitDriverStaticData() - jd._jit_merge_point_pos = (graph, op) + jd._jit_merge_point_in = graph args = op.args[2:] s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] @@ -504,7 +511,8 @@ self.make_args_specification(jd) def make_args_specification(self, jd): - graph, op = jd._jit_merge_point_pos + graph = jd._jit_merge_point_in + _, _, op = locate_jit_merge_point(graph) greens_v, reds_v = support.decode_hp_hint_args(op) ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] @@ -552,7 +560,7 @@ assert jitdriver in sublists, \ "can_enter_jit with no matching jit_merge_point" jd, sublist = sublists[jitdriver] - origportalgraph = jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in if graph is not origportalgraph: sublist.append((graph, block, index)) jd.no_loop_header = False @@ -582,7 +590,7 @@ can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)] for graph, block, index in can_enter_jits: - if graph is jd._jit_merge_point_pos[0]: + if graph is jd._jit_merge_point_in: continue op = block.operations[index] @@ -640,7 +648,7 @@ # while 1: # more stuff # - origportalgraph = 
jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in portalgraph = jd.portal_graph PORTALFUNC = jd._PORTAL_FUNCTYPE @@ -794,14 +802,7 @@ # ____________________________________________________________ # Now mutate origportalgraph to end with a call to portal_runner_ptr # - _, op = jd._jit_merge_point_pos - for origblock in origportalgraph.iterblocks(): - if op in origblock.operations: - break - else: - assert False, "lost the operation %r in the graph %r" % ( - op, origportalgraph) - origindex = origblock.operations.index(op) + origblock, origindex, op = locate_jit_merge_point(origportalgraph) assert op.opname == 'jit_marker' assert op.args[0].value == 'jit_merge_point' greens_v, reds_v = support.decode_hp_hint_args(op) From noreply at buildbot.pypy.org Sat Sep 3 11:57:49 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 11:57:49 +0200 (CEST) Subject: [pypy-commit] pypy default: A passing test. Message-ID: <20110903095749.58FBC8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47036:c9c5e66fd236 Date: 2011-09-03 11:57 +0200 http://bitbucket.org/pypy/pypy/changeset/c9c5e66fd236/ Log: A passing test. 
diff --git a/pypy/module/sys/test/test_encoding.py b/pypy/module/sys/test/test_encoding.py new file mode 100644 --- /dev/null +++ b/pypy/module/sys/test/test_encoding.py @@ -0,0 +1,30 @@ +import os, py +from pypy.rlib import rlocale +from pypy.module.sys.interp_encoding import _getfilesystemencoding +from pypy.module.sys.interp_encoding import base_encoding + + +def test__getfilesystemencoding(space): + if not (rlocale.HAVE_LANGINFO and rlocale.CODESET): + py.test.skip("requires HAVE_LANGINFO and CODESET") + + def clear(): + for key in os.environ.keys(): + if key == 'LANG' or key.startswith('LC_'): + del os.environ[key] + + def get(**env): + original_env = os.environ.copy() + try: + clear() + os.environ.update(env) + return _getfilesystemencoding(space) + finally: + clear() + os.environ.update(original_env) + + assert get() in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='foobar') in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='en_US.UTF-8') == 'UTF-8' + assert get(LC_ALL='en_US.UTF-8') == 'UTF-8' + assert get(LC_CTYPE='en_US.UTF-8') == 'UTF-8' From noreply at buildbot.pypy.org Sat Sep 3 11:57:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 11:57:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix for issue856. Message-ID: <20110903095750.989428204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47037:96deb30da7fc Date: 2011-09-03 11:57 +0200 http://bitbucket.org/pypy/pypy/changeset/96deb30da7fc/ Log: Test and fix for issue856. 
diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -260,6 +260,8 @@ try: import _file except ImportError: + if sys.version_info < (2, 7): + return import ctypes # HACK: while running on top of CPython set_file_encoding = ctypes.pythonapi.PyFile_SetEncodingAndErrors set_file_encoding.argtypes = [ctypes.py_object, ctypes.c_char_p, ctypes.c_char_p] @@ -479,7 +481,8 @@ print >> sys.stderr, "'import site' failed" readenv = not ignore_environment - io_encoding = readenv and os.getenv("PYTHONIOENCODING") + io_encoding = ((readenv and os.getenv("PYTHONIOENCODING")) + or sys.getfilesystemencoding()) if io_encoding: set_io_encoding(io_encoding) diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -739,6 +739,19 @@ data = self.run(p + os.sep) assert data == p + os.sep + '\n' + def test_getfilesystemencoding(self): + if sys.version_info < (2, 7): + skip("test requires Python >= 2.7") + p = getscript_in_dir(""" + import sys + sys.stdout.write(u'15\u20ac') + sys.stdout.flush() + """) + env = os.environ.copy() + env["LC_CTYPE"] = 'en_US.UTF-8' + data = self.run(p, env=env) + assert data == '15\xe2\x82\xac' + def test_pythonioencoding(self): if sys.version_info < (2, 7): skip("test requires Python >= 2.7") From noreply at buildbot.pypy.org Sat Sep 3 12:13:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 12:13:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Add the type for c_bool here. Message-ID: <20110903101331.488708204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47038:7fdbf8332028 Date: 2011-09-03 12:13 +0200 http://bitbucket.org/pypy/pypy/changeset/7fdbf8332028/ Log: Add the type for c_bool here. 
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -226,6 +226,7 @@ 'Z' : _ffi.types.void_p, 'X' : _ffi.types.void_p, 'v' : _ffi.types.sshort, + '?' : _ffi.types.ubyte, } From noreply at buildbot.pypy.org Sat Sep 3 12:41:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 12:41:00 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix for issue852. Message-ID: <20110903104100.F27A88204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47039:4de034d3fb88 Date: 2011-09-03 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/4de034d3fb88/ Log: Test and fix for issue852. diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py --- a/pypy/interpreter/pyparser/future.py +++ b/pypy/interpreter/pyparser/future.py @@ -109,25 +109,19 @@ self.getc() == self.getc(+2)): self.pos += 3 while 1: # Deal with a triple quoted docstring - if self.getc() == '\\': - self.pos += 2 + c = self.getc() + if c == '\\': + self.pos += 1 + self._skip_next_char_from_docstring() + elif c != endchar: + self._skip_next_char_from_docstring() else: - c = self.getc() - if c != endchar: - self.pos += 1 - if c == '\n': - self.atbol() - elif c == '\r': - if self.getc() == '\n': - self.pos += 1 - self.atbol() - else: - self.pos += 1 - if (self.getc() == endchar and - self.getc(+1) == endchar): - self.pos += 2 - self.consume_empty_line() - break + self.pos += 1 + if (self.getc() == endchar and + self.getc(+1) == endchar): + self.pos += 2 + self.consume_empty_line() + break else: # Deal with a single quoted docstring self.pos += 1 @@ -138,17 +132,21 @@ self.consume_empty_line() return elif c == '\\': - # Deal with linefeeds - if self.getc() != '\r': - self.pos += 1 - else: - self.pos += 1 - if self.getc() == '\n': - self.pos += 1 + self._skip_next_char_from_docstring() elif c in '\r\n': # Syntax error return + def 
_skip_next_char_from_docstring(self): + c = self.getc() + self.pos += 1 + if c == '\n': + self.atbol() + elif c == '\r': + if self.getc() == '\n': + self.pos += 1 + self.atbol() + def consume_continuation(self): c = self.getc() if c in '\n\r': diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py --- a/pypy/interpreter/pyparser/test/test_futureautomaton.py +++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py @@ -221,6 +221,14 @@ assert f.lineno == 3 assert f.col_offset == 0 +def test_lots_of_continuation_lines(): + s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n" + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_WITH_STATEMENT + assert f.lineno == 8 + assert f.col_offset == 0 + # This looks like a bug in cpython parser # and would require extensive modifications # to future.py in order to emulate the same behaviour @@ -239,3 +247,19 @@ raise AssertionError('IndentationError not raised') assert f.lineno == 2 assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_single_quoted(): + s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_triple_quoted(): + s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 From noreply at buildbot.pypy.org Sat Sep 3 13:36:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 13:36:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Split the class WeakrefLifeline in a parent class without __del__ Message-ID: <20110903113616.BF8698204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47040:a0a35705fe84 Date: 2011-09-03 13:19 
+0200 http://bitbucket.org/pypy/pypy/changeset/a0a35705fe84/ Log: Split the class WeakrefLifeline in a parent class without __del__ and a subclass with __del__, that is used if we create weakrefs with callbacks only. diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -8,24 +8,12 @@ class WeakrefLifeline(W_Root): + cached_weakref_index = -1 + cached_proxy_index = -1 + def __init__(self, space): self.space = space self.refs_weak = [] - self.cached_weakref_index = -1 - self.cached_proxy_index = -1 - - def __del__(self): - """This runs when the interp-level object goes away, and allows - its lifeline to go away. The purpose of this is to activate the - callbacks even if there is no __del__ method on the interp-level - W_Root subclass implementing the object. - """ - for i in range(len(self.refs_weak) - 1, -1, -1): - w_ref = self.refs_weak[i]() - if w_ref is not None and w_ref.w_callable is not None: - w_ref.enqueue_for_destruction(self.space, - W_WeakrefBase.activate_callback, - 'weakref callback of ') def clear_all_weakrefs(self): """Clear all weakrefs. This is called when an app-level object has @@ -39,12 +27,10 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. 
- @jit.dont_look_inside - def get_or_make_weakref(self, space, w_subtype, w_obj, w_callable): + def get_or_make_weakref(self, space, w_subtype, w_obj): w_weakreftype = space.gettypeobject(W_Weakref.typedef) is_weakreftype = space.is_w(w_weakreftype, w_subtype) - can_reuse = space.is_w(w_callable, space.w_None) - if is_weakreftype and can_reuse and self.cached_weakref_index >= 0: + if is_weakreftype and self.cached_weakref_index >= 0: w_cached = self.refs_weak[self.cached_weakref_index]() if w_cached is not None: return w_cached @@ -52,16 +38,15 @@ self.cached_weakref_index = -1 w_ref = space.allocate_instance(W_Weakref, w_subtype) index = len(self.refs_weak) - W_Weakref.__init__(w_ref, space, w_obj, w_callable) + W_Weakref.__init__(w_ref, space, w_obj, None) self.refs_weak.append(weakref.ref(w_ref)) - if is_weakreftype and can_reuse: + if is_weakreftype: self.cached_weakref_index = index return w_ref @jit.dont_look_inside - def get_or_make_proxy(self, space, w_obj, w_callable): - can_reuse = space.is_w(w_callable, space.w_None) - if can_reuse and self.cached_proxy_index >= 0: + def get_or_make_proxy(self, space, w_obj): + if self.cached_proxy_index >= 0: w_cached = self.refs_weak[self.cached_proxy_index]() if w_cached is not None: return w_cached @@ -69,12 +54,11 @@ self.cached_proxy_index = -1 index = len(self.refs_weak) if space.is_true(space.callable(w_obj)): - w_proxy = W_CallableProxy(space, w_obj, w_callable) + w_proxy = W_CallableProxy(space, w_obj, None) else: - w_proxy = W_Proxy(space, w_obj, w_callable) + w_proxy = W_Proxy(space, w_obj, None) self.refs_weak.append(weakref.ref(w_proxy)) - if can_reuse: - self.cached_proxy_index = index + self.cached_proxy_index = index return w_proxy def get_any_weakref(self, space): @@ -90,6 +74,46 @@ return w_ref return space.w_None + +class WeakrefLifelineWithCallbacks(WeakrefLifeline): + + def __init__(self, space, oldlifeline=None): + self.space = space + if oldlifeline is None: + self.refs_weak = [] + else: + 
self.refs_weak = oldlifeline.refs_weak + + def __del__(self): + """This runs when the interp-level object goes away, and allows + its lifeline to go away. The purpose of this is to activate the + callbacks even if there is no __del__ method on the interp-level + W_Root subclass implementing the object. + """ + for i in range(len(self.refs_weak) - 1, -1, -1): + w_ref = self.refs_weak[i]() + if w_ref is not None and w_ref.w_callable is not None: + w_ref.enqueue_for_destruction(self.space, + W_WeakrefBase.activate_callback, + 'weakref callback of ') + + @jit.dont_look_inside + def get_or_make_weakref_with_callback(self, space, w_subtype, w_obj, + w_callable): + w_ref = space.allocate_instance(W_Weakref, w_subtype) + W_Weakref.__init__(w_ref, space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_ref)) + return w_ref + + @jit.dont_look_inside + def get_or_make_proxy_with_callback(self, space, w_obj, w_callable): + if space.is_true(space.callable(w_obj)): + w_proxy = W_CallableProxy(space, w_obj, w_callable) + else: + w_proxy = W_Proxy(space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_proxy)) + return w_proxy + # ____________________________________________________________ class Dummy: @@ -103,8 +127,7 @@ class W_WeakrefBase(Wrappable): def __init__(w_self, space, w_obj, w_callable): - if space.is_w(w_callable, space.w_None): - w_callable = None + assert w_callable is not space.w_None # should be really None w_self.space = space assert w_obj is not None w_self.w_obj_weak = weakref.ref(w_obj) @@ -183,10 +206,18 @@ raise OperationError(space.w_TypeError, space.wrap( "__new__ expected at most 2 arguments")) lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_weakref(space, w_subtype, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + if lifeline is None: + lifeline = WeakrefLifeline(space) + w_obj.setweakref(space, lifeline) + return 
lifeline.get_or_make_weakref(space, w_subtype, w_obj) + else: + oldlifeline = lifeline + if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None + lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) + w_obj.setweakref(space, lifeline) + return lifeline.get_or_make_weakref_with_callback(space, w_subtype, + w_obj, w_callable) W_Weakref.typedef = TypeDef("weakref", __doc__ = """A weak reference to an object 'obj'. A 'callback' can be given, @@ -244,10 +275,18 @@ 'callback', if given, is called with the proxy as an argument when 'obj' is about to be finalized.""" lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_proxy(space, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + if lifeline is None: + lifeline = WeakrefLifeline(space) + w_obj.setweakref(space, lifeline) + return lifeline.get_or_make_proxy(space, w_obj) + else: + oldlifeline = lifeline + if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None + lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) + w_obj.setweakref(space, lifeline) + return lifeline.get_or_make_proxy_with_callback(space, w_obj, + w_callable) def descr__new__proxy(space, w_subtype, w_obj, w_callable=None): raise OperationError( diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -369,6 +369,26 @@ return A raises(TypeError, tryit) + def test_proxy_to_dead_object(self): + import _weakref, gc + class A(object): + pass + p = _weakref.proxy(A()) + gc.collect() + raises(ReferenceError, "p + 1") + + def test_proxy_with_callback(self): + import _weakref, gc + class A(object): + pass + a2 = A() + def callback(proxy): + a2.seen = proxy + p = _weakref.proxy(A(), callback) + gc.collect() + raises(ReferenceError, "p + 1") + assert a2.seen is p + def 
test_repr(self): import _weakref, gc for kind in ('ref', 'proxy'): From noreply at buildbot.pypy.org Sat Sep 3 13:36:18 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 13:36:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Rename these two methods to their more precise meaning. Message-ID: <20110903113618.003A88204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47041:12b209c22de0 Date: 2011-09-03 13:25 +0200 http://bitbucket.org/pypy/pypy/changeset/12b209c22de0/ Log: Rename these two methods to their more precise meaning. diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -98,15 +98,14 @@ 'weakref callback of ') @jit.dont_look_inside - def get_or_make_weakref_with_callback(self, space, w_subtype, w_obj, - w_callable): + def make_weakref_with_callback(self, space, w_subtype, w_obj, w_callable): w_ref = space.allocate_instance(W_Weakref, w_subtype) W_Weakref.__init__(w_ref, space, w_obj, w_callable) self.refs_weak.append(weakref.ref(w_ref)) return w_ref @jit.dont_look_inside - def get_or_make_proxy_with_callback(self, space, w_obj, w_callable): + def make_proxy_with_callback(self, space, w_obj, w_callable): if space.is_true(space.callable(w_obj)): w_proxy = W_CallableProxy(space, w_obj, w_callable) else: @@ -216,8 +215,8 @@ if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_weakref_with_callback(space, w_subtype, - w_obj, w_callable) + return lifeline.make_weakref_with_callback(space, w_subtype, w_obj, + w_callable) W_Weakref.typedef = TypeDef("weakref", __doc__ = """A weak reference to an object 'obj'. 
A 'callback' can be given, @@ -285,8 +284,7 @@ if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_proxy_with_callback(space, w_obj, - w_callable) + return lifeline.make_proxy_with_callback(space, w_obj, w_callable) def descr__new__proxy(space, w_subtype, w_obj, w_callable=None): raise OperationError( From noreply at buildbot.pypy.org Sat Sep 3 13:36:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 13:36:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Reorganization. Probably more jit-friendly. Message-ID: <20110903113619.3D2EE8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47042:9e7dfe2ed587 Date: 2011-09-03 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/9e7dfe2ed587/ Log: Reorganization. Probably more jit-friendly. diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -27,7 +27,8 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. 
- def get_or_make_weakref(self, space, w_subtype, w_obj): + def get_or_make_weakref(self, w_subtype, w_obj): + space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) is_weakreftype = space.is_w(w_weakreftype, w_subtype) if is_weakreftype and self.cached_weakref_index >= 0: @@ -44,8 +45,8 @@ self.cached_weakref_index = index return w_ref - @jit.dont_look_inside - def get_or_make_proxy(self, space, w_obj): + def get_or_make_proxy(self, w_obj): + space = self.space if self.cached_proxy_index >= 0: w_cached = self.refs_weak[self.cached_proxy_index]() if w_cached is not None: @@ -97,15 +98,15 @@ W_WeakrefBase.activate_callback, 'weakref callback of ') - @jit.dont_look_inside - def make_weakref_with_callback(self, space, w_subtype, w_obj, w_callable): + def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): + space = self.space w_ref = space.allocate_instance(W_Weakref, w_subtype) W_Weakref.__init__(w_ref, space, w_obj, w_callable) self.refs_weak.append(weakref.ref(w_ref)) return w_ref - @jit.dont_look_inside - def make_proxy_with_callback(self, space, w_obj, w_callable): + def make_proxy_with_callback(self, w_obj, w_callable): + space = self.space if space.is_true(space.callable(w_obj)): w_proxy = W_CallableProxy(space, w_obj, w_callable) else: @@ -199,24 +200,39 @@ def descr__ne__(self, space, w_ref2): return space.not_(space.eq(self, w_ref2)) +def getlifeline(space, w_obj): + lifeline = w_obj.getweakref() + if lifeline is None: + lifeline = WeakrefLifeline(space) + w_obj.setweakref(space, lifeline) + return lifeline + +def getlifelinewithcallbacks(space, w_obj): + lifeline = w_obj.getweakref() + if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None + oldlifeline = lifeline + lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) + w_obj.setweakref(space, lifeline) + return lifeline + + at jit.dont_look_inside +def get_or_make_weakref(space, w_subtype, w_obj): + return getlifeline(space, 
w_obj).get_or_make_weakref(w_subtype, w_obj) + + at jit.dont_look_inside +def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: raise OperationError(space.w_TypeError, space.wrap( "__new__ expected at most 2 arguments")) - lifeline = w_obj.getweakref() if space.is_w(w_callable, space.w_None): - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_weakref(space, w_subtype, w_obj) + return get_or_make_weakref(space, w_subtype, w_obj) else: - oldlifeline = lifeline - if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None - lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) - w_obj.setweakref(space, lifeline) - return lifeline.make_weakref_with_callback(space, w_subtype, w_obj, - w_callable) + return make_weakref_with_callback(space, w_subtype, w_obj, w_callable) W_Weakref.typedef = TypeDef("weakref", __doc__ = """A weak reference to an object 'obj'. A 'callback' can be given, @@ -269,22 +285,23 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) + at jit.dont_look_inside +def get_or_make_proxy(space, w_obj): + return getlifeline(space, w_obj).get_or_make_proxy(w_obj) + + at jit.dont_look_inside +def make_proxy_with_callback(space, w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' is about to be finalized.""" - lifeline = w_obj.getweakref() if space.is_w(w_callable, space.w_None): - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_proxy(space, w_obj) + return get_or_make_proxy(space, w_obj) else: - oldlifeline = lifeline - if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None - lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) - w_obj.setweakref(space, lifeline) - return lifeline.make_proxy_with_callback(space, w_obj, w_callable) + return make_proxy_with_callback(space, w_obj, w_callable) def descr__new__proxy(space, w_subtype, w_obj, w_callable=None): raise OperationError( From noreply at buildbot.pypy.org Sat Sep 3 14:42:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 14:42:00 +0200 (CEST) Subject: [pypy-commit] pypy default: Rewrite py.cleanup to also remove __pycache__ directories, Message-ID: <20110903124200.AFD468204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47043:4f48b5228d50 Date: 2011-09-03 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/4f48b5228d50/ Log: Rewrite py.cleanup to also remove __pycache__ directories, created by CPython 3.x. 
diff --git a/pypy/tool/py.cleanup b/pypy/tool/py.cleanup --- a/pypy/tool/py.cleanup +++ b/pypy/tool/py.cleanup @@ -1,16 +1,30 @@ #!/usr/bin/env python -import py, sys +import sys, os, stat, shutil -def shouldremove(p): - return p.ext == '.pyc' +def clean(path): + global count + try: + content = os.listdir(path) + except OSError: + print >> sys.stderr, "skipping", path + return + for fn in content: + filename = os.path.join(path, fn) + st = os.lstat(filename) + if stat.S_ISDIR(st.st_mode): + if fn == '__pycache__': + shutil.rmtree(filename) + count += 1 + else: + clean(filename) + elif fn.endswith('.pyc') or fn.endswith('.pyo'): + os.unlink(filename) + count += 1 count = 0 for arg in sys.argv[1:] or ['.']: - path = py.path.local(arg) - print "cleaning path", path, "of .pyc files" - for x in path.visit(shouldremove, lambda x: x.check(dotfile=0, link=0)): - x.remove() - count += 1 + print "cleaning path", arg, "of .pyc/.pyo/__pycache__ files" + clean(arg) print "%d files removed" % (count,) From noreply at buildbot.pypy.org Sat Sep 3 14:46:44 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 14:46:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't use shutil and don't remove all files within __pycache__. Message-ID: <20110903124644.500AA8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47044:5841ae63ade9 Date: 2011-09-03 14:46 +0200 http://bitbucket.org/pypy/pypy/changeset/5841ae63ade9/ Log: Don't use shutil and don't remove all files within __pycache__. Instead only remove .pyc/.pyo files from these __pycache__ directories, and only kill the directory if it is empty afterwards (common case). 
diff --git a/pypy/tool/py.cleanup b/pypy/tool/py.cleanup --- a/pypy/tool/py.cleanup +++ b/pypy/tool/py.cleanup @@ -1,5 +1,5 @@ #!/usr/bin/env python -import sys, os, stat, shutil +import sys, os, stat def clean(path): global count @@ -12,11 +12,12 @@ filename = os.path.join(path, fn) st = os.lstat(filename) if stat.S_ISDIR(st.st_mode): + clean(filename) if fn == '__pycache__': - shutil.rmtree(filename) - count += 1 - else: - clean(filename) + try: + os.rmdir(filename) + except OSError: + pass elif fn.endswith('.pyc') or fn.endswith('.pyo'): os.unlink(filename) count += 1 From noreply at buildbot.pypy.org Sat Sep 3 16:32:17 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 16:32:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix a corner case. Message-ID: <20110903143217.D8C918204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47045:c3d09f049028 Date: 2011-09-03 16:31 +0200 http://bitbucket.org/pypy/pypy/changeset/c3d09f049028/ Log: Fix a corner case. 
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -59,7 +59,12 @@ # while not target: if not target.__started: - _continulet.__init__(target, _greenlet_start, *args) + if unbound_method != _continulet.throw: + greenlet_func = _greenlet_start + else: + greenlet_func = _greenlet_throw + _continulet.__init__(target, greenlet_func, *args) + unbound_method = _continulet.switch args = () target.__started = True break @@ -136,3 +141,11 @@ if greenlet.parent is not _tls.main: _continuation.permute(greenlet, greenlet.parent) return (res,) + +def _greenlet_throw(greenlet, exc, value, tb): + _tls.current = greenlet + try: + raise exc, value, tb + finally: + if greenlet.parent is not _tls.main: + _continuation.permute(greenlet, greenlet.parent) diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -231,3 +231,13 @@ assert res == "next step" res = g2.switch("goes to f1 instead") assert res == "all ok" + + def test_throw_in_not_started_yet(self): + from greenlet import greenlet + # + def f1(): + never_reached + # + g1 = greenlet(f1) + raises(ValueError, g1.throw, ValueError) + assert g1.dead From noreply at buildbot.pypy.org Sat Sep 3 16:56:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 16:56:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Better order for this. Corner-casish and not important. Message-ID: <20110903145619.B05EE8204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47046:2293ffeeaf79 Date: 2011-09-03 16:55 +0200 http://bitbucket.org/pypy/pypy/changeset/2293ffeeaf79/ Log: Better order for this. Corner-casish and not important. 
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -43,11 +43,11 @@ def switch(self, w_to): to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None: - if self is to: # double-switch to myself: no-op - return get_result() if to.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") + if self is to: # double-switch to myself: no-op + return get_result() if self.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") From noreply at buildbot.pypy.org Sat Sep 3 17:24:37 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Sep 2011 17:24:37 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix a crash (escaped ValueError) in various division methods on floats with an inf LHS. Message-ID: <20110903152437.770F78204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47047:7c36492d9f9d Date: 2011-09-03 11:24 -0400 http://bitbucket.org/pypy/pypy/changeset/7c36492d9f9d/ Log: Fix a crash (escaped ValueError) in various division methods on floats with an inf LHS. 
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -355,9 +355,13 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) - if (mod and ((y < 0.0) != (mod < 0.0))): - mod += y + try: + mod = math.fmod(x, y) + except ValueError: + mod = rfloat.NAN + else: + if (mod and ((y < 0.0) != (mod < 0.0))): + mod += y return W_FloatObject(mod) @@ -366,7 +370,10 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) + try: + mod = math.fmod(x, y) + except ValueError: + return [W_FloatObject(rfloat.NAN), W_FloatObject(rfloat.NAN)] # fmod is typically exact, so vx-mod is *mathematically* an # exact multiple of wx. But this is fp arithmetic, and fp # vx - mod is an approximation; the result is that div may diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -767,3 +767,19 @@ def test_invalid(self): raises(ValueError, float.fromhex, "0P") + + def test_division_edgecases(self): + import math + + # inf + inf = float("inf") + assert math.isnan(inf % 3) + assert math.isnan(inf // 3) + x, y = divmod(inf, 3) + assert math.isnan(x) + assert math.isnan(y) + + # divide by 0 + raises(ZeroDivisionError, lambda: inf % 0) + raises(ZeroDivisionError, lambda: inf // 0) + raises(ZeroDivisionError, divmod, inf, 0) \ No newline at end of file From noreply at buildbot.pypy.org Sat Sep 3 17:24:38 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 Sep 2011 17:24:38 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20110903152438.ABE4E8204C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: 
r47048:6dad1c11da53 Date: 2011-09-03 11:24 -0400 http://bitbucket.org/pypy/pypy/changeset/6dad1c11da53/ Log: merged upstream diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -43,11 +43,11 @@ def switch(self, w_to): to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None: - if self is to: # double-switch to myself: no-op - return get_result() if to.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") + if self is to: # double-switch to myself: no-op + return get_result() if self.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") From noreply at buildbot.pypy.org Sat Sep 3 17:51:59 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 17:51:59 +0200 (CEST) Subject: [pypy-commit] pypy default: Rename this file. (For now it's always skipped.) Message-ID: <20110903155159.B74E28204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47049:1cd7868cbf93 Date: 2011-09-03 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/1cd7868cbf93/ Log: Rename this file. (For now it's always skipped.) 
diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py rename from pypy/module/test_lib_pypy/test_stackless.py rename to pypy/module/test_lib_pypy/test_stackless_pickle.py From noreply at buildbot.pypy.org Sat Sep 3 17:52:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Sep 2011 17:52:00 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110903155200.F0A718204C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47050:6c5b36d058ec Date: 2011-09-03 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/6c5b36d058ec/ Log: merge heads diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -355,9 +355,13 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) - if (mod and ((y < 0.0) != (mod < 0.0))): - mod += y + try: + mod = math.fmod(x, y) + except ValueError: + mod = rfloat.NAN + else: + if (mod and ((y < 0.0) != (mod < 0.0))): + mod += y return W_FloatObject(mod) @@ -366,7 +370,10 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) + try: + mod = math.fmod(x, y) + except ValueError: + return [W_FloatObject(rfloat.NAN), W_FloatObject(rfloat.NAN)] # fmod is typically exact, so vx-mod is *mathematically* an # exact multiple of wx. 
But this is fp arithmetic, and fp # vx - mod is an approximation; the result is that div may diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -767,3 +767,19 @@ def test_invalid(self): raises(ValueError, float.fromhex, "0P") + + def test_division_edgecases(self): + import math + + # inf + inf = float("inf") + assert math.isnan(inf % 3) + assert math.isnan(inf // 3) + x, y = divmod(inf, 3) + assert math.isnan(x) + assert math.isnan(y) + + # divide by 0 + raises(ZeroDivisionError, lambda: inf % 0) + raises(ZeroDivisionError, lambda: inf // 0) + raises(ZeroDivisionError, divmod, inf, 0) \ No newline at end of file From noreply at buildbot.pypy.org Sat Sep 3 20:11:35 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sat, 3 Sep 2011 20:11:35 +0200 (CEST) Subject: [pypy-commit] pypy jit-duplicated_short_boxes: In case of conflicts, prioritize among the potential short ops that can Message-ID: <20110903181135.647058204C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-duplicated_short_boxes Changeset: r47051:77e3e26cc551 Date: 2011-09-03 20:11 +0200 http://bitbucket.org/pypy/pypy/changeset/77e3e26cc551/ Log: In case of conflicts, prioritize among the potential short ops that can produce one specific box instead of duplicating recursivly every op that uses that box as an argument. The priority order is: - ops found in the original trace - synthetic ops (setfields converted to getfields) - inputargs - potential ops that was never promoted to short_boxes This makes the effect of the optimizations less random and should always remove loop invariant ops. Non loop invariant cases can still benefit from unrolling but in exactly what situations has become more complicated. 
diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -138,9 +138,7 @@ result = newresult getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)], result, op.getdescr()) - getop = shortboxes.add_potential(getop) - self._cached_fields_getfield_op[structvalue] = getop - self._cached_fields[structvalue] = optimizer.getvalue(result) + shortboxes.add_potential(getop, synthetic=True) elif op.result is not None: shortboxes.add_potential(op) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7015,10 +7015,11 @@ jump(p5, p6) """ expected = """ - [p5, p6, i12, i13, i10] + [p5, p6, i14, i12, i10] + i13 = int_add(i14, 7) call(i12, i13, descr=nonwritedescr) setfield_gc(p6, i10, descr=nextdescr) - jump(p5, p6, i12, i12, i10) + jump(p5, p6, i10, i12, i10) """ self.optimize_loop(ops, expected) @@ -7138,13 +7139,57 @@ jump(i1, i3) """ expected = """ - [i1, i3, i20, i18] - i26 = int_add(i20, i20) - call(i26, descr=nonwritedescr) - jump(i1, i18, i20, i18) - """ - self.optimize_loop(ops, expected) - + [i1, i2, i6, i3] + call(i6, descr=nonwritedescr) + jump(i1, i3, i6, i3) + """ + short = """ + [i1, i2] + i3 = int_add(i1, i1) + i4 = int_add(i3, i3) + i5 = int_add(i4, i4) + i6 = int_add(i5, i5) + jump(i1, i2, i6, i3) + """ + self.optimize_loop(ops, expected, expected_short=short) + + def test_prioritize_getfield1(self): + ops = """ + [p1, p2] + i1 = getfield_gc(p1, descr=valuedescr) + setfield_gc(p2, i1, descr=nextdescr) + i2 = int_neg(i1) + call(i2, descr=nonwritedescr) + jump(p1, p2) + """ + expected = """ + [p1, p2, i2, i1] + call(i2, descr=nonwritedescr) + setfield_gc(p2, i1, descr=nextdescr) + jump(p1, p2, i2, i1) + """ + self.optimize_loop(ops, 
expected) + + def test_prioritize_getfield2(self): + # Same as previous, but with descrs intercahnged which means + # that the getfield is discovered first when looking for + # potential short boxes during tests + ops = """ + [p1, p2] + i1 = getfield_gc(p1, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) + i2 = int_neg(i1) + call(i2, descr=nonwritedescr) + jump(p1, p2) + """ + expected = """ + [p1, p2, i2, i1] + call(i2, descr=nonwritedescr) + setfield_gc(p2, i1, descr=valuedescr) + jump(p1, p2, i2, i1) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -150,6 +150,7 @@ args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs]) debug_print('short inputargs: ' + args) self.short_boxes.debug_print(logops) + # Force virtuals amoung the jump_args of the preamble to get the # operations needed to setup the proper state of those virtuals diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -462,8 +462,10 @@ class ShortBoxes(object): def __init__(self, optimizer, surviving_boxes): self.potential_ops = {} - self.duplicates = {} + self.alternatives = {} + self.synthetic = {} self.aliases = {} + self.rename = {} self.optimizer = optimizer for box in surviving_boxes: self.potential_ops[box] = None @@ -476,27 +478,82 @@ self.produce_short_preamble_box(box) except BoxNotProducable: pass - self.duplicate_short_boxes_if_needed() + def prioritized_alternatives(self, box): + if box not in self.alternatives: + return [self.potential_ops[box]] + alts = self.alternatives[box] + hi, lo = 0, len(alts) - 1 + while hi < lo: + if alts[lo] is None: # Inputarg, 
lowest priority + alts[lo], alts[-1] = alts[-1], alts[lo] + lo -= 1 + elif alts[lo] not in self.synthetic: # Hi priority + alts[hi], alts[lo] = alts[lo], alts[hi] + hi += 1 + else: # Low priority + lo -= 1 + return alts + + def renamed(self, box): + if box in self.rename: + return self.rename[box] + return box + + def add_to_short(self, box, op): + if op: + op = op.clone() + for i in range(op.numargs()): + op.setarg(i, self.renamed(op.getarg(i))) + if box in self.short_boxes: + if op is None: + oldop = self.short_boxes[box].clone() + oldres = oldop.result + newbox = oldop.result = oldres.clonebox() + self.rename[box] = newbox + self.short_boxes[box] = None + self.short_boxes[newbox] = oldop + else: + newop = op.clone() + newbox = newop.result = op.result.clonebox() + self.short_boxes[newop.result] = newop + value = self.optimizer.getvalue(box) + self.optimizer.make_equal_to(newbox, value) + else: + self.short_boxes[box] = op + def produce_short_preamble_box(self, box): if box in self.short_boxes: return if isinstance(box, Const): return if box in self.potential_ops: - op = self.potential_ops[box] - if op: - for arg in op.getarglist(): - self.produce_short_preamble_box(arg) - self.short_boxes[box] = op + ops = self.prioritized_alternatives(box) + produced_one = False + for op in ops: + try: + if op: + for arg in op.getarglist(): + self.produce_short_preamble_box(arg) + except BoxNotProducable: + pass + else: + produced_one = True + self.add_to_short(box, op) + if not produced_one: + raise BoxNotProducable else: raise BoxNotProducable - def add_potential(self, op): + def add_potential(self, op, synthetic=False): if op.result not in self.potential_ops: self.potential_ops[op.result] = op - return op - return self.duplicate(self.potential_ops, op) + else: + if op.result not in self.alternatives: + self.alternatives[op.result] = [self.potential_ops[op.result]] + self.alternatives[op.result].append(op) + if synthetic: + self.synthetic[op] = True def duplicate(self, 
destination, op): newop = op.clone() diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -1058,3 +1058,55 @@ int_ops = int_add.values() + int_neg.values() assert len(set([op.result for op in int_ops])) == 8 + def test_prioritize1(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1)) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + assert len(getfield) == 1 + assert getfield[0].getarg(0) == self.p1 + + def test_prioritize2(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1)) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + assert len(getfield) == 1 + assert getfield[0].getarg(0) == self.p2 + + def test_prioritize3(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + 
sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + assert len(getfield) == 1 + assert getfield[0].getarg(0) == self.p1 From notifications-noreply at bitbucket.org Sat Sep 3 21:05:01 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Sat, 03 Sep 2011 19:05:01 -0000 Subject: [pypy-commit] Notification: pypy Message-ID: <20110903190501.19738.87797@bitbucket01.managed.contegix.com> You have received a notification from Henrik Vendelbo. Hi, I forked pypy. My fork is at https://bitbucket.org/thepian/pypy. 
-- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Sun Sep 4 10:59:52 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Sep 2011 10:59:52 +0200 (CEST) Subject: [pypy-commit] pypy jit-duplicated_short_boxes: with equal priority either can occure Message-ID: <20110904085952.610C48203C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-duplicated_short_boxes Changeset: r47054:767b8639a9c2 Date: 2011-09-04 10:06 +0200 http://bitbucket.org/pypy/pypy/changeset/767b8639a9c2/ Log: with equal priority either can occure diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -1073,7 +1073,26 @@ getfield = [op for op in sb.short_boxes.values() if op and op.result == int_neg.getarg(0)] assert len(getfield) == 1 - assert getfield[0].getarg(0) == self.p1 + assert getfield[0].getarg(0) in [self.p1, self.p2] + + def test_prioritize1bis(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + assert len(getfield) == 1 + assert getfield[0].getarg(0) in [self.p1, self.p2] def test_prioritize2(self): class Optimizer(FakeOptimizer): From noreply at buildbot.pypy.org Sun Sep 4 10:59:58 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 
Sep 2011 10:59:58 +0200 (CEST) Subject: [pypy-commit] pypy jit-duplicated_short_boxes: hg merge default Message-ID: <20110904085958.386BE822AB@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-duplicated_short_boxes Changeset: r47057:9541261c8da3 Date: 2011-09-04 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/9541261c8da3/ Log: hg merge default diff --git a/lib-python/modified-2.7/ctypes/util.py b/lib-python/modified-2.7/ctypes/util.py --- a/lib-python/modified-2.7/ctypes/util.py +++ b/lib-python/modified-2.7/ctypes/util.py @@ -72,8 +72,8 @@ return name if os.name == "posix" and sys.platform == "darwin": - from ctypes.macholib.dyld import dyld_find as _dyld_find def find_library(name): + from ctypes.macholib.dyld import dyld_find as _dyld_find possible = ['lib%s.dylib' % name, '%s.dylib' % name, '%s.framework/%s' % (name, name)] diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/gzip.py @@ -0,0 +1,514 @@ +"""Functions that read and write gzipped files. + +The user of the file doesn't have to worry about the compression, +but random access is not allowed.""" + +# based on Andrew Kuchling's minigzip.py distributed with the zlib module + +import struct, sys, time, os +import zlib +import io +import __builtin__ + +__all__ = ["GzipFile","open"] + +FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16 + +READ, WRITE = 1, 2 + +def write32u(output, value): + # The L format writes the bit pattern correctly whether signed + # or unsigned. + output.write(struct.pack("' + + def _check_closed(self): + """Raises a ValueError if the underlying file object has been closed. 
+ + """ + if self.closed: + raise ValueError('I/O operation on closed file.') + + def _init_write(self, filename): + self.name = filename + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + self.writebuf = [] + self.bufsize = 0 + + def _write_gzip_header(self): + self.fileobj.write('\037\213') # magic header + self.fileobj.write('\010') # compression method + fname = os.path.basename(self.name) + if fname.endswith(".gz"): + fname = fname[:-3] + flags = 0 + if fname: + flags = FNAME + self.fileobj.write(chr(flags)) + mtime = self.mtime + if mtime is None: + mtime = time.time() + write32u(self.fileobj, long(mtime)) + self.fileobj.write('\002') + self.fileobj.write('\377') + if fname: + self.fileobj.write(fname + '\000') + + def _init_read(self): + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + + def _read_gzip_header(self): + magic = self.fileobj.read(2) + if magic != '\037\213': + raise IOError, 'Not a gzipped file' + method = ord( self.fileobj.read(1) ) + if method != 8: + raise IOError, 'Unknown compression method' + flag = ord( self.fileobj.read(1) ) + self.mtime = read32(self.fileobj) + # extraflag = self.fileobj.read(1) + # os = self.fileobj.read(1) + self.fileobj.read(2) + + if flag & FEXTRA: + # Read & discard the extra field, if present + xlen = ord(self.fileobj.read(1)) + xlen = xlen + 256*ord(self.fileobj.read(1)) + self.fileobj.read(xlen) + if flag & FNAME: + # Read and discard a null-terminated string containing the filename + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FCOMMENT: + # Read and discard a null-terminated string containing a comment + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FHCRC: + self.fileobj.read(2) # Read & discard the 16-bit header CRC + + def write(self,data): + self._check_closed() + if self.mode != WRITE: + import errno + raise IOError(errno.EBADF, "write() on read-only GzipFile object") + + if self.fileobj is None: + raise 
ValueError, "write() on closed GzipFile object" + + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + if len(data) > 0: + self.size = self.size + len(data) + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + self.fileobj.write( self.compress.compress(data) ) + self.offset += len(data) + + return len(data) + + def read(self, size=-1): + self._check_closed() + if self.mode != READ: + import errno + raise IOError(errno.EBADF, "read() on write-only GzipFile object") + + if self.extrasize <= 0 and self.fileobj is None: + return '' + + readsize = 1024 + if size < 0: # get the whole thing + try: + while True: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + size = self.extrasize + elif size == 0: + return "" + else: # just get some more of it + try: + while size > self.extrasize: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + if size > self.extrasize: + size = self.extrasize + + offset = self.offset - self.extrastart + chunk = self.extrabuf[offset: offset + size] + self.extrasize = self.extrasize - size + + self.offset += size + return chunk + + def _unread(self, buf): + self.extrasize = len(buf) + self.extrasize + self.offset -= len(buf) + + def _read(self, size=1024): + if self.fileobj is None: + raise EOFError, "Reached EOF" + + if self._new_member: + # If the _new_member flag is set, we have to + # jump to the next member, if there is one. + # + # First, check if we're at the end of the file; + # if so, it's time to stop; no more members to read. 
+ pos = self.fileobj.tell() # Save current position + self.fileobj.seek(0, 2) # Seek to end of file + if pos == self.fileobj.tell(): + raise EOFError, "Reached EOF" + else: + self.fileobj.seek( pos ) # Return to original position + + self._init_read() + self._read_gzip_header() + self.decompress = zlib.decompressobj(-zlib.MAX_WBITS) + self._new_member = False + + # Read a chunk of data from the file + buf = self.fileobj.read(size) + + # If the EOF has been reached, flush the decompression object + # and mark this object as finished. + + if buf == "": + uncompress = self.decompress.flush() + self._read_eof() + self._add_read_data( uncompress ) + raise EOFError, 'Reached EOF' + + uncompress = self.decompress.decompress(buf) + self._add_read_data( uncompress ) + + if self.decompress.unused_data != "": + # Ending case: we've come to the end of a member in the file, + # so seek back to the start of the unused data, finish up + # this member, and read a new gzip header. + # (The number of bytes to seek back is the length of the unused + # data, minus 8 because _read_eof() will rewind a further 8 bytes) + self.fileobj.seek( -len(self.decompress.unused_data)+8, 1) + + # Check the CRC and file size, and set the flag so we read + # a new member on the next call + self._read_eof() + self._new_member = True + + def _add_read_data(self, data): + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + offset = self.offset - self.extrastart + self.extrabuf = self.extrabuf[offset:] + data + self.extrasize = self.extrasize + len(data) + self.extrastart = self.offset + self.size = self.size + len(data) + + def _read_eof(self): + # We've read to the end of the file, so we have to rewind in order + # to reread the 8 bytes containing the CRC and the file size. + # We check the that the computed CRC and size of the + # uncompressed data matches the stored values. Note that the size + # stored is the true file size mod 2**32. 
+ self.fileobj.seek(-8, 1) + crc32 = read32(self.fileobj) + isize = read32(self.fileobj) # may exceed 2GB + if crc32 != self.crc: + raise IOError("CRC check failed %s != %s" % (hex(crc32), + hex(self.crc))) + elif isize != (self.size & 0xffffffffL): + raise IOError, "Incorrect length of data produced" + + # Gzip files can be padded with zeroes and still have archives. + # Consume all zero bytes and set the file position to the first + # non-zero byte. See http://www.gzip.org/#faq8 + c = "\x00" + while c == "\x00": + c = self.fileobj.read(1) + if c: + self.fileobj.seek(-1, 1) + + @property + def closed(self): + return self.fileobj is None + + def close(self): + if self.fileobj is None: + return + if self.mode == WRITE: + self.fileobj.write(self.compress.flush()) + write32u(self.fileobj, self.crc) + # self.size may exceed 2GB, or even 4GB + write32u(self.fileobj, self.size & 0xffffffffL) + self.fileobj = None + elif self.mode == READ: + self.fileobj = None + if self.myfileobj: + self.myfileobj.close() + self.myfileobj = None + + def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH): + self._check_closed() + if self.mode == WRITE: + # Ensure the compressor's buffer is flushed + self.fileobj.write(self.compress.flush(zlib_mode)) + self.fileobj.flush() + + def fileno(self): + """Invoke the underlying file object's fileno() method. + + This will raise AttributeError if the underlying file object + doesn't support fileno(). 
+ """ + return self.fileobj.fileno() + + def rewind(self): + '''Return the uncompressed stream file position indicator to the + beginning of the file''' + if self.mode != READ: + raise IOError("Can't rewind in write mode") + self.fileobj.seek(0) + self._new_member = True + self.extrabuf = "" + self.extrasize = 0 + self.extrastart = 0 + self.offset = 0 + + def readable(self): + return self.mode == READ + + def writable(self): + return self.mode == WRITE + + def seekable(self): + return True + + def seek(self, offset, whence=0): + if whence: + if whence == 1: + offset = self.offset + offset + else: + raise ValueError('Seek from end not supported') + if self.mode == WRITE: + if offset < self.offset: + raise IOError('Negative seek in write mode') + count = offset - self.offset + for i in range(count // 1024): + self.write(1024 * '\0') + self.write((count % 1024) * '\0') + elif self.mode == READ: + if offset == self.offset: + self.read(0) # to make sure that this file is open + return self.offset + if offset < self.offset: + # for negative seek, rewind and do positive seek + self.rewind() + count = offset - self.offset + for i in range(count // 1024): + self.read(1024) + self.read(count % 1024) + + return self.offset + + def readline(self, size=-1): + if size < 0: + # Shortcut common case - newline found in buffer. + offset = self.offset - self.extrastart + i = self.extrabuf.find('\n', offset) + 1 + if i > 0: + self.extrasize -= i - offset + self.offset += i - offset + return self.extrabuf[offset: i] + + size = sys.maxint + readsize = self.min_readsize + else: + readsize = size + bufs = [] + while size != 0: + c = self.read(readsize) + i = c.find('\n') + + # We set i=size to break out of the loop under two + # conditions: 1) there's no newline, and the chunk is + # larger than size, or 2) there is a newline, but the + # resulting line would be longer than 'size'. 
+ if (size <= i) or (i == -1 and len(c) > size): + i = size - 1 + + if i >= 0 or c == '': + bufs.append(c[:i + 1]) # Add portion of last chunk + self._unread(c[i + 1:]) # Push back rest of chunk + break + + # Append chunk to list, decrease 'size', + bufs.append(c) + size = size - len(c) + readsize = min(size, readsize * 2) + if readsize > self.min_readsize: + self.min_readsize = min(readsize, self.min_readsize * 2, 512) + return ''.join(bufs) # Return resulting line + + +def _test(): + # Act like gzip; with -d, act like gunzip. + # The input file is not deleted, however, nor are any other gzip + # options or features supported. + args = sys.argv[1:] + decompress = args and args[0] == "-d" + if decompress: + args = args[1:] + if not args: + args = ["-"] + for arg in args: + if decompress: + if arg == "-": + f = GzipFile(filename="", mode="rb", fileobj=sys.stdin) + g = sys.stdout + else: + if arg[-3:] != ".gz": + print "filename doesn't end in .gz:", repr(arg) + continue + f = open(arg, "rb") + g = __builtin__.open(arg[:-3], "wb") + else: + if arg == "-": + f = sys.stdin + g = GzipFile(filename="", mode="wb", fileobj=sys.stdout) + else: + f = __builtin__.open(arg, "rb") + g = open(arg + ".gz", "wb") + while True: + chunk = f.read(1024) + if not chunk: + break + g.write(chunk) + if g is not sys.stdout: + g.close() + if f is not sys.stdin: + f.close() + +if __name__ == '__main__': + _test() diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py --- a/lib-python/modified-2.7/sqlite3/test/regression.py +++ b/lib-python/modified-2.7/sqlite3/test/regression.py @@ -274,6 +274,18 @@ cur.execute("UPDATE foo SET id = 3 WHERE id = 1") self.assertEqual(cur.description, None) + def CheckStatementCache(self): + cur = self.con.cursor() + cur.execute("CREATE TABLE foo (id INTEGER)") + values = [(i,) for i in xrange(5)] + cur.executemany("INSERT INTO foo (id) VALUES (?)", values) + + cur.execute("SELECT id FROM foo") + 
self.assertEqual(list(cur), values) + self.con.commit() + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py --- a/lib-python/modified-2.7/tarfile.py +++ b/lib-python/modified-2.7/tarfile.py @@ -252,8 +252,8 @@ the high bit set. So we calculate two checksums, unsigned and signed. """ - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512])) + signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): @@ -265,7 +265,6 @@ if length is None: shutil.copyfileobj(src, dst) return - BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in xrange(blocks): @@ -802,19 +801,19 @@ if self.closed: raise ValueError("I/O operation on closed file") - buf = "" if self.buffer: if size is None: - buf = self.buffer + buf = self.buffer + self.fileobj.read() self.buffer = "" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() + buf += self.fileobj.read(size - len(buf)) else: - buf += self.fileobj.read(size - len(buf)) + if size is None: + buf = self.fileobj.read() + else: + buf = self.fileobj.read(size) self.position += len(buf) return buf diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -54,7 +54,8 @@ def get_ffi_argtype(self): if self._ffiargtype: return self._ffiargtype - return _shape_to_ffi_type(self._ffiargshape) + self._ffiargtype = _shape_to_ffi_type(self._ffiargshape) + return self._ffiargtype 
def _CData_output(self, resbuffer, base=None, index=-1): #assert isinstance(resbuffer, _rawffi.ArrayInstance) @@ -166,7 +167,8 @@ return tp._alignmentofinstances() def byref(cdata): - from ctypes import pointer + # "pointer" is imported at the end of this module to avoid circular + # imports return pointer(cdata) def cdata_from_address(self, address): @@ -224,5 +226,9 @@ 'Z' : _ffi.types.void_p, 'X' : _ffi.types.void_p, 'v' : _ffi.types.sshort, + '?' : _ffi.types.ubyte, } + +# used by "byref" +from _ctypes.pointer import pointer diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -24,6 +24,7 @@ from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast from ctypes import sizeof, c_ssize_t +from collections import OrderedDict import datetime import sys import time @@ -274,6 +275,28 @@ def unicode_text_factory(x): return unicode(x, 'utf-8') + +class StatementCache(object): + def __init__(self, connection, maxcount): + self.connection = connection + self.maxcount = maxcount + self.cache = OrderedDict() + + def get(self, sql, cursor, row_factory): + try: + stat = self.cache[sql] + except KeyError: + stat = Statement(self.connection, sql) + self.cache[sql] = stat + if len(self.cache) > self.maxcount: + self.cache.popitem(0) + # + if stat.in_use: + stat = Statement(self.connection, sql) + stat.set_row_factory(row_factory) + return stat + + class Connection(object): def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", check_same_thread=True, factory=None, cached_statements=100): @@ -291,6 +314,7 @@ self.row_factory = None self._isolation_level = isolation_level self.detect_types = detect_types + self.statement_cache = StatementCache(self, cached_statements) self.cursors = [] @@ -399,7 +423,7 @@ cur = Cursor(self) if not isinstance(sql, (str, unicode)): raise Warning("SQL is of wrong type. 
Must be string or unicode.") - statement = Statement(cur, sql, self.row_factory) + statement = self.statement_cache.get(sql, cur, self.row_factory) return statement def _get_isolation_level(self): @@ -681,6 +705,8 @@ from sqlite3.dump import _iterdump return _iterdump(self) +DML, DQL, DDL = range(3) + class Cursor(object): def __init__(self, con): if not isinstance(con, Connection): @@ -708,12 +734,12 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: - if self.statement.kind == "DDL": + if self.statement.kind == DDL: self.connection.commit() - elif self.statement.kind == "DML": + elif self.statement.kind == DML: self.connection._begin() self.statement.set_params(params) @@ -724,18 +750,18 @@ self.statement.reset() raise self.connection._get_exception(ret) - if self.statement.kind == "DQL"and ret == SQLITE_ROW: + if self.statement.kind == DQL and ret == SQLITE_ROW: self.statement._build_row_cast_map() - self.statement._readahead() + self.statement._readahead(self) else: self.statement.item = None self.statement.exhausted = True - if self.statement.kind in ("DML", "DDL"): + if self.statement.kind == DML or self.statement.kind == DDL: self.statement.reset() self.rowcount = -1 - if self.statement.kind == "DML": + if self.statement.kind == DML: self.rowcount = sqlite.sqlite3_changes(self.connection.db) return self @@ -746,8 +772,9 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) - if self.statement.kind == "DML": + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) + + if self.statement.kind == DML: self.connection._begin() else: raise ProgrammingError, "executemany is only for DML statements" @@ -799,7 +826,7 @@ return self def __iter__(self): 
- return self.statement + return iter(self.fetchone, None) def _check_reset(self): if self.reset: @@ -816,7 +843,7 @@ return None try: - return self.statement.next() + return self.statement.next(self) except StopIteration: return None @@ -830,7 +857,7 @@ if size is None: size = self.arraysize lst = [] - for row in self.statement: + for row in self: lst.append(row) if len(lst) == size: break @@ -841,7 +868,7 @@ self._check_reset() if self.statement is None: return [] - return list(self.statement) + return list(self) def _getdescription(self): if self._description is None: @@ -871,22 +898,24 @@ lastrowid = property(_getlastrowid) class Statement(object): - def __init__(self, cur, sql, row_factory): + def __init__(self, connection, sql): self.statement = None if not isinstance(sql, str): raise ValueError, "sql must be a string" - self.con = cur.connection - self.cur = weakref.ref(cur) + self.con = connection self.sql = sql # DEBUG ONLY - self.row_factory = row_factory first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): - self.kind = "DML" + self.kind = DML elif first_word in ("SELECT", "PRAGMA"): - self.kind = "DQL" + self.kind = DQL else: - self.kind = "DDL" + self.kind = DDL self.exhausted = False + self.in_use = False + # + # set by set_row_factory + self.row_factory = None self.statement = c_void_p() next_char = c_char_p() @@ -895,7 +924,7 @@ if ret == SQLITE_OK and self.statement.value is None: # an empty statement, we work around that, as it's the least trouble ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char)) - self.kind = "DQL" + self.kind = DQL if ret != SQLITE_OK: raise self.con._get_exception(ret) @@ -907,6 +936,9 @@ self._build_row_cast_map() + def set_row_factory(self, row_factory): + self.row_factory = row_factory + def _build_row_cast_map(self): self.row_cast_map = [] for i in 
xrange(sqlite.sqlite3_column_count(self.statement)): @@ -976,6 +1008,7 @@ ret = sqlite.sqlite3_reset(self.statement) if ret != SQLITE_OK: raise self.con._get_exception(ret) + self.mark_dirty() if params is None: if sqlite.sqlite3_bind_parameter_count(self.statement) != 0: @@ -1006,10 +1039,7 @@ raise ProgrammingError("missing parameter '%s'" %param) self.set_param(idx, param) - def __iter__(self): - return self - - def next(self): + def next(self, cursor): self.con._check_closed() self.con._check_thread() if self.exhausted: @@ -1025,10 +1055,10 @@ sqlite.sqlite3_reset(self.statement) raise exc - self._readahead() + self._readahead(cursor) return item - def _readahead(self): + def _readahead(self, cursor): self.column_count = sqlite.sqlite3_column_count(self.statement) row = [] for i in xrange(self.column_count): @@ -1063,23 +1093,30 @@ row = tuple(row) if self.row_factory is not None: - row = self.row_factory(self.cur(), row) + row = self.row_factory(cursor, row) self.item = row def reset(self): self.row_cast_map = None - return sqlite.sqlite3_reset(self.statement) + ret = sqlite.sqlite3_reset(self.statement) + self.in_use = False + self.exhausted = False + return ret def finalize(self): sqlite.sqlite3_finalize(self.statement) self.statement = None + self.in_use = False + + def mark_dirty(self): + self.in_use = True def __del__(self): sqlite.sqlite3_finalize(self.statement) self.statement = None def _get_description(self): - if self.kind == "DML": + if self.kind == DML: return None desc = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -59,7 +59,12 @@ # while not target: if not target.__started: - _continulet.__init__(target, _greenlet_start, *args) + if unbound_method != _continulet.throw: + greenlet_func = _greenlet_start + else: + greenlet_func = _greenlet_throw + _continulet.__init__(target, greenlet_func, *args) + unbound_method = 
_continulet.switch args = () target.__started = True break @@ -136,3 +141,11 @@ if greenlet.parent is not _tls.main: _continuation.permute(greenlet, greenlet.parent) return (res,) + +def _greenlet_throw(greenlet, exc, value, tb): + _tls.current = greenlet + try: + raise exc, value, tb + finally: + if greenlet.parent is not _tls.main: + _continuation.permute(greenlet, greenlet.parent) diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -401,13 +401,19 @@ return "(arg: %s) "%self.arg if "\n" in self.buffer: if lineno == 0: - return self._ps2 + res = self.ps2 elif lineno == self.buffer.count("\n"): - return self._ps4 + res = self.ps4 else: - return self._ps3 + res = self.ps3 else: - return self._ps1 + res = self.ps1 + # Lazily call str() on self.psN, and cache the results using as key + # the object on which str() was called. This ensures that even if the + # same object is used e.g. for ps1 and ps2, str() is called only once. + if res not in self._pscache: + self._pscache[res] = str(res) + return self._pscache[res] def push_input_trans(self, itrans): self.input_trans_stack.append(self.input_trans) @@ -473,8 +479,7 @@ self.pos = 0 self.dirty = 1 self.last_command = None - self._ps1, self._ps2, self._ps3, self._ps4 = \ - map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + self._pscache = {} except: self.restore() raise diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -315,6 +315,28 @@ .. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html +--------------------------------------------------------- +Can RPython modules for PyPy be translated independently? +--------------------------------------------------------- + +No, you have to rebuild the entire interpreter. This means two things: + +* It is imperative to use test-driven development. 
You have to test + exhaustively your module in pure Python, before even attempting to + translate it. Once you translate it, you should have only a few typing + issues left to fix, but otherwise the result should work out of the box. + +* Second, and perhaps most important: do you have a really good reason + for writing the module in RPython in the first place? Nowadays you + should really look at alternatives, like writing it in pure Python, + using ctypes if it needs to call C code. Other alternatives are being + developed too (as of summer 2011), like a Cython binding. + +In this context it is not that important to be able to translate +RPython modules independently of translating the complete interpreter. +(It could be done given enough efforts, but it's a really serious +undertaking. Consider it as quite unlikely for now.) + ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? ---------------------------------------------------------- diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -103,7 +103,7 @@ The meta-interpreter starts interpreting the JIT bytecode. Each operation is executed and then recorded in a list of operations, called the trace. -Operations can have a list of boxes that operate on, arguments. Some operations +Operations can have a list of boxes they operate on, arguments. Some operations (like GETFIELD and GETARRAYITEM) also have special objects that describe how their arguments are laid out in memory. All possible operations generated by tracing are listed in metainterp/resoperation.py. 
When a (interpreter-level) diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -199,7 +199,11 @@ The following features (present in some past Stackless version of PyPy) are for the time being not supported any more: -* Tasklets and channels (needs to be rewritten at app-level) +* Tasklets and channels (currently ``stackless.py`` seems to import, + but you have tasklets on top of coroutines on top of greenlets on + top of continulets on top of stacklets, and it's probably not too + hard to cut two of these levels by adapting ``stackless.py`` to + use directly continulets) * Coroutines (could be rewritten at app-level) @@ -209,6 +213,13 @@ * Automatic unlimited stack (must be emulated__ so far) +* Support for other CPUs than x86 and x86-64 + +* The app-level ``f_back`` field of frames crossing continulet boundaries + is None for now, unlike what I explain in the theoretical overview + above. It mostly means that in a ``pdb.set_trace()`` you cannot go + ``up`` past continulet boundaries. This could be fixed. + .. __: `recursion depth limit`_ (*) Pickling, as well as changing threads, could be implemented by using @@ -217,9 +228,8 @@ "hard" switch (like now) when the C stack contains non-trivial C frames to save, and a "soft" switch (like previously) when it contains only simple calls from Python to Python. Soft-switched continulets would -also consume a bit less RAM, at the possible expense of making the -switch a bit slower (unsure about that; what is the Stackless Python -experience?). +also consume a bit less RAM, and the switch might be a bit faster too +(unsure about that; what is the Stackless Python experience?). 
Recursion depth limit diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -626,9 +626,9 @@ self.default_compiler = compiler return compiler - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): "Create an empty PyFrame suitable for this code object." - return self.FrameClass(self, code, w_globals, closure) + return self.FrameClass(self, code, w_globals, outer_func) def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -30,7 +30,7 @@ can_change_code = True _immutable_fields_ = ['code?', 'w_func_globals?', - 'closure?', + 'closure?[*]', 'defs_w?[*]', 'name?'] @@ -96,7 +96,7 @@ assert isinstance(code, PyCode) if nargs < 5: new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in funccallunrolling: if i < nargs: new_frame.locals_stack_w[i] = args_w[i] @@ -156,7 +156,7 @@ def _flat_pycall(self, code, nargs, frame): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg @@ -167,7 +167,7 @@ def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -8,7 +8,7 @@ class Cell(Wrappable): "A simple container for a wrapped value." 
- + def __init__(self, w_value=None): self.w_value = w_value @@ -90,32 +90,33 @@ # variables coming from a parent function in which i'm nested # 'closure' is a list of Cell instances: the received free vars. - cells = None - @jit.unroll_safe - def initialize_frame_scopes(self, closure, code): - super_initialize_frame_scopes(self, closure, code) + def initialize_frame_scopes(self, outer_func, code): + super_initialize_frame_scopes(self, outer_func, code) ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: + self.cells = [] return # no self.cells needed - fast path - if closure is None: - closure = [] - elif closure is None: + elif outer_func is None: space = self.space raise OperationError(space.w_TypeError, space.wrap("directly executed code object " "may not contain free variables")) - if len(closure) != nfreevars: + if outer_func and outer_func.closure: + closure_size = len(outer_func.closure) + else: + closure_size = 0 + if closure_size != nfreevars: raise ValueError("code object received a closure with " "an unexpected number of free variables") self.cells = [None] * (ncellvars + nfreevars) for i in range(ncellvars): self.cells[i] = Cell() for i in range(nfreevars): - self.cells[i + ncellvars] = closure[i] - + self.cells[i + ncellvars] = outer_func.closure[i] + def _getcells(self): return self.cells diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -198,7 +198,7 @@ def funcrun(self, func, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, @@ -211,7 +211,7 @@ def funcrun_obj(self, func, w_obj, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, diff --git 
a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -51,7 +51,7 @@ is_being_profiled = False escaped = False # see mark_as_escaped() - def __init__(self, space, code, w_globals, closure): + def __init__(self, space, code, w_globals, outer_func): if not we_are_translated(): assert type(self) in (space.FrameClass, CPythonFrame), ( "use space.FrameClass(), not directly PyFrame()") @@ -70,7 +70,7 @@ self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. - self.initialize_frame_scopes(closure, code) + self.initialize_frame_scopes(outer_func, code) self.f_lineno = code.co_firstlineno def mark_as_escaped(self): @@ -117,8 +117,8 @@ return self.builtin else: return self.space.builtin - - def initialize_frame_scopes(self, closure, code): + + def initialize_frame_scopes(self, outer_func, code): # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. 
# CO_NEWLOCALS: make a locals dict unless optimized is also set @@ -385,7 +385,11 @@ # do not use the instance's __init__ but the base's, because we set # everything like cells from here - PyFrame.__init__(self, space, pycode, w_globals, closure) + # XXX hack + from pypy.interpreter.function import Function + outer_func = Function(space, None, closure=closure, + forcename="fake") + PyFrame.__init__(self, space, pycode, w_globals, outer_func) f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True) new_frame.f_backref = jit.non_virtual_ref(f_back) diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py --- a/pypy/interpreter/pyparser/future.py +++ b/pypy/interpreter/pyparser/future.py @@ -109,25 +109,19 @@ self.getc() == self.getc(+2)): self.pos += 3 while 1: # Deal with a triple quoted docstring - if self.getc() == '\\': - self.pos += 2 + c = self.getc() + if c == '\\': + self.pos += 1 + self._skip_next_char_from_docstring() + elif c != endchar: + self._skip_next_char_from_docstring() else: - c = self.getc() - if c != endchar: - self.pos += 1 - if c == '\n': - self.atbol() - elif c == '\r': - if self.getc() == '\n': - self.pos += 1 - self.atbol() - else: - self.pos += 1 - if (self.getc() == endchar and - self.getc(+1) == endchar): - self.pos += 2 - self.consume_empty_line() - break + self.pos += 1 + if (self.getc() == endchar and + self.getc(+1) == endchar): + self.pos += 2 + self.consume_empty_line() + break else: # Deal with a single quoted docstring self.pos += 1 @@ -138,17 +132,21 @@ self.consume_empty_line() return elif c == '\\': - # Deal with linefeeds - if self.getc() != '\r': - self.pos += 1 - else: - self.pos += 1 - if self.getc() == '\n': - self.pos += 1 + self._skip_next_char_from_docstring() elif c in '\r\n': # Syntax error return + def _skip_next_char_from_docstring(self): + c = self.getc() + self.pos += 1 + if c == '\n': + self.atbol() + elif c == '\r': + if self.getc() == '\n': + self.pos += 1 + self.atbol() + def 
consume_continuation(self): c = self.getc() if c in '\n\r': diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py --- a/pypy/interpreter/pyparser/test/test_futureautomaton.py +++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py @@ -221,6 +221,14 @@ assert f.lineno == 3 assert f.col_offset == 0 +def test_lots_of_continuation_lines(): + s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n" + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_WITH_STATEMENT + assert f.lineno == 8 + assert f.col_offset == 0 + # This looks like a bug in cpython parser # and would require extensive modifications # to future.py in order to emulate the same behaviour @@ -239,3 +247,19 @@ raise AssertionError('IndentationError not raised') assert f.lineno == 2 assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_single_quoted(): + s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_triple_quoted(): + s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,13 +25,14 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types self.count_fields_if_immut = count_fields_if_immut + self.ffi_flags = ffi_flags def 
get_arg_types(self): return self.arg_types @@ -67,6 +68,9 @@ def count_fields_if_immutable(self): return self.count_fields_if_immut + def get_ffi_flags(self): + return self.ffi_flags + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -114,14 +118,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): key = (ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) try: return self._descrs[key] except KeyError: descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) self._descrs[key] = descr return descr @@ -326,7 +330,7 @@ return self.getdescr(0, token[0], extrainfo=extrainfo, arg_types=''.join(arg_types)) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind arg_types = [] @@ -339,7 +343,8 @@ except UnsupportedKind: return None return self.getdescr(0, reskind, extrainfo=extrainfo, - arg_types=''.join(arg_types)) + arg_types=''.join(arg_types), + ffi_flags=ffi_flags) def grab_exc_value(self): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -260,10 +260,12 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack + ffi_flags = 0 - def __init__(self, arg_classes, extrainfo=None): + def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo + self.ffi_flags = ffi_flags def __repr__(self): res = '%s(%s)' % 
(self.__class__.__name__, self.arg_classes) @@ -284,6 +286,13 @@ def get_extra_info(self): return self.extrainfo + def get_ffi_flags(self): + return self.ffi_flags + + def get_call_conv(self): + from pypy.rlib.clibffi import get_call_conv + return get_call_conv(self.ffi_flags, True) + def get_arg_types(self): return self.arg_classes @@ -391,8 +400,8 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo) + def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) self._result_sign = result_sign diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -8,7 +8,7 @@ class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: @@ -20,18 +20,24 @@ if reskind == history.INT: size = intmask(ffi_result.c_size) signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo) + return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo) + return NonGcPtrCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo) + return FloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo) + return VoidCallDescr(arg_classes, extrainfo, + 
ffi_flags=ffi_flags) elif reskind == 'L': - return LongLongCallDescr(arg_classes, extrainfo) + return LongLongCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == 'S': SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) - return SingleFloatCallDescr(arg_classes, extrainfo) + return SingleFloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) assert False def get_ffi_type_kind(cpu, ffi_type): diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -257,10 +257,10 @@ def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport import ffisupport return ffisupport.get_call_descr_dynamic(self, ffi_args, ffi_result, - extrainfo) + extrainfo, ffi_flags) def get_overflow_error(self): ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable) diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -13,17 +13,19 @@ def test_call_descr_dynamic(): args = [types.sint, types.pointer] - descr = get_call_descr_dynamic(FakeCPU(), args, types.sint) + descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42) assert isinstance(descr, DynamicIntCallDescr) assert descr.arg_classes == 'ii' + assert descr.get_ffi_flags() == 42 args = [types.sint, types.double, types.pointer] descr = get_call_descr_dynamic(FakeCPU(), args, types.void) assert descr is None # missing floats descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), - args, types.void) + args, types.void, ffi_flags=43) assert isinstance(descr, 
VoidCallDescr) assert descr.arg_classes == 'ifi' + assert descr.get_ffi_flags() == 43 descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8) assert isinstance(descr, DynamicIntCallDescr) @@ -39,14 +41,16 @@ descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong) assert descr is None # missing longlongs descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), - [], types.slonglong) + [], types.slonglong, ffi_flags=43) assert isinstance(descr, LongLongCallDescr) + assert descr.get_ffi_flags() == 43 else: assert types.slonglong is types.slong descr = get_call_descr_dynamic(FakeCPU(), [], types.float) assert descr is None # missing singlefloats descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), - [], types.float) + [], types.float, ffi_flags=44) SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) assert isinstance(descr, SingleFloatCallDescr) + assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -468,7 +468,7 @@ assert longlong.getrealfloat(x) == 3.5 - 42 def test_call(self): - from pypy.rlib.libffi import types + from pypy.rlib.libffi import types, FUNCFLAG_CDECL def func_int(a, b): return a + b @@ -497,7 +497,8 @@ assert res.value == 2 * num # then, try it with the dynamic calldescr dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(num), BoxInt(num)], 'int', descr=dyn_calldescr) @@ -1944,7 +1945,7 @@ assert values == [1, 10] def test_call_to_c_function(self): - from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.libffi import CDLL, types, ArgChain, FUNCFLAG_CDECL from pypy.rpython.lltypesystem.ll2ctypes import libc_name libc = CDLL(libc_name) c_tolower = libc.getpointer('tolower', [types.uchar], 
types.sint) @@ -1955,7 +1956,8 @@ func_adr = llmemory.cast_ptr_to_adr(c_tolower.funcsym) funcbox = ConstInt(heaptracker.adr2int(func_adr)) calldescr = cpu.calldescrof_dynamic([types.uchar], types.sint, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) i1 = BoxInt() i2 = BoxInt() tok = BoxInt() @@ -2012,7 +2014,8 @@ calldescr = cpu.calldescrof_dynamic([types.pointer, types_size_t, types_size_t, types.pointer], types.void, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=clibffi.FUNCFLAG_CDECL) i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() @@ -2038,6 +2041,62 @@ assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') + def test_call_to_winapi_function(self): + from pypy.rlib.clibffi import _WIN32, FUNCFLAG_STDCALL + if not _WIN32: + py.test.skip("Windows test only") + from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.rwin32 import DWORD + libc = CDLL('KERNEL32') + c_GetCurrentDir = libc.getpointer('GetCurrentDirectoryA', + [types.ulong, types.pointer], + types.ulong) + + cwd = os.getcwd() + buflen = len(cwd) + 10 + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + argchain = ArgChain().arg(rffi.cast(DWORD, buflen)).arg(buffer) + res = c_GetCurrentDir.call(argchain, DWORD) + assert rffi.cast(lltype.Signed, res) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + + cpu = self.cpu + func_adr = llmemory.cast_ptr_to_adr(c_GetCurrentDir.funcsym) + funcbox = ConstInt(heaptracker.adr2int(func_adr)) + calldescr = cpu.calldescrof_dynamic([types.ulong, types.pointer], + types.ulong, + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_STDCALL) + i1 = BoxInt() + i2 = BoxInt() + faildescr = BasicFailDescr(1) + # if the stdcall convention is ignored, then ESP is wrong after the + # call: 8 bytes too much. If we repeat the call often enough, crash. 
+ ops = [] + for i in range(50): + i3 = BoxInt() + ops += [ + ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ] + ops[-1].setfailargs([]) + ops += [ + ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) + ] + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + self.cpu.set_future_value_int(0, buflen) + self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + def test_guard_not_invalidated(self): cpu = self.cpu i0 = BoxInt() diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -34,6 +34,7 @@ from pypy.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) from pypy.rlib import rgc +from pypy.rlib.clibffi import FFI_DEFAULT_ABI from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.metainterp.history import ConstInt, BoxInt from pypy.jit.codewriter.effectinfo import EffectInfo @@ -1120,7 +1121,7 @@ return genop_cmp_guard_float def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax, - argtypes=None): + argtypes=None, callconv=FFI_DEFAULT_ABI): if IS_X86_64: return self._emit_call_64(force_index, x, arglocs, start, argtypes) @@ -1149,6 +1150,16 @@ # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) + # + if callconv != FFI_DEFAULT_ABI: + self._fix_stdcall(callconv, p) + + def _fix_stdcall(self, callconv, p): + from pypy.rlib.clibffi import FFI_STDCALL + assert callconv == FFI_STDCALL + # it's a bit stupid, but we're just going to cancel the fact that + # the called function just added 'p' 
to ESP, by subtracting it again. + self.mc.SUB_ri(esp.value, p) def _emit_call_64(self, force_index, x, arglocs, start, argtypes): src_locs = [] @@ -2127,7 +2138,8 @@ tmp = eax self._emit_call(force_index, x, arglocs, 3, tmp=tmp, - argtypes=op.getdescr().get_arg_types()) + argtypes=op.getdescr().get_arg_types(), + callconv=op.getdescr().get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -527,6 +527,7 @@ NOP = insn('\x90') RET = insn('\xC3') + RET16_i = insn('\xC2', immediate(1, 'h')) PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py --- a/pypy/jit/backend/x86/test/test_runner.py +++ b/pypy/jit/backend/x86/test/test_runner.py @@ -433,6 +433,88 @@ ops_offset[operations[2]] <= ops_offset[None]) + def test_calling_convention(self, monkeypatch): + if WORD != 4: + py.test.skip("32-bit only test") + from pypy.jit.backend.x86.regloc import eax, edx + from pypy.jit.backend.x86 import codebuf + from pypy.jit.codewriter.effectinfo import EffectInfo + from pypy.rlib.libffi import types, clibffi + had_stdcall = hasattr(clibffi, 'FFI_STDCALL') + if not had_stdcall: # not running on Windows, but we can still test + monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) + # + for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: + cpu = self.cpu + mc = codebuf.MachineCodeBlockWrapper() + mc.MOV_rs(eax.value, 4) # argument 1 + mc.MOV_rs(edx.value, 40) # argument 10 + mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10 + if ffi == clibffi.FFI_DEFAULT_ABI: + mc.RET() + else: + mc.RET16_i(40) + rawstart = mc.materialize(cpu.asmmemmgr, []) + # + calldescr = cpu.calldescrof_dynamic([types.slong] * 10, + types.slong, 
+ EffectInfo.MOST_GENERAL, + ffi_flags=-1) + calldescr.get_call_conv = lambda: ffi # <==== hack + funcbox = ConstInt(rawstart) + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + i4 = BoxInt() + i5 = BoxInt() + i6 = BoxInt() + c = ConstInt(-1) + faildescr = BasicFailDescr(1) + # we must call it repeatedly: if the stack pointer gets increased + # by 40 bytes by the STDCALL call, and if we don't expect it, + # then we are going to get our stack emptied unexpectedly by + # several repeated calls + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i3, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i4, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i5, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.CALL_RELEASE_GIL, + [funcbox, i1, c, c, c, c, c, c, c, c, i2], + i6, descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + + ResOperation(rop.FINISH, [i3, i4, i5, i6], None, + descr=BasicFailDescr(0)) + ] + ops[1].setfailargs([]) + ops[3].setfailargs([]) + ops[5].setfailargs([]) + ops[7].setfailargs([]) + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + self.cpu.set_future_value_int(0, 123450) + self.cpu.set_future_value_int(1, 123408) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == 42 + assert self.cpu.get_latest_value_int(1) == 42 + assert self.cpu.get_latest_value_int(2) == 42 + assert self.cpu.get_latest_value_int(3) == 42 + + class TestDebuggingAssembler(object): def setup_method(self, meth): self.cpu = CPU(rtyper=None, stats=FakeStats()) diff --git a/pypy/jit/codewriter/support.py 
b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -91,9 +91,12 @@ reds_v = op.args[2+numgreens:] assert len(reds_v) == numreds # - def _sort(args_v): + def _sort(args_v, is_green): from pypy.jit.metainterp.history import getkind lst = [v for v in args_v if v.concretetype is not lltype.Void] + if is_green: + assert len(lst) == len(args_v), ( + "not supported so far: 'greens' variables contain Void") _kind2count = {'int': 1, 'ref': 2, 'float': 3} lst2 = sorted(lst, key=lambda v: _kind2count[getkind(v.concretetype)]) # a crash here means that you have to reorder the variable named in @@ -102,7 +105,7 @@ assert lst == lst2 return lst # - return (_sort(greens_v), _sort(reds_v)) + return (_sort(greens_v, True), _sort(reds_v, False)) def maybe_on_top_of_llinterp(rtyper, fnptr): # Run a generated graph on top of the llinterp for testing. diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -18,26 +18,27 @@ def __init__(self, funcval, cpu, prepare_op): self.funcval = funcval self.opargs = [] - argtypes, restype = self._get_signature(funcval) + argtypes, restype, flags = self._get_signature(funcval) self.descr = cpu.calldescrof_dynamic(argtypes, restype, - EffectInfo.MOST_GENERAL) + EffectInfo.MOST_GENERAL, + ffi_flags=flags) # ^^^ may be None if unsupported self.prepare_op = prepare_op self.delayed_ops = [] def _get_signature(self, funcval): """ - given the funcval, return a tuple (argtypes, restype), where the - actuall types are libffi.types.* + given the funcval, return a tuple (argtypes, restype, flags), where + the actuall types are libffi.types.* The implementation is tricky because we have three possible cases: - translated: the easiest case, we can just cast back the pointer to - the original Func instance and read .argtypes and .restype + the original Func 
instance and read .argtypes, .restype and .flags - completely untranslated: this is what we get from test_optimizeopt tests. funcval contains a FakeLLObject whose _fake_class is Func, - and we can just get .argtypes and .restype + and we can just get .argtypes, .restype and .flags - partially translated: this happens when running metainterp tests: funcval contains the low-level equivalent of a Func, and thus we @@ -49,10 +50,10 @@ llfunc = funcval.box.getref_base() if we_are_translated(): func = cast_base_ptr_to_instance(Func, llfunc) - return func.argtypes, func.restype + return func.argtypes, func.restype, func.flags elif getattr(llfunc, '_fake_class', None) is Func: # untranslated - return llfunc.argtypes, llfunc.restype + return llfunc.argtypes, llfunc.restype, llfunc.flags else: # partially translated # llfunc contains an opaque pointer to something like the following: @@ -63,7 +64,7 @@ # because we don't have the exact TYPE to cast to. Instead, we # just fish it manually :-( f = llfunc._obj.container - return f.inst_argtypes, f.inst_restype + return f.inst_argtypes, f.inst_restype, f.inst_flags class OptFfiCall(Optimization): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -14,12 +14,15 @@ can check that the signature of a call is really what you want. 
""" - def __init__(self, arg_types, typeinfo): + def __init__(self, arg_types, typeinfo, flags): self.arg_types = arg_types self.typeinfo = typeinfo # return type + self.flags = flags def __eq__(self, other): - return self.arg_types == other.arg_types and self.typeinfo == other.typeinfo + return (self.arg_types == other.arg_types and + self.typeinfo == other.typeinfo and + self.flags == other.get_ffi_flags()) class FakeLLObject(object): @@ -41,14 +44,17 @@ vable_token_descr = LLtypeMixin.valuedescr valuedescr = LLtypeMixin.valuedescr - int_float__int = MyCallDescr('if', 'i') + int_float__int_42 = MyCallDescr('if', 'i', 42) + int_float__int_43 = MyCallDescr('if', 'i', 43) funcptr = FakeLLObject() func = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=42) func2 = FakeLLObject(_fake_class=Func, argtypes=[types.sint, types.double], - restype=types.sint) + restype=types.sint, + flags=43) # def calldescr(cpu, FUNC, oopspecindex, extraeffect=None): if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: @@ -83,7 +89,7 @@ """ expected = """ [i0, f1] - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1) @@ -123,7 +129,7 @@ [i0, f1, p2] i4 = force_token() setfield_gc(p2, i4, descr=vable_token_descr) - i3 = call_release_gil(12345, i0, f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [p2] guard_no_exception() [p2] jump(i3, f1, p2) @@ -220,7 +226,7 @@ call(0, ConstPtr(func), descr=libffi_prepare) # # this "nested" call is nicely optimized - i4 = call_release_gil(67890, i0, f1, descr=int_float__int) + i4 = call_release_gil(67890, i0, f1, descr=int_float__int_43) guard_not_forced() [] guard_no_exception() [] # @@ -265,7 +271,7 @@ expected = """ [i0, f1, p2] setfield_gc(p2, i0, descr=valuedescr) - i3 = call_release_gil(12345, i0, 
f1, descr=int_float__int) + i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42) guard_not_forced() [] guard_no_exception() [] jump(i3, f1, p2) diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -252,6 +252,41 @@ self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, 'jump': 1}) + def test_void_red_variable(self): + mydriver = JitDriver(greens=[], reds=['a', 'm']) + def f1(m): + a = None + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass # other case + self.meta_interp(f1, [18]) + + def test_bug_constant_rawptrs(self): + py.test.skip("crashes because a is a constant") + from pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.nullptr(rffi.VOIDP.TO) + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + self.meta_interp(f1, [18]) + + def test_bug_rawptrs(self): + from pypy.rpython.lltypesystem import lltype, rffi + mydriver = JitDriver(greens=['a'], reds=['m']) + def f1(m): + a = lltype.malloc(rffi.VOIDP.TO, 5, flavor='raw') + while m > 0: + mydriver.jit_merge_point(a=a, m=m) + m = m - 1 + if m == 10: + pass + lltype.free(a, flavor='raw') + self.meta_interp(f1, [18]) + class TestLLWarmspot(WarmspotTests, LLJitMixin): CPUClass = runner.LLtypeCPU diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -130,8 +130,15 @@ results = _find_jit_marker(graphs, 'jit_merge_point') if not results: raise Exception("no jit_merge_point found!") + seen = set([graph for graph, block, pos in results]) + assert len(seen) == len(results), ( + "found several jit_merge_points in the same graph") return results +def locate_jit_merge_point(graph): + [(graph, block, pos)] = find_jit_merge_points([graph]) + return 
block, pos, block.operations[pos] + def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') @@ -235,7 +242,7 @@ def split_graph_and_record_jitdriver(self, graph, block, pos): op = block.operations[pos] jd = JitDriverStaticData() - jd._jit_merge_point_pos = (graph, op) + jd._jit_merge_point_in = graph args = op.args[2:] s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] @@ -245,7 +252,8 @@ graph.startblock = support.split_before_jit_merge_point(*jmpp) graph.startblock.isstartblock = True # a crash in the following checkgraph() means that you forgot - # to list some variable in greens=[] or reds=[] in JitDriver. + # to list some variable in greens=[] or reds=[] in JitDriver, + # or that a jit_merge_point() takes a constant as an argument. checkgraph(graph) for v in graph.getargs(): assert isinstance(v, Variable) @@ -503,7 +511,8 @@ self.make_args_specification(jd) def make_args_specification(self, jd): - graph, op = jd._jit_merge_point_pos + graph = jd._jit_merge_point_in + _, _, op = locate_jit_merge_point(graph) greens_v, reds_v = support.decode_hp_hint_args(op) ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] @@ -551,7 +560,7 @@ assert jitdriver in sublists, \ "can_enter_jit with no matching jit_merge_point" jd, sublist = sublists[jitdriver] - origportalgraph = jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in if graph is not origportalgraph: sublist.append((graph, block, index)) jd.no_loop_header = False @@ -581,7 +590,7 @@ can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)] for graph, block, index in can_enter_jits: - if graph is jd._jit_merge_point_pos[0]: + if graph is jd._jit_merge_point_in: continue op = block.operations[index] @@ -639,7 +648,7 @@ # while 1: # more stuff # - origportalgraph = jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in portalgraph = jd.portal_graph 
PORTALFUNC = jd._PORTAL_FUNCTYPE @@ -655,11 +664,13 @@ portalfunc_ARGS = [] nums = {} for i, ARG in enumerate(PORTALFUNC.ARGS): + kind = history.getkind(ARG) + assert kind != 'void' if i < len(jd.jitdriver.greens): color = 'green' else: color = 'red' - attrname = '%s_%s' % (color, history.getkind(ARG)) + attrname = '%s_%s' % (color, kind) count = nums.get(attrname, 0) nums[attrname] = count + 1 portalfunc_ARGS.append((ARG, attrname, count)) @@ -791,14 +802,7 @@ # ____________________________________________________________ # Now mutate origportalgraph to end with a call to portal_runner_ptr # - _, op = jd._jit_merge_point_pos - for origblock in origportalgraph.iterblocks(): - if op in origblock.operations: - break - else: - assert False, "lost the operation %r in the graph %r" % ( - op, origportalgraph) - origindex = origblock.operations.index(op) + origblock, origindex, op = locate_jit_merge_point(origportalgraph) assert op.opname == 'jit_marker' assert op.args[0].value == 'jit_merge_point' greens_v, reds_v = support.decode_hp_hint_args(op) diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -124,7 +124,7 @@ # Hash of lltype or ootype object. # Only supports strings, unicodes and regular instances, # as well as primitives that can meaningfully be cast to Signed. 
- if isinstance(TYPE, lltype.Ptr): + if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc': if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE: return rstr.LLHelpers.ll_strhash(x) # assumed not null else: @@ -140,7 +140,7 @@ else: return 0 else: - return lltype.cast_primitive(lltype.Signed, x) + return rffi.cast(lltype.Signed, x) @specialize.ll_and_arg(3) def set_future_value(cpu, j, value, typecode): diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -292,7 +292,7 @@ raise break new_frame = space.createframe(code, w_func.w_func_globals, - w_func.closure) + w_func) new_frame.locals_stack_w[0] = w_item w_res = new_frame.run() result_w.append(w_res) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -687,11 +687,15 @@ # support for the "string escape" codec # This is a bytes-to bytes transformation - at unwrap_spec(errors='str_or_None') -def escape_encode(space, w_string, errors='strict'): - w_repr = space.repr(w_string) - w_result = space.getslice(w_repr, space.wrap(1), space.wrap(-1)) - return space.newtuple([w_result, space.len(w_string)]) + at unwrap_spec(data=str, errors='str_or_None') +def escape_encode(space, data, errors='strict'): + from pypy.objspace.std.stringobject import string_escape_encode + result = string_escape_encode(data, quote="'") + start = 1 + end = len(result) - 1 + assert end >= 0 + w_result = space.wrap(result[start:end]) + return space.newtuple([w_result, space.wrap(len(data))]) @unwrap_spec(data=str, errors='str_or_None') def escape_decode(space, data, errors='strict'): diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -102,7 +102,6 @@ def 
test_indexerror(self): test = "\\" # trailing backslash - raises (ValueError, test.decode,'string-escape') def test_charmap_decode(self): @@ -292,6 +291,10 @@ assert '\\0f'.decode('string_escape') == chr(0) + 'f' assert '\\08'.decode('string_escape') == chr(0) + '8' + def test_escape_encode(self): + assert '"'.encode('string_escape') == '"' + assert "'".encode('string_escape') == "\\'" + def test_decode_utf8_different_case(self): constant = u"a" assert constant.encode("utf-8") == constant.encode("UTF-8") diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -43,11 +43,11 @@ def switch(self, w_to): to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None: - if self is to: # double-switch to myself: no-op - return get_result() if to.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") + if self is to: # double-switch to myself: no-op + return get_result() if self.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -8,24 +8,12 @@ class WeakrefLifeline(W_Root): + cached_weakref_index = -1 + cached_proxy_index = -1 + def __init__(self, space): self.space = space self.refs_weak = [] - self.cached_weakref_index = -1 - self.cached_proxy_index = -1 - - def __del__(self): - """This runs when the interp-level object goes away, and allows - its lifeline to go away. The purpose of this is to activate the - callbacks even if there is no __del__ method on the interp-level - W_Root subclass implementing the object. 
- """ - for i in range(len(self.refs_weak) - 1, -1, -1): - w_ref = self.refs_weak[i]() - if w_ref is not None and w_ref.w_callable is not None: - w_ref.enqueue_for_destruction(self.space, - W_WeakrefBase.activate_callback, - 'weakref callback of ') def clear_all_weakrefs(self): """Clear all weakrefs. This is called when an app-level object has @@ -39,12 +27,11 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. - @jit.dont_look_inside - def get_or_make_weakref(self, space, w_subtype, w_obj, w_callable): + def get_or_make_weakref(self, w_subtype, w_obj): + space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) is_weakreftype = space.is_w(w_weakreftype, w_subtype) - can_reuse = space.is_w(w_callable, space.w_None) - if is_weakreftype and can_reuse and self.cached_weakref_index >= 0: + if is_weakreftype and self.cached_weakref_index >= 0: w_cached = self.refs_weak[self.cached_weakref_index]() if w_cached is not None: return w_cached @@ -52,16 +39,15 @@ self.cached_weakref_index = -1 w_ref = space.allocate_instance(W_Weakref, w_subtype) index = len(self.refs_weak) - W_Weakref.__init__(w_ref, space, w_obj, w_callable) + W_Weakref.__init__(w_ref, space, w_obj, None) self.refs_weak.append(weakref.ref(w_ref)) - if is_weakreftype and can_reuse: + if is_weakreftype: self.cached_weakref_index = index return w_ref - @jit.dont_look_inside - def get_or_make_proxy(self, space, w_obj, w_callable): - can_reuse = space.is_w(w_callable, space.w_None) - if can_reuse and self.cached_proxy_index >= 0: + def get_or_make_proxy(self, w_obj): + space = self.space + if self.cached_proxy_index >= 0: w_cached = self.refs_weak[self.cached_proxy_index]() if w_cached is not None: return w_cached @@ -69,12 +55,11 @@ self.cached_proxy_index = -1 index = len(self.refs_weak) if space.is_true(space.callable(w_obj)): - w_proxy = W_CallableProxy(space, w_obj, w_callable) + w_proxy = W_CallableProxy(space, w_obj, None) else: - w_proxy = 
W_Proxy(space, w_obj, w_callable) + w_proxy = W_Proxy(space, w_obj, None) self.refs_weak.append(weakref.ref(w_proxy)) - if can_reuse: - self.cached_proxy_index = index + self.cached_proxy_index = index return w_proxy def get_any_weakref(self, space): @@ -90,6 +75,45 @@ return w_ref return space.w_None + +class WeakrefLifelineWithCallbacks(WeakrefLifeline): + + def __init__(self, space, oldlifeline=None): + self.space = space + if oldlifeline is None: + self.refs_weak = [] + else: + self.refs_weak = oldlifeline.refs_weak + + def __del__(self): + """This runs when the interp-level object goes away, and allows + its lifeline to go away. The purpose of this is to activate the + callbacks even if there is no __del__ method on the interp-level + W_Root subclass implementing the object. + """ + for i in range(len(self.refs_weak) - 1, -1, -1): + w_ref = self.refs_weak[i]() + if w_ref is not None and w_ref.w_callable is not None: + w_ref.enqueue_for_destruction(self.space, + W_WeakrefBase.activate_callback, + 'weakref callback of ') + + def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): + space = self.space + w_ref = space.allocate_instance(W_Weakref, w_subtype) + W_Weakref.__init__(w_ref, space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_ref)) + return w_ref + + def make_proxy_with_callback(self, w_obj, w_callable): + space = self.space + if space.is_true(space.callable(w_obj)): + w_proxy = W_CallableProxy(space, w_obj, w_callable) + else: + w_proxy = W_Proxy(space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_proxy)) + return w_proxy + # ____________________________________________________________ class Dummy: @@ -103,8 +127,7 @@ class W_WeakrefBase(Wrappable): def __init__(w_self, space, w_obj, w_callable): - if space.is_w(w_callable, space.w_None): - w_callable = None + assert w_callable is not space.w_None # should be really None w_self.space = space assert w_obj is not None w_self.w_obj_weak = weakref.ref(w_obj) @@ -177,16 
+200,39 @@ def descr__ne__(self, space, w_ref2): return space.not_(space.eq(self, w_ref2)) +def getlifeline(space, w_obj): + lifeline = w_obj.getweakref() + if lifeline is None: + lifeline = WeakrefLifeline(space) + w_obj.setweakref(space, lifeline) + return lifeline + +def getlifelinewithcallbacks(space, w_obj): + lifeline = w_obj.getweakref() + if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None + oldlifeline = lifeline + lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) + w_obj.setweakref(space, lifeline) + return lifeline + + at jit.dont_look_inside +def get_or_make_weakref(space, w_subtype, w_obj): + return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) + + at jit.dont_look_inside +def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: raise OperationError(space.w_TypeError, space.wrap( "__new__ expected at most 2 arguments")) - lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_weakref(space, w_subtype, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + return get_or_make_weakref(space, w_subtype, w_obj) + else: + return make_weakref_with_callback(space, w_subtype, w_obj, w_callable) W_Weakref.typedef = TypeDef("weakref", __doc__ = """A weak reference to an object 'obj'. 
A 'callback' can be given, @@ -239,15 +285,23 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) + at jit.dont_look_inside +def get_or_make_proxy(space, w_obj): + return getlifeline(space, w_obj).get_or_make_proxy(w_obj) + + at jit.dont_look_inside +def make_proxy_with_callback(space, w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 'callback', if given, is called with the proxy as an argument when 'obj' is about to be finalized.""" - lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_proxy(space, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + return get_or_make_proxy(space, w_obj) + else: + return make_proxy_with_callback(space, w_obj, w_callable) def descr__new__proxy(space, w_subtype, w_obj, w_callable=None): raise OperationError( diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -369,6 +369,26 @@ return A raises(TypeError, tryit) + def test_proxy_to_dead_object(self): + import _weakref, gc + class A(object): + pass + p = _weakref.proxy(A()) + gc.collect() + raises(ReferenceError, "p + 1") + + def test_proxy_with_callback(self): + import _weakref, gc + class A(object): + pass + a2 = A() + def callback(proxy): + a2.seen = proxy + p = _weakref.proxy(A(), callback) + gc.collect() + raises(ReferenceError, "p + 1") + assert a2.seen is p + def test_repr(self): import _weakref, gc for kind in ('ref', 'proxy'): diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -446,7 +446,9 @@ result = self.buffer[pos:pos + 
n] self.pos += n else: - result = self.buffer + pos = self.pos + assert pos >= 0 + result = self.buffer[pos:] self.pos = 0 self.buffer = "" self.readlength += len(result) diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -274,14 +274,14 @@ pass del bz2f # delete from this frame, which is captured in the traceback - def test_read_chunk10(self): + def test_read_chunk9(self): from bz2 import BZ2File self.create_temp_file() bz2f = BZ2File(self.temppath) text_read = "" while True: - data = bz2f.read(10) + data = bz2f.read(9) # 9 doesn't divide evenly into data length if not data: break text_read = "%s%s" % (text_read, data) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -57,7 +57,7 @@ code = space.interp_w(PyCode, w_code) w_globals = from_ref(space, py_frame.c_f_globals) - frame = space.FrameClass(space, code, w_globals, closure=None) + frame = space.FrameClass(space, code, w_globals, outer_func=None) frame.f_lineno = py_frame.c_f_lineno w_obj = space.wrap(frame) track_reference(space, py_obj, w_obj) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,42 +1,45 @@ - from pypy.interpreter.mixedmodule import MixedModule + class Module(MixedModule): - applevel_name = 'numpy' interpleveldefs = { 'array': 'interp_numarray.SingleDimArray', 'dtype': 'interp_dtype.W_Dtype', + 'ufunc': 'interp_ufuncs.W_Ufunc', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', 'ones': 'interp_numarray.ones', 'fromstring': 'interp_support.fromstring', + } - # ufuncs - 'abs': 'interp_ufuncs.absolute', - 'absolute': 'interp_ufuncs.absolute', - 'add': 'interp_ufuncs.add', - 'copysign': 'interp_ufuncs.copysign', - 
'divide': 'interp_ufuncs.divide', - 'exp': 'interp_ufuncs.exp', - 'fabs': 'interp_ufuncs.fabs', - 'floor': 'interp_ufuncs.floor', - 'maximum': 'interp_ufuncs.maximum', - 'minimum': 'interp_ufuncs.minimum', - 'multiply': 'interp_ufuncs.multiply', - 'negative': 'interp_ufuncs.negative', - 'reciprocal': 'interp_ufuncs.reciprocal', - 'sign': 'interp_ufuncs.sign', - 'subtract': 'interp_ufuncs.subtract', - 'sin': 'interp_ufuncs.sin', - 'cos': 'interp_ufuncs.cos', - 'tan': 'interp_ufuncs.tan', - 'arcsin': 'interp_ufuncs.arcsin', - 'arccos': 'interp_ufuncs.arccos', - 'arctan': 'interp_ufuncs.arctan', - } + # ufuncs + for exposed, impl in [ + ("abs", "absolute"), + ("absolute", "absolute"), + ("add", "add"), + ("arccos", "arccos"), + ("arcsin", "arcsin"), + ("arctan", "arctan"), + ("copysign", "copysign"), + ("cos", "cos"), + ("divide", "divide"), + ("exp", "exp"), + ("fabs", "fabs"), + ("floor", "floor"), + ("maximum", "maximum"), + ("minimum", "minimum"), + ("multiply", "multiply"), + ("negative", "negative"), + ("reciprocal", "reciprocal"), + ("sign", "sign"), + ("sin", "sin"), + ("subtract", "subtract"), + ("tan", "tan"), + ]: + interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl appleveldefs = { 'average': 'app_numpy.average', diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -20,6 +20,7 @@ class FakeSpace(object): w_ValueError = None + w_TypeError = None def __init__(self): """NOT_RPYTHON""" diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -53,7 +53,9 @@ VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True})) -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype): +def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, 
valtype, + expected_size=None): + class Box(BaseBox): def __init__(self, val): self.val = val @@ -113,6 +115,8 @@ W_LowLevelDtype.aliases = aliases W_LowLevelDtype.applevel_types = applevel_types W_LowLevelDtype.num_bytes = rffi.sizeof(T) + if expected_size is not None: + assert W_LowLevelDtype.num_bytes == expected_size return W_LowLevelDtype @@ -282,10 +286,21 @@ applevel_types = [], T = rffi.SIGNEDCHAR, valtype = rffi.SIGNEDCHAR._type, + expected_size = 1, ) class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): - def unwrap(self, space, w_item): - return self.adapt_val(space.int_w(space.int(w_item))) + pass + +W_Int16Dtype = create_low_level_dtype( + num = 3, kind = SIGNEDLTR, name = "int16", + aliases = ["int16"], + applevel_types = [], + T = rffi.SHORT, + valtype = rffi.SHORT._type, + expected_size = 2, +) +class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype): + pass W_Int32Dtype = create_low_level_dtype( num = 5, kind = SIGNEDLTR, name = "int32", @@ -293,6 +308,7 @@ applevel_types = [], T = rffi.INT, valtype = rffi.INT._type, + expected_size = 4, ) class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype): pass @@ -303,6 +319,7 @@ applevel_types = ["long"], T = rffi.LONGLONG, valtype = rffi.LONGLONG._type, + expected_size = 8, ) class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype): pass @@ -313,6 +330,7 @@ applevel_types = ["float"], T = lltype.Float, valtype = float, + expected_size = 8, ) class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype): def unwrap(self, space, w_item): @@ -323,7 +341,7 @@ ALL_DTYPES = [ W_BoolDtype, - W_Int8Dtype, W_Int32Dtype, W_Int64Dtype, + W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, W_Float64Dtype ] @@ -353,4 +371,4 @@ kind = interp_attrproperty("kind", cls=W_Dtype), shape = GetSetProperty(W_Dtype.descr_get_shape), ) -W_Dtype.typedef.acceptable_as_base_class = False \ No newline at end of file +W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py 
b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -53,90 +53,52 @@ i += 1 return arr - def _unaryop_impl(w_ufunc): + def _unaryop_impl(ufunc_name): def impl(self, space): - return w_ufunc(space, self) - return func_with_new_name(impl, "unaryop_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) - descr_pos = _unaryop_impl(interp_ufuncs.positive) - descr_neg = _unaryop_impl(interp_ufuncs.negative) - descr_abs = _unaryop_impl(interp_ufuncs.absolute) + descr_pos = _unaryop_impl("positive") + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") - def _binop_impl(w_ufunc): + def _binop_impl(ufunc_name): def impl(self, space, w_other): - return w_ufunc(space, self, w_other) - return func_with_new_name(impl, "binop_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) - descr_add = _binop_impl(interp_ufuncs.add) - descr_sub = _binop_impl(interp_ufuncs.subtract) - descr_mul = _binop_impl(interp_ufuncs.multiply) - descr_div = _binop_impl(interp_ufuncs.divide) - descr_pow = _binop_impl(interp_ufuncs.power) - descr_mod = _binop_impl(interp_ufuncs.mod) + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_pow = _binop_impl("power") + descr_mod = _binop_impl("mod") - def _binop_right_impl(w_ufunc): + def _binop_right_impl(ufunc_name): def impl(self, space, w_other): w_other = scalar_w(space, interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()), w_other ) - return w_ufunc(space, w_other, self) - return func_with_new_name(impl, "binop_right_%s_impl" % w_ufunc.__name__) + return getattr(interp_ufuncs.get(space), 
ufunc_name).call(space, [w_other, self]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) - descr_radd = _binop_right_impl(interp_ufuncs.add) - descr_rsub = _binop_right_impl(interp_ufuncs.subtract) - descr_rmul = _binop_right_impl(interp_ufuncs.multiply) - descr_rdiv = _binop_right_impl(interp_ufuncs.divide) - descr_rpow = _binop_right_impl(interp_ufuncs.power) - descr_rmod = _binop_right_impl(interp_ufuncs.mod) + descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") + descr_rmul = _binop_right_impl("multiply") + descr_rdiv = _binop_right_impl("divide") + descr_rpow = _binop_right_impl("power") + descr_rmod = _binop_right_impl("mod") - def _reduce_sum_prod_impl(op_name, init): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'self', 'result', 'res_dtype']) + def _reduce_ufunc_impl(ufunc_name): + def impl(self, space): + return getattr(interp_ufuncs.get(space), ufunc_name).descr_reduce(space, self) + return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name) - def loop(self, res_dtype, result, size): - i = 0 - while i < size: - reduce_driver.jit_merge_point(signature=self.signature, - self=self, res_dtype=res_dtype, - size=size, i=i, result=result) - result = getattr(res_dtype, op_name)( - result, - self.eval(i).convert_to(res_dtype) - ) - i += 1 - return result - - def impl(self, space): - dtype = interp_ufuncs.find_unaryop_result_dtype( - space, self.find_dtype(), promote_to_largest=True - ) - result = dtype.adapt_val(init) - return loop(self, dtype, result, self.find_size()).wrap(space) - return func_with_new_name(impl, "reduce_%s_impl" % op_name) - - def _reduce_max_min_impl(op_name): - reduce_driver = jit.JitDriver(greens=['signature'], - reds = ['i', 'size', 'self', 'result', 'dtype']) - def loop(self, result, size): - i = 1 - dtype = self.find_dtype() - while i < size: - reduce_driver.jit_merge_point(signature=self.signature, - self=self, dtype=dtype, - size=size, i=i, 
result=result) - result = getattr(dtype, op_name)(result, self.eval(i)) - i += 1 - return result - - def impl(self, space): - size = self.find_size() - if size == 0: - raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" \ - % op_name)) - return loop(self, self.eval(0), size).wrap(space) - return func_with_new_name(impl, "reduce_%s_impl" % op_name) + descr_sum = _reduce_ufunc_impl("add") + descr_prod = _reduce_ufunc_impl("multiply") + descr_max = _reduce_ufunc_impl("maximum") + descr_min = _reduce_ufunc_impl("minimum") def _reduce_argmax_argmin_impl(op_name): reduce_driver = jit.JitDriver(greens=['signature'], @@ -192,10 +154,6 @@ def descr_any(self, space): return space.wrap(self._any()) - descr_sum = _reduce_sum_prod_impl("add", 0) - descr_prod = _reduce_sum_prod_impl("mul", 1) - descr_max = _reduce_max_min_impl("max") - descr_min = _reduce_max_min_impl("min") descr_argmax = _reduce_argmax_argmin_impl("max") descr_argmin = _reduce_argmax_argmin_impl("min") @@ -248,7 +206,7 @@ res = "array([" + ", ".join(concrete._getnums(False)) + "]" dtype = concrete.find_dtype() if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)): + dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or not self.find_size(): res += ", dtype=" + dtype.name res += ")" return space.wrap(res) @@ -259,7 +217,15 @@ return space.wrap("[" + " ".join(concrete._getnums(True)) + "]") def descr_getitem(self, space, w_idx): - # TODO: indexing by tuples + # TODO: indexing by arrays and lists + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length == 0: + return space.wrap(self) + if length > 1: # only one dimension for now. 
+ raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: # Single index @@ -273,8 +239,19 @@ return space.wrap(res) def descr_setitem(self, space, w_idx, w_value): - # TODO: indexing by tuples and lists + # TODO: indexing by arrays and lists self.invalidated() + if space.isinstance_w(w_idx, space.w_tuple): + length = space.len_w(w_idx) + if length > 1: # only one dimension for now. + raise OperationError(space.w_IndexError, + space.wrap("invalid index")) + if length == 0: + w_idx = space.newslice(space.wrap(0), + space.wrap(self.find_size()), + space.wrap(1)) + else: + w_idx = space.getitem(w_idx, space.wrap(0)) start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size()) if step == 0: diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -1,57 +1,160 @@ +from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import interp_dtype, signature +from pypy.rlib import jit from pypy.tool.sourcetools import func_with_new_name -def ufunc(func=None, promote_to_float=False, promote_bools=False): - if func is None: - return lambda func: ufunc(func, promote_to_float, promote_bools) - call_sig = signature.Call1(func) - def impl(space, w_obj): +reduce_driver = jit.JitDriver( + greens = ["signature"], + reds = ["i", "size", "self", "dtype", "value", "obj"] +) + +class W_Ufunc(Wrappable): + _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"] + + def __init__(self, name, promote_to_float, promote_bools, identity): + self.name = name + 
self.promote_to_float = promote_to_float + self.promote_bools = promote_bools + + self.identity = identity + + def descr_repr(self, space): + return space.wrap("" % self.name) + + def descr_get_identity(self, space): + if self.identity is None: + return space.w_None + return self.identity.wrap(space) + + def descr_call(self, space, __args__): + try: + args_w = __args__.fixedunpack(self.argcount) + except ValueError, e: + raise OperationError(space.w_TypeError, space.wrap(str(e))) + return self.call(space, args_w) + + def descr_reduce(self, space, w_obj): + from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar + + if self.argcount != 2: + raise OperationError(space.w_ValueError, space.wrap("reduce only " + "supported for binary functions")) + + assert isinstance(self, W_Ufunc2) + obj = convert_to_array(space, w_obj) + if isinstance(obj, Scalar): + raise OperationError(space.w_TypeError, space.wrap("cannot reduce " + "on a scalar")) + + size = obj.find_size() + dtype = find_unaryop_result_dtype( + space, obj.find_dtype(), + promote_to_largest=True + ) + start = 0 + if self.identity is None: + if size == 0: + raise operationerrfmt(space.w_ValueError, "zero-size array to " + "%s.reduce without identity", self.name) + value = obj.eval(0).convert_to(dtype) + start += 1 + else: + value = self.identity.convert_to(dtype) + new_sig = signature.Signature.find_sig([ + self.reduce_signature, obj.signature + ]) + return self.reduce(new_sig, start, value, obj, dtype, size).wrap(space) + + def reduce(self, signature, start, value, obj, dtype, size): + i = start + while i < size: + reduce_driver.jit_merge_point(signature=signature, self=self, + value=value, obj=obj, i=i, + dtype=dtype, size=size) + value = self.func(dtype, value, obj.eval(i).convert_to(dtype)) + i += 1 + return value + +class W_Ufunc1(W_Ufunc): + argcount = 1 + + def __init__(self, func, name, promote_to_float=False, promote_bools=False, + identity=None): + + W_Ufunc.__init__(self, name, 
promote_to_float, promote_bools, identity) + self.func = func + self.signature = signature.Call1(func) + + def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call1, convert_to_array, Scalar) + [w_obj] = args_w w_obj = convert_to_array(space, w_obj) res_dtype = find_unaryop_result_dtype(space, w_obj.find_dtype(), - promote_to_float=promote_to_float, - promote_bools=promote_bools, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, ) if isinstance(w_obj, Scalar): - return func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) + return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space) - new_sig = signature.Signature.find_sig([call_sig, w_obj.signature]) + new_sig = signature.Signature.find_sig([self.signature, w_obj.signature]) w_res = Call1(new_sig, res_dtype, w_obj) w_obj.add_invalidates(w_res) return w_res - return func_with_new_name(impl, "%s_dispatcher" % func.__name__) -def ufunc2(func=None, promote_to_float=False, promote_bools=False): - if func is None: - return lambda func: ufunc2(func, promote_to_float, promote_bools) - call_sig = signature.Call2(func) - def impl(space, w_lhs, w_rhs): +class W_Ufunc2(W_Ufunc): + argcount = 2 + + def __init__(self, func, name, promote_to_float=False, promote_bools=False, + identity=None): + + W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) + self.func = func + self.signature = signature.Call2(func) + self.reduce_signature = signature.BaseSignature() + + def call(self, space, args_w): from pypy.module.micronumpy.interp_numarray import (Call2, convert_to_array, Scalar) + [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) res_dtype = find_binop_result_dtype(space, w_lhs.find_dtype(), w_rhs.find_dtype(), - promote_to_float=promote_to_float, - promote_bools=promote_bools, + promote_to_float=self.promote_to_float, + promote_bools=self.promote_bools, ) if isinstance(w_lhs, 
Scalar) and isinstance(w_rhs, Scalar): - return func(res_dtype, w_lhs.value, w_rhs.value).wrap(space) + return self.func(res_dtype, w_lhs.value, w_rhs.value).wrap(space) new_sig = signature.Signature.find_sig([ - call_sig, w_lhs.signature, w_rhs.signature + self.signature, w_lhs.signature, w_rhs.signature ]) w_res = Call2(new_sig, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res - return func_with_new_name(impl, "%s_dispatcher" % func.__name__) + + +W_Ufunc.typedef = TypeDef("ufunc", + __module__ = "numpy", + + __call__ = interp2app(W_Ufunc.descr_call), + __repr__ = interp2app(W_Ufunc.descr_repr), + + identity = GetSetProperty(W_Ufunc.descr_get_identity), + nin = interp_attrproperty("argcount", cls=W_Ufunc), + + reduce = interp2app(W_Ufunc.descr_reduce), +) def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): @@ -74,7 +177,7 @@ assert False def find_unaryop_result_dtype(space, dt, promote_to_float=False, - promote_to_largest=False, promote_bools=False): + promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): return space.fromcache(interp_dtype.W_Int8Dtype) if promote_to_float: @@ -106,53 +209,65 @@ return space.fromcache(interp_dtype.W_Float64Dtype) -def ufunc_dtype_caller(ufunc_name, op_name, argcount, **kwargs): +def ufunc_dtype_caller(ufunc_name, op_name, argcount): if argcount == 1: - @ufunc(**kwargs) def impl(res_dtype, value): return getattr(res_dtype, op_name)(value) elif argcount == 2: - @ufunc2(**kwargs) def impl(res_dtype, lvalue, rvalue): return getattr(res_dtype, op_name)(lvalue, rvalue) return func_with_new_name(impl, ufunc_name) -for ufunc_def in [ - ("add", "add", 2), - ("subtract", "sub", 2), - ("multiply", "mul", 2), - ("divide", "div", 2, {"promote_bools": True}), - ("mod", "mod", 2, {"promote_bools": True}), - ("power", "pow", 2, {"promote_bools": True}), +class UfuncState(object): + def __init__(self, space): + 
"NOT_RPYTHON" + for ufunc_def in [ + ("add", "add", 2, {"identity": 0}), + ("subtract", "sub", 2), + ("multiply", "mul", 2, {"identity": 1}), + ("divide", "div", 2, {"promote_bools": True}), + ("mod", "mod", 2, {"promote_bools": True}), + ("power", "pow", 2, {"promote_bools": True}), - ("maximum", "max", 2), - ("minimum", "min", 2), + ("maximum", "max", 2), + ("minimum", "min", 2), - ("copysign", "copysign", 2, {"promote_to_float": True}), + ("copysign", "copysign", 2, {"promote_to_float": True}), - ("positive", "pos", 1), - ("negative", "neg", 1), - ("absolute", "abs", 1), - ("sign", "sign", 1, {"promote_bools": True}), - ("reciprocal", "reciprocal", 1), + ("positive", "pos", 1), + ("negative", "neg", 1), + ("absolute", "abs", 1), + ("sign", "sign", 1, {"promote_bools": True}), + ("reciprocal", "reciprocal", 1), - ("fabs", "fabs", 1, {"promote_to_float": True}), - ("floor", "floor", 1, {"promote_to_float": True}), - ("exp", "exp", 1, {"promote_to_float": True}), + ("fabs", "fabs", 1, {"promote_to_float": True}), + ("floor", "floor", 1, {"promote_to_float": True}), + ("exp", "exp", 1, {"promote_to_float": True}), - ("sin", "sin", 1, {"promote_to_float": True}), - ("cos", "cos", 1, {"promote_to_float": True}), - ("tan", "tan", 1, {"promote_to_float": True}), - ("arcsin", "arcsin", 1, {"promote_to_float": True}), - ("arccos", "arccos", 1, {"promote_to_float": True}), - ("arctan", "arctan", 1, {"promote_to_float": True}), -]: - ufunc_name = ufunc_def[0] - op_name = ufunc_def[1] - argcount = ufunc_def[2] - try: - extra_kwargs = ufunc_def[3] - except IndexError: - extra_kwargs = {} + ("sin", "sin", 1, {"promote_to_float": True}), + ("cos", "cos", 1, {"promote_to_float": True}), + ("tan", "tan", 1, {"promote_to_float": True}), + ("arcsin", "arcsin", 1, {"promote_to_float": True}), + ("arccos", "arccos", 1, {"promote_to_float": True}), + ("arctan", "arctan", 1, {"promote_to_float": True}), + ]: + self.add_ufunc(space, *ufunc_def) - globals()[ufunc_name] = 
ufunc_dtype_caller(ufunc_name, op_name, argcount, **extra_kwargs) + def add_ufunc(self, space, ufunc_name, op_name, argcount, extra_kwargs=None): + if extra_kwargs is None: + extra_kwargs = {} + + identity = extra_kwargs.get("identity") + if identity is not None: + identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity) + extra_kwargs["identity"] = identity + + func = ufunc_dtype_caller(ufunc_name, op_name, argcount) + if argcount == 1: + ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs) + elif argcount == 2: + ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs) + setattr(self, ufunc_name, ufunc) + +def get(space): + return space.fromcache(UfuncState) \ No newline at end of file diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -82,10 +82,20 @@ assert a[1] == 1 def test_add_int8(self): - from numpy import array + from numpy import array, dtype a = array(range(5), dtype="int8") b = a + a + assert b.dtype is dtype("int8") + for i in range(5): + assert b[i] == i * 2 + + def test_add_int16(self): + from numpy import array, dtype + + a = array(range(5), dtype="int16") + b = a + a + assert b.dtype is dtype("int16") for i in range(5): assert b[i] == i * 2 @@ -98,4 +108,4 @@ from numpy import dtype # You can't subclass dtype - raises(TypeError, type, "Foo", (dtype,), {}) \ No newline at end of file + raises(TypeError, type, "Foo", (dtype,), {}) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -52,10 +52,14 @@ from numpy import array, zeros a = array(range(5), float) assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" + a = array([], float) + assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([0.0, 0.0, 0.0, ..., 
0.0, 0.0, 0.0])" a = array(range(5), long) assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([], long) + assert repr(a) == "array([], dtype=int64)" a = array([True, False, True, False], "?") assert repr(a) == "array([True, False, True, False], dtype=bool)" @@ -84,6 +88,9 @@ a = array(range(5), dtype="int8") assert str(a) == "[0 1 2 3 4]" + a = array(range(5), dtype="int16") + assert str(a) == "[0 1 2 3 4]" + def test_str_slice(self): from numpy import array, zeros a = array(range(5), float) @@ -102,6 +109,16 @@ assert a[-1] == 8 raises(IndexError, "a[-6]") + def test_getitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)]") + for i in xrange(5): + assert a[(i,)] == i + b = a[()] + for i in xrange(5): + assert a[i] == b[i] + def test_setitem(self): from numpy import array a = array(range(5)) @@ -110,6 +127,17 @@ raises(IndexError, "a[5] = 0.0") raises(IndexError, "a[-6] = 3.0") + def test_setitem_tuple(self): + from numpy import array + a = array(range(5)) + raises(IndexError, "a[(1,2)] = [0,1]") + for i in xrange(5): + a[(i,)] = i+1 + assert a[i] == i+1 + a[()] = range(5) + for i in xrange(5): + assert a[i] == i + def test_setslice_array(self): from numpy import array a = array(range(5)) @@ -541,4 +569,4 @@ a = fromstring(self.data) for i in range(4): assert a[i] == i + 1 - raises(ValueError, fromstring, "abc") \ No newline at end of file + raises(ValueError, fromstring, "abc") diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -3,6 +3,32 @@ class AppTestUfuncs(BaseNumpyAppTest): + def test_ufunc_instance(self): + from numpy import add, ufunc + + assert isinstance(add, ufunc) + assert repr(add) == "" + assert repr(ufunc) == "" + + def test_ufunc_attrs(self): + from numpy import add, multiply, sin + + assert add.identity == 0 + assert multiply.identity == 1 + assert 
sin.identity is None + + assert add.nin == 2 + assert multiply.nin == 2 + assert sin.nin == 1 + + def test_wrong_arguments(self): + from numpy import add, sin + + raises(TypeError, add, 1) + raises(TypeError, add, 1, 2, 3) + raises(TypeError, sin, 1, 2) + raises(TypeError, sin) + def test_single_item(self): from numpy import negative, sign, minimum @@ -272,3 +298,16 @@ b = arctan(a) assert math.isnan(b[0]) + def test_reduce_errors(self): + from numpy import sin, add + + raises(ValueError, sin.reduce, [1, 2, 3]) + raises(TypeError, add.reduce, 1) + + def test_reduce(self): + from numpy import add, maximum + + assert add.reduce([1, 2, 3]) == 6 + assert maximum.reduce([1]) == 1 + assert maximum.reduce([1, 2, 3]) == 3 + raises(ValueError, maximum.reduce, []) \ No newline at end of file diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -19,7 +19,7 @@ def test_add(self): def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v = interp_ufuncs.add(self.space, ar, ar) + v = interp_ufuncs.get(self.space).add.call(self.space, [ar, ar]) return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) @@ -31,9 +31,10 @@ def test_floatadd(self): def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v = interp_ufuncs.add(self.space, - ar, - scalar_w(self.space, self.float64_dtype, self.space.wrap(4.5)) + v = interp_ufuncs.get(self.space).add.call(self.space, [ + ar, + scalar_w(self.space, self.float64_dtype, self.space.wrap(4.5)) + ], ) assert isinstance(v, BaseArray) return v.get_concrete().eval(3).val @@ -89,14 +90,21 @@ def test_max(self): space = self.space float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i, dtype=NonConstant(float64_dtype)) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = 
SingleDimArray(i, dtype=dtype) j = 0 while j < i: ar.get_concrete().setitem(j, float64_dtype.box(float(j))) j += 1 - return ar.descr_add(space, ar).descr_max(space).floatval + v = ar.descr_add(space, ar).descr_max(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -108,14 +116,21 @@ def test_min(self): space = self.space float64_dtype = self.float64_dtype + int64_dtype = self.int64_dtype def f(i): - ar = SingleDimArray(i, dtype=NonConstant(float64_dtype)) + if NonConstant(False): + dtype = int64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(i, dtype=dtype) j = 0 while j < i: ar.get_concrete().setitem(j, float64_dtype.box(float(j))) j += 1 - return ar.descr_add(space, ar).descr_min(space).floatval + v = ar.descr_add(space, ar).descr_min(space) + assert isinstance(v, FloatObject) + return v.floatval result = self.meta_interp(f, [5], listops=True, backendopt=True) self.check_loops({"getarrayitem_raw": 2, "float_add": 1, @@ -180,9 +195,9 @@ def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v1 = interp_ufuncs.add(space, ar, scalar_w(space, self.float64_dtype, space.wrap(4.5))) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, scalar_w(space, self.float64_dtype, space.wrap(4.5))]) assert isinstance(v1, BaseArray) - v2 = interp_ufuncs.multiply(space, v1, scalar_w(space, self.float64_dtype, space.wrap(4.5))) + v2 = interp_ufuncs.get(self.space).multiply.call(space, [v1, scalar_w(space, self.float64_dtype, space.wrap(4.5))]) v1.force_if_needed() assert isinstance(v2, BaseArray) return v2.get_concrete().eval(3).val @@ -200,8 +215,8 @@ space = self.space def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v1 = interp_ufuncs.add(space, ar, ar) - v2 = interp_ufuncs.negative(space, v1) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar]) + v2 = 
interp_ufuncs.get(self.space).negative.call(space, [v1]) return v2.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) @@ -216,13 +231,13 @@ def f(i): ar = SingleDimArray(i, dtype=self.float64_dtype) - v1 = interp_ufuncs.add(space, ar, ar) - v2 = interp_ufuncs.negative(space, v1) + v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) v2.get_concrete() for i in xrange(5): - v1 = interp_ufuncs.multiply(space, ar, ar) - v2 = interp_ufuncs.negative(space, v1) + v1 = interp_ufuncs.get(self.space).multiply.call(space, [ar, ar]) + v2 = interp_ufuncs.get(self.space).negative.call(space, [v1]) v2.get_concrete() self.meta_interp(f, [5], listops=True, backendopt=True) @@ -237,7 +252,7 @@ SingleDimSlice.signature, ar.signature ]) s = SingleDimSlice(0, step*i, step, i, ar, new_sig) - v = interp_ufuncs.add(self.space, s, s) + v = interp_ufuncs.get(self.space).add.call(self.space, [s, s]) return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) @@ -259,7 +274,7 @@ SingleDimSlice.signature, s1.signature ]) s2 = SingleDimSlice(0, step2*i, step2, i, ar, new_sig) - v = interp_ufuncs.add(self.space, s1, s2) + v = interp_ufuncs.get(self.space).add.call(self.space, [s1, s2]) return v.get_concrete().eval(3).val result = self.meta_interp(f, [5], listops=True, backendopt=True) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -21,6 +21,7 @@ PyFrame._virtualizable2_ = ['last_instr', 'pycode', 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', 'last_exception', 'lastblock', 'is_being_profiled', diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -8,7 +8,8 @@ modname == '__builtin__.interp_classobj' or modname == 
'__builtin__.functional' or modname == '__builtin__.descriptor' or - modname == 'thread.os_local'): + modname == 'thread.os_local' or + modname == 'thread.os_thread'): return True if '.' in modname: modname, _ = modname.split('.', 1) diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -34,7 +34,9 @@ def test_thread_local(): from pypy.module.thread.os_local import Local + from pypy.module.thread.os_thread import get_ident assert pypypolicy.look_inside_function(Local.getdict.im_func) + assert pypypolicy.look_inside_function(get_ident) def test_pypy_module(): from pypy.module._collections.interp_deque import W_Deque diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -174,7 +174,7 @@ guard_no_overflow(descr=...) i18 = force_token() --TICK-- - jump(p0, p1, p2, p3, p4, i8, p7, i17, p8, i9, i17, p10, p11, p12, descr=) + jump(..., descr=) """) def test_default_and_kw(self): @@ -396,3 +396,70 @@ --TICK-- jump(..., descr=) """) + + def test_global_closure_has_constant_cells(self): + log = self.run(""" + def make_adder(n): + def add(x): + return x + n + return add + add5 = make_adder(5) + def main(): + i = 0 + while i < 5000: + i = add5(i) # ID: call + """, []) + loop, = log.loops_by_id('call', is_entry_bridge=True) + assert loop.match(""" + guard_value(i6, 1, descr=...) + guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) + guard_value(i4, 0, descr=...) + guard_value(p3, ConstPtr(ptr14), descr=...) + i15 = getfield_gc_pure(p8, descr=) + i17 = int_lt(i15, 5000) + guard_true(i17, descr=...) + p18 = getfield_gc(p0, descr=) + guard_value(p18, ConstPtr(ptr19), descr=...) + p20 = getfield_gc(p18, descr=) + guard_value(p20, ConstPtr(ptr21), descr=...) 
+ guard_not_invalidated(descr=...) + # most importantly, there is no getarrayitem_gc here + p23 = call(ConstClass(getexecutioncontext), descr=) + p24 = getfield_gc(p23, descr=) + i25 = force_token() + p26 = getfield_gc(p23, descr=) + guard_isnull(p26, descr=...) + i27 = getfield_gc(p23, descr=) + i28 = int_is_zero(i27) + guard_true(i28, descr=...) + p30 = getfield_gc(ConstPtr(ptr29), descr=) + guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) + i32 = getfield_gc_pure(p30, descr=) + i33 = int_add_ovf(i15, i32) + guard_no_overflow(descr=...) + --TICK-- + jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=) + """) + + def test_local_closure_is_virtual(self): + log = self.run(""" + def main(): + i = 0 + while i < 5000: + def add(): + return i + 1 + i = add() # ID: call + """, []) + loop, = log.loops_by_id('call') + assert loop.match(""" + i8 = getfield_gc_pure(p6, descr=) + i10 = int_lt(i8, 5000) + guard_true(i10, descr=...) + i11 = force_token() + i13 = int_add(i8, 1) + --TICK-- + p22 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) + jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -23,6 +23,4 @@ guard_not_invalidated(descr=...) p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - p22 = getfield_gc(ConstPtr(ptr21), descr=) - guard_nonnull(p22, descr=...) - """) + """) \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -181,8 +181,7 @@ assert loop.match_by_id("contains", """ guard_not_invalidated(descr=...) 
i11 = force_token() - i12 = int_add_ovf(i5, i7) - guard_no_overflow(descr=...) + i12 = int_add(i5, 1) """) def test_id_compare_optimization(self): diff --git a/pypy/module/sys/test/test_encoding.py b/pypy/module/sys/test/test_encoding.py new file mode 100644 --- /dev/null +++ b/pypy/module/sys/test/test_encoding.py @@ -0,0 +1,30 @@ +import os, py +from pypy.rlib import rlocale +from pypy.module.sys.interp_encoding import _getfilesystemencoding +from pypy.module.sys.interp_encoding import base_encoding + + +def test__getfilesystemencoding(space): + if not (rlocale.HAVE_LANGINFO and rlocale.CODESET): + py.test.skip("requires HAVE_LANGINFO and CODESET") + + def clear(): + for key in os.environ.keys(): + if key == 'LANG' or key.startswith('LC_'): + del os.environ[key] + + def get(**env): + original_env = os.environ.copy() + try: + clear() + os.environ.update(env) + return _getfilesystemencoding(space) + finally: + clear() + os.environ.update(original_env) + + assert get() in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='foobar') in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='en_US.UTF-8') == 'UTF-8' + assert get(LC_ALL='en_US.UTF-8') == 'UTF-8' + assert get(LC_CTYPE='en_US.UTF-8') == 'UTF-8' diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,7 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc.startswith('gcc'): +elif platform.cc is not None and platform.cc.startswith('gcc'): out = platform.execute(platform.cc, '--version').out match = re.search(' (\d+\.\d+(\.\d+)*)', out) if match: diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -231,3 +231,13 @@ assert res == "next step" res = g2.switch("goes to f1 instead") assert res == "all ok" + + def 
test_throw_in_not_started_yet(self): + from greenlet import greenlet + # + def f1(): + never_reached + # + g1 = greenlet(f1) + raises(ValueError, g1.throw, ValueError) + assert g1.dead diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py rename from pypy/module/test_lib_pypy/test_stackless.py rename to pypy/module/test_lib_pypy/test_stackless_pickle.py diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -184,7 +184,7 @@ class FlowExecutionContext(ExecutionContext): - def __init__(self, space, code, globals, constargs={}, closure=None, + def __init__(self, space, code, globals, constargs={}, outer_func=None, name=None): ExecutionContext.__init__(self, space) self.code = code @@ -193,11 +193,11 @@ self.crnt_offset = -1 self.crnt_frame = None - if closure is None: + if outer_func and outer_func.closure: + self.closure = [nestedscope.Cell(Constant(value)) + for value in outer_func.closure] + else: self.closure = None - else: - self.closure = [nestedscope.Cell(Constant(value)) - for value in closure] frame = self.create_frame() formalargcount = code.getformalargcount() arg_list = [Variable() for i in range(formalargcount)] @@ -216,7 +216,7 @@ # while ignoring any operation like the creation of the locals dict self.recorder = [] frame = FlowSpaceFrame(self.space, self.code, - self.w_globals, self.closure) + self.w_globals, self) frame.last_instr = 0 return frame diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py --- a/pypy/objspace/flow/objspace.py +++ b/pypy/objspace/flow/objspace.py @@ -252,9 +252,9 @@ raise TypeError("%r is a generator" % (func,)) code = PyCode._from_code(self, code) if func.func_closure is None: - closure = None + cl = None else: - closure = [extract_cell_content(c) for c in func.func_closure] + cl = [extract_cell_content(c) for c in func.func_closure] # 
CallableFactory.pycall may add class_ to functions that are methods name = func.func_name class_ = getattr(func, 'class_', None) @@ -262,8 +262,10 @@ name = '%s.%s' % (class_.__name__, name) for c in "<>&!": name = name.replace(c, '_') + class outerfunc: # hack + closure = cl ec = flowcontext.FlowExecutionContext(self, code, func.func_globals, - constargs, closure, name) + constargs, outerfunc, name) graph = ec.graph graph.func = func # attach a signature and defaults to the graph diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -65,6 +65,10 @@ if isinstance(cell, ModuleCell): cell.w_value = w_value return + # If the new value and the current value are the same, don't create a + # level of indirection, or mutate are version. + if self.space.is_w(w_value, cell): + return if cell is not None: w_value = ModuleCell(w_value) self.mutated() diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -142,7 +142,7 @@ def funcrun(self, func, args): frame = func.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self.signature() scope_w = args.parse_obj(None, func.name, sig, func.defs_w) frame.setfastscope(scope_w) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -355,9 +355,13 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) - if (mod and ((y < 0.0) != (mod < 0.0))): - mod += y + try: + mod = math.fmod(x, y) + except ValueError: + mod = rfloat.NAN + else: + if (mod and ((y < 0.0) != (mod < 0.0))): + mod += y return W_FloatObject(mod) @@ -366,7 +370,10 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - 
mod = math.fmod(x, y) + try: + mod = math.fmod(x, y) + except ValueError: + return [W_FloatObject(rfloat.NAN), W_FloatObject(rfloat.NAN)] # fmod is typically exact, so vx-mod is *mathematically* an # exact multiple of wx. But this is fp arithmetic, and fp # vx - mod is an approximation; the result is that div may diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -129,12 +129,12 @@ ec._py_repr = None return ec - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): from pypy.objspace.std.fake import CPythonFakeCode, CPythonFakeFrame if not we_are_translated() and isinstance(code, CPythonFakeCode): return CPythonFakeFrame(self, code, w_globals) else: - return ObjSpace.createframe(self, code, w_globals, closure) + return ObjSpace.createframe(self, code, w_globals, outer_func) def gettypefor(self, cls): return self.gettypeobject(cls.typedef) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -913,12 +913,16 @@ def repr__String(space, w_str): s = w_str._value - buf = StringBuilder(len(s) + 2) - quote = "'" if quote in s and '"' not in s: quote = '"' + return space.wrap(string_escape_encode(s, quote)) + +def string_escape_encode(s, quote): + + buf = StringBuilder(len(s) + 2) + buf.append(quote) startslice = 0 @@ -959,7 +963,7 @@ buf.append(quote) - return space.wrap(buf.build()) + return buf.build() DEFAULT_NOOP_TABLE = ''.join([chr(i) for i in range(256)]) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -39,6 +39,20 @@ assert d.getitem("a") is None assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + def test_same_key_set_twice(self): + strategy = 
ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + d = W_DictMultiObject(space, strategy, storage) + + v1 = strategy.version + x = object() + d.setitem("a", x) + v2 = strategy.version + assert v1 is not v2 + d.setitem("a", x) + v3 = strategy.version + assert v2 is v3 + class AppTestModuleDict(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -767,3 +767,19 @@ def test_invalid(self): raises(ValueError, float.fromhex, "0P") + + def test_division_edgecases(self): + import math + + # inf + inf = float("inf") + assert math.isnan(inf % 3) + assert math.isnan(inf // 3) + x, y = divmod(inf, 3) + assert math.isnan(x) + assert math.isnan(y) + + # divide by 0 + raises(ZeroDivisionError, lambda: inf % 0) + raises(ZeroDivisionError, lambda: inf // 0) + raises(ZeroDivisionError, divmod, inf, 0) \ No newline at end of file diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -134,20 +134,24 @@ def test_custom_metaclass(self): import __pypy__ - class MetaA(type): - def __getattribute__(self, x): - return 1 - def f(self): - return 42 - A = type.__new__(MetaA, "A", (), {"f": f}) - l = [type.__getattribute__(A, "__new__")(A)] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 5 - assert cache_counter[1] >= 1 # should be (27, 3) - assert sum(cache_counter) == 10 + for j in range(20): + class MetaA(type): + def __getattribute__(self, x): + return 1 + def f(self): + return 42 + A = type.__new__(MetaA, "A", (), {"f": f}) + l = [type.__getattribute__(A, 
"__new__")(A)] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + cache_counter = __pypy__.method_cache_counter("f") + assert sum(cache_counter) == 10 + if cache_counter == (9, 1): + break + #else the moon is misaligned, try again + else: + raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): import __pypy__ diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -89,6 +89,9 @@ assert not self.not_forced(r) r.sort() assert r == range(1, 100) + [999] + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_pop(self): r = range(10) diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -286,10 +286,10 @@ FFI_OK = cConfig.FFI_OK FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF -FFI_DEFAULT_ABI = rffi.cast(rffi.USHORT, cConfig.FFI_DEFAULT_ABI) +FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI if _WIN32: - FFI_STDCALL = rffi.cast(rffi.USHORT, cConfig.FFI_STDCALL) -FFI_TYPE_STRUCT = rffi.cast(rffi.USHORT, cConfig.FFI_TYPE_STRUCT) + FFI_STDCALL = cConfig.FFI_STDCALL +FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT FFI_CIFP = rffi.COpaquePtr('ffi_cif', compilation_info=eci) FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure) @@ -319,7 +319,7 @@ which the 'ffistruct' member is a regular FFI_TYPE. 
""" tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types)+1, flavor='raw') - tpe.ffistruct.c_type = FFI_TYPE_STRUCT + tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT) tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size) tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment) tpe.ffistruct.c_elements = rffi.cast(FFI_TYPE_PP, @@ -402,12 +402,20 @@ closureHeap = ClosureHeap() -FUNCFLAG_STDCALL = 0 -FUNCFLAG_CDECL = 1 # for WINAPI calls +FUNCFLAG_STDCALL = 0 # on Windows: for WINAPI calls +FUNCFLAG_CDECL = 1 # on Windows: for __cdecl calls FUNCFLAG_PYTHONAPI = 4 FUNCFLAG_USE_ERRNO = 8 FUNCFLAG_USE_LASTERROR = 16 +def get_call_conv(flags, from_jit): + if _WIN32 and (flags & FUNCFLAG_CDECL == 0): + return FFI_STDCALL + else: + return FFI_DEFAULT_ABI +get_call_conv._annspecialcase_ = 'specialize:arg(1)' # hack :-/ + + class AbstractFuncPtr(object): ll_cif = lltype.nullptr(FFI_CIFP.TO) ll_argtypes = lltype.nullptr(FFI_TYPE_PP.TO) @@ -427,21 +435,17 @@ self.ll_cif = lltype.malloc(FFI_CIFP.TO, flavor='raw', track_allocation=False) # freed by the __del__ - if _WIN32 and (flags & FUNCFLAG_CDECL == 0): - cc = FFI_STDCALL - else: - cc = FFI_DEFAULT_ABI - if _MSVC: # This little trick works correctly with MSVC. 
# It returns small structures in registers - if r_uint(restype.c_type) == FFI_TYPE_STRUCT: + if intmask(restype.c_type) == FFI_TYPE_STRUCT: if restype.c_size <= 4: restype = ffi_type_sint32 elif restype.c_size <= 8: restype = ffi_type_sint64 - res = c_ffi_prep_cif(self.ll_cif, cc, + res = c_ffi_prep_cif(self.ll_cif, + rffi.cast(rffi.USHORT, get_call_conv(flags,False)), rffi.cast(rffi.UINT, argnum), restype, self.ll_argtypes) if not res == FFI_OK: diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -75,7 +75,7 @@ @staticmethod @jit.elidable def is_struct(ffi_type): - return intmask(ffi_type.c_type) == intmask(FFI_TYPE_STRUCT) + return intmask(ffi_type.c_type) == FFI_TYPE_STRUCT types._import() @@ -206,6 +206,7 @@ _immutable_fields_ = ['funcsym'] argtypes = [] restype = lltype.nullptr(clibffi.FFI_TYPE_P.TO) + flags = 0 funcsym = lltype.nullptr(rffi.VOIDP.TO) def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL, diff --git a/pypy/rlib/parsing/tree.py b/pypy/rlib/parsing/tree.py --- a/pypy/rlib/parsing/tree.py +++ b/pypy/rlib/parsing/tree.py @@ -6,9 +6,16 @@ content = ["digraph G{"] content.extend(self.dot()) content.append("}") - p = py.test.ensuretemp("automaton").join("temp.dot") + try: + p = py.test.ensuretemp("automaton").join("temp.dot") + remove = False + except AttributeError: # pytest lacks ensuretemp, make a normal one + p = py.path.local.mkdtemp().join('automaton.dot') + remove = True p.write("\n".join(content)) graphclient.display_dot_file(str(p)) + if remove: + p.dirpath().remove() class Symbol(Node): diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -1403,7 +1403,7 @@ s, pos, pos + unicode_bytes) result.append(res) continue - result.append(unichr(t)) + result.append(UNICHR(t)) pos += unicode_bytes return result.build(), pos diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py 
b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -113,7 +113,7 @@ rffi.LONGLONG: ctypes.c_longlong, rffi.ULONGLONG: ctypes.c_ulonglong, rffi.SIZE_T: ctypes.c_size_t, - lltype.Bool: ctypes.c_bool, + lltype.Bool: getattr(ctypes, "c_bool", ctypes.c_long), llmemory.Address: ctypes.c_void_p, llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX @@ -1153,7 +1153,11 @@ # an OverflowError on the following line. cvalue = ctypes.cast(ctypes.c_void_p(cvalue), cresulttype) else: - cvalue = cresulttype(cvalue).value # mask high bits off if needed + try: + cvalue = cresulttype(cvalue).value # mask high bits off if needed + except TypeError: + cvalue = int(cvalue) # float -> int + cvalue = cresulttype(cvalue).value # try again return ctypes2lltype(RESTYPE, cvalue) class ForceCastEntry(ExtRegistryEntry): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -1283,6 +1283,8 @@ try: return p._obj._hash_cache_ except AttributeError: + assert self._T._gckind == 'gc' + assert self # not for NULL result = hash(p._obj) if cache: try: diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -699,7 +699,10 @@ def test_cast(self): res = cast(SIZE_T, -1) assert type(res) is r_size_t - assert res == r_size_t(-1) + assert res == r_size_t(-1) + # + res = cast(lltype.Signed, 42.5) + assert res == 42 def test_rffi_sizeof(self): try: diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -1461,6 +1461,7 @@ # We will fix such references to point to the copy of the young # objects when we walk 
'old_objects_pointing_to_young'. self.old_objects_pointing_to_young.append(newobj) + _trace_drag_out._always_inline_ = True def _visit_young_rawmalloced_object(self, obj): # 'obj' points to a young, raw-malloced object. diff --git a/pypy/rpython/memory/gctypelayout.py b/pypy/rpython/memory/gctypelayout.py --- a/pypy/rpython/memory/gctypelayout.py +++ b/pypy/rpython/memory/gctypelayout.py @@ -459,7 +459,7 @@ if t._hints.get('immutable'): return if 'immutable_fields' in t._hints: - skip = t._hints['immutable_fields'].fields + skip = t._hints['immutable_fields'].all_immutable_fields() for n, t2 in t._flds.iteritems(): if isinstance(t2, lltype.Ptr) and t2.TO._gckind == 'gc': if n not in skip: diff --git a/pypy/rpython/memory/test/test_gctypelayout.py b/pypy/rpython/memory/test/test_gctypelayout.py --- a/pypy/rpython/memory/test/test_gctypelayout.py +++ b/pypy/rpython/memory/test/test_gctypelayout.py @@ -4,7 +4,7 @@ from pypy.rpython.memory.gctypelayout import gc_pointers_inside from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.test.test_llinterp import get_interpreter -from pypy.rpython.rclass import IR_IMMUTABLE +from pypy.rpython.rclass import IR_IMMUTABLE, IR_QUASIIMMUTABLE from pypy.objspace.flow.model import Constant class FakeGC: @@ -102,7 +102,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': IR_IMMUTABLE}) + accessor.initialize(S3, {'x': IR_IMMUTABLE, 'y': IR_QUASIIMMUTABLE}) # s1 = lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -16,6 +16,13 @@ for x in fields.itervalues(): assert isinstance(x, ImmutableRanking) + def all_immutable_fields(self): + result = set() + for key, value in self.fields.iteritems(): + if value in (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY): + result.add(key) + return result + def 
__repr__(self): return '' % getattr(self, 'TYPE', '?') diff --git a/pypy/tool/py.cleanup b/pypy/tool/py.cleanup --- a/pypy/tool/py.cleanup +++ b/pypy/tool/py.cleanup @@ -1,16 +1,31 @@ #!/usr/bin/env python -import py, sys +import sys, os, stat -def shouldremove(p): - return p.ext == '.pyc' +def clean(path): + global count + try: + content = os.listdir(path) + except OSError: + print >> sys.stderr, "skipping", path + return + for fn in content: + filename = os.path.join(path, fn) + st = os.lstat(filename) + if stat.S_ISDIR(st.st_mode): + clean(filename) + if fn == '__pycache__': + try: + os.rmdir(filename) + except OSError: + pass + elif fn.endswith('.pyc') or fn.endswith('.pyo'): + os.unlink(filename) + count += 1 count = 0 for arg in sys.argv[1:] or ['.']: - path = py.path.local(arg) - print "cleaning path", path, "of .pyc files" - for x in path.visit(shouldremove, lambda x: x.check(dotfile=0, link=0)): - x.remove() - count += 1 + print "cleaning path", arg, "of .pyc/.pyo/__pycache__ files" + clean(arg) print "%d files removed" % (count,) diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -52,9 +52,14 @@ pypy_c_dir = basedir.join('pypy', 'translator', 'goal') pypy_c = pypy_c_dir.join('pypy-c.exe') libpypy_c = pypy_c_dir.join('libpypy-c.dll') + libexpat = pypy_c_dir.join('libexpat.dll') + if not libexpat.check(): + libexpat = py.path.local.sysfind('libexpat.dll') + assert libexpat, "libexpat.dll not found" + print "Picking %s" % libexpat binaries = [(pypy_c, pypy_c.basename), (libpypy_c, libpypy_c.basename), - (pypy_c_dir.join('libexpat.dll'), 'libexpat.dll')] + (libexpat, libexpat.basename)] else: basename = 'pypy-c' if override_pypy_c is None: diff --git a/pypy/translator/c/src/stacklet/stacklet.c b/pypy/translator/c/src/stacklet/stacklet.c --- a/pypy/translator/c/src/stacklet/stacklet.c +++ b/pypy/translator/c/src/stacklet/stacklet.c @@ -319,10 +319,11 @@ char 
**_stacklet_translate_pointer(stacklet_handle context, char **ptr) { + char *p = (char *)ptr; + long delta; if (context == NULL) return ptr; - char *p = (char *)ptr; - long delta = p - context->stack_start; + delta = p - context->stack_start; if (((unsigned long)delta) < ((unsigned long)context->stack_saved)) { /* a pointer to a saved away word */ char *c = (char *)(context + 1); diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -260,6 +260,8 @@ try: import _file except ImportError: + if sys.version_info < (2, 7): + return import ctypes # HACK: while running on top of CPython set_file_encoding = ctypes.pythonapi.PyFile_SetEncodingAndErrors set_file_encoding.argtypes = [ctypes.py_object, ctypes.c_char_p, ctypes.c_char_p] @@ -479,7 +481,8 @@ print >> sys.stderr, "'import site' failed" readenv = not ignore_environment - io_encoding = readenv and os.getenv("PYTHONIOENCODING") + io_encoding = ((readenv and os.getenv("PYTHONIOENCODING")) + or sys.getfilesystemencoding()) if io_encoding: set_io_encoding(io_encoding) diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -739,6 +739,19 @@ data = self.run(p + os.sep) assert data == p + os.sep + '\n' + def test_getfilesystemencoding(self): + if sys.version_info < (2, 7): + skip("test requires Python >= 2.7") + p = getscript_in_dir(""" + import sys + sys.stdout.write(u'15\u20ac') + sys.stdout.flush() + """) + env = os.environ.copy() + env["LC_CTYPE"] = 'en_US.UTF-8' + data = self.run(p, env=env) + assert data == '15\xe2\x82\xac' + def test_pythonioencoding(self): if sys.version_info < (2, 7): skip("test requires Python >= 2.7") From noreply at buildbot.pypy.org Sun Sep 4 10:59:53 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Sep 2011 10:59:53 +0200 
(CEST) Subject: [pypy-commit] pypy jit-duplicated_short_boxes: kill obsolete code for duplicating short boxes Message-ID: <20110904085953.9AB5182212@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-duplicated_short_boxes Changeset: r47055:66b3de0272ad Date: 2011-09-04 10:12 +0200 http://bitbucket.org/pypy/pypy/changeset/66b3de0272ad/ Log: kill obsolete code for duplicating short boxes diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -555,54 +555,6 @@ if synthetic: self.synthetic[op] = True - def duplicate(self, destination, op): - newop = op.clone() - newop.result = op.result.clonebox() - destination[newop.result] = newop - if op.result in self.duplicates: - self.duplicates[op.result].append(newop.result) - else: - self.duplicates[op.result] = [newop.result] - self.optimizer.make_equal_to(newop.result, self.optimizer.getvalue(op.result)) - return newop - - def duplicate_short_boxes_if_needed(self): - if os.environ.get('DONT_DUPLICATE'): - return - may_need_duplication = {} - for op in self.short_boxes.values(): - if op: - may_need_duplication[op] = True - while may_need_duplication: - op, _ = may_need_duplication.popitem() - self.maybe_duplicate_op(op, may_need_duplication) - - def maybe_duplicate_op(self, op, may_need_duplication): - for arg in op.getarglist(): - if arg in self.short_boxes: - producer = self.producer(arg) - if producer in may_need_duplication: - del may_need_duplication[producer] - self.maybe_duplicate_op(producer, may_need_duplication) - - allops = None - for i in range(len(op.getarglist())): - arg = op.getarg(i) - if arg in self.duplicates: - if len(self.duplicates[arg]) > 5: - debug_print("Refusing to duplicate short box %d times." 
% len(self.duplicates)) - continue - if not allops: - allops = [op] - previous_ops = len(allops) - for o in range(previous_ops): - for box in self.duplicates[arg]: - if box in self.short_boxes: - newop = self.duplicate(self.short_boxes, allops[0]) - newop.initarglist(allops[o].getarglist()[:]) - newop.setarg(i, box) - allops.append(newop) - def debug_print(self, logops): debug_start('jit-short-boxes') for box, op in self.short_boxes.items(): diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -938,58 +938,6 @@ assert self.i1 in sb.short_boxes assert sum([op.result is self.i1 for op in sb.short_boxes.values() if op]) == 1 - def test_short_box_duplication_indirect1(self): - class Optimizer(FakeOptimizer): - def produce_potential_short_preamble_ops(_self, sb): - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1)) - sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) - sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) - assert len(sb.short_boxes) == 6 - for i in (self.i1, self.i2): - assert i in sb.short_boxes - assert sum([op.result is i for op in sb.short_boxes.values() if op]) == 1 - op1, op2 = [op for op in sb.short_boxes.values() - if op and op.getopnum() == rop.INT_NEG] - assert op1.result is not op2.result - pr1, pr2 = sb.producer(op1.getarg(0)), sb.producer(op2.getarg(0)) - assert pr1 is not pr2 - assert pr1.getopnum() == rop.GETFIELD_GC - assert pr2.getopnum() == rop.GETFIELD_GC - assert set([pr1.getarg(0), pr2.getarg(0)]) == set([self.p1, self.p2]) - - def test_short_box_duplication_indirect2(self): - class Optimizer(FakeOptimizer): - def produce_potential_short_preamble_ops(_self, sb): - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, 
[self.p2], self.i1)) - sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) - sb.add_potential(ResOperation(rop.INT_ADD, [ConstInt(7), self.i2], - self.i3)) - sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) - assert len(sb.short_boxes) == 8 - for i in (self.i1, self.i2): - assert i in sb.short_boxes - assert sum([op.result is i for op in sb.short_boxes.values() if op]) == 1 - op1, op2 = [op for op in sb.short_boxes.values() - if op and op.getopnum() == rop.INT_NEG] - assert op1.result is not op2.result - pr1, pr2 = sb.producer(op1.getarg(0)), sb.producer(op2.getarg(0)) - assert pr1 is not pr2 - assert pr1.getopnum() == rop.GETFIELD_GC - assert pr2.getopnum() == rop.GETFIELD_GC - assert set([pr1.getarg(0), pr2.getarg(0)]) == set([self.p1, self.p2]) - op1, op2 = [op for op in sb.short_boxes.values() - if op and op.getopnum() == rop.INT_ADD] - assert op1.result is not op2.result - pr1, pr2 = sb.producer(op1.getarg(1)), sb.producer(op2.getarg(1)) - assert pr1 is not pr2 - assert pr1.getopnum() == rop.INT_NEG - assert pr2.getopnum() == rop.INT_NEG - negargs = set([pr1.getarg(0), pr2.getarg(0)]) - assert len(negargs) == 2 - assert self.i1 in negargs - def test_dont_duplicate_potential_boxes(self): class Optimizer(FakeOptimizer): def produce_potential_short_preamble_ops(_self, sb): @@ -1001,63 +949,6 @@ sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) assert len(sb.short_boxes) == 5 - def test_duplicate_duplicaded_box(self): - class Optimizer(FakeOptimizer): - def produce_potential_short_preamble_ops(_self, sb): - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.p3)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.p3)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p3], self.i2)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p4], self.i2)) - sb = ShortBoxes(Optimizer(), [self.p1, self.p2, self.p4]) - assert len(sb.short_boxes) == 8 - getfields = {} - for op in sb.short_boxes.values(): - if op and op.getopnum() == 
rop.GETFIELD_GC: - getfields[op.getarg(0)] = op - i1 = getfields[getfields[self.p1].result].result - i2 = getfields[getfields[self.p2].result].result - i3 = getfields[self.p4].result - ii = set([i1, i2, i3]) - assert len(ii) == 3 - for i in ii: - assert isinstance(i, BoxInt) - assert self.i2 in ii - - def test_duplucate_on_both_arguments(self): - class Optimizer(FakeOptimizer): - def produce_potential_short_preamble_ops(_self, sb): - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p3], self.i2)) - sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p4], self.i2)) - sb.add_potential(ResOperation(rop.INT_ADD, [self.i1, self.i2], - self.i3)) - sb.add_potential(ResOperation(rop.INT_NEG, [self.i3], self.i4)) - - sb = ShortBoxes(Optimizer(), [self.p1, self.p2, self.p3, self.p4]) - assert len(sb.short_boxes) == 16 - getfield, int_add, int_neg = {}, {},{} - for op in sb.short_boxes.values(): - if op: - if op.getopnum() == rop.GETFIELD_GC: - getfield[op.getarg(0)] = op - if op.getopnum() == rop.INT_ADD: - int_add[tuple(op.getarglist())] = op - if op.getopnum() == rop.INT_NEG: - int_neg[op.getarg(0)] = op - assert len(getfield) == 4 - assert len(int_add) == 4 - assert len(int_neg) == 4 - arg0 = [getfield[self.p1].result, getfield[self.p2].result] - arg1 = [getfield[self.p3].result, getfield[self.p4].result] - for a0 in arg0: - for a1 in arg1: - assert (a0, a1) in int_add - for op in int_add.values(): - assert op.result in int_neg - int_ops = int_add.values() + int_neg.values() - assert len(set([op.result for op in int_ops])) == 8 - def test_prioritize1(self): class Optimizer(FakeOptimizer): def produce_potential_short_preamble_ops(_self, sb): From noreply at buildbot.pypy.org Sun Sep 4 10:59:54 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Sep 2011 10:59:54 +0200 (CEST) Subject: [pypy-commit] pypy 
jit-duplicated_short_boxes: this case is too complicated without duplication Message-ID: <20110904085954.D01E682213@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-duplicated_short_boxes Changeset: r47056:ea8b9810ad9b Date: 2011-09-04 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ea8b9810ad9b/ Log: this case is too complicated without duplication diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -116,6 +116,9 @@ guard_not_invalidated(descr=...) i13 = int_add_ovf(i8, i9) guard_no_overflow(descr=...) + i10p = getfield_gc_pure(p10, descr=...) + i10 = int_mul_ovf(2, i10p) + guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) guard_no_overflow(descr=...) setfield_gc(p7, p11, descr=...) From noreply at buildbot.pypy.org Sun Sep 4 11:30:26 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 11:30:26 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: next test I want to pass Message-ID: <20110904093026.C4C748203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47058:34cefdbcdbcc Date: 2011-09-04 11:05 +0200 http://bitbucket.org/pypy/pypy/changeset/34cefdbcdbcc/ Log: next test I want to pass diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -208,3 +208,11 @@ assert h.getfield(box4, descr1) is box2 assert h.getfield(box4, descr2) is box3 + h = HeapCache() + h.setfield(box1, descr1, box2) + h.setfield(box1, descr2, box3) + h.replace_box(box1, box4) + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box4, descr1) is box2 + assert h.getfield(box4, descr2) is box3 diff --git 
a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -428,3 +428,22 @@ assert res == 2 * -7 + 2 * -8 self.check_operations_history(getfield_gc=0) + def test_heap_caching_multiple_arrays(self): + class Gbl(object): + pass + g = Gbl() + def fn(n): + a1 = [n, n, n] + g.a = a1 + a1[0] = n + a2 = [n, n, n] + g.a = a2 + a2[0] = n - 1 + return a1[0] + a2[0] + a1[0] + a2[0] + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getarrayitem_gc=2) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getarrayitem_gc=2) + From noreply at buildbot.pypy.org Sun Sep 4 11:30:28 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 11:30:28 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: change array cache layout Message-ID: <20110904093028.1161D8203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47059:38b764f137a9 Date: 2011-09-04 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/38b764f137a9/ Log: change array cache layout diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -16,7 +16,7 @@ # maps descrs to {from_box, to_box} dicts self.heap_cache = {} # heap array cache - # maps descrs to {index: (from_box, to_box)} dicts + # maps descrs to {index: {from_box: to_box}} dicts self.heap_array_cache = {} def invalidate_caches(self, opnum, descr): @@ -87,9 +87,9 @@ index = indexbox.getint() cache = self.heap_array_cache.get(descr, None) if cache: - frombox, tobox = cache.get(index, (None, None)) - if frombox is box: - return tobox + indexcache = cache.get(index, None) + if indexcache is not None: + return indexcache.get(box, None) def 
setarrayitem(self, box, descr, indexbox, valuebox): if not isinstance(indexbox, ConstInt): @@ -97,9 +97,9 @@ if cache is not None: cache.clear() return + index = indexbox.getint() cache = self.heap_array_cache.setdefault(descr, {}) - index = indexbox.getint() - cache[index] = box, valuebox + cache[index] = {box: valuebox} def replace_box(self, oldbox, newbox): for descr, d in self.heap_cache.iteritems(): From noreply at buildbot.pypy.org Sun Sep 4 11:30:29 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 11:30:29 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: fix XXX in replace_box Message-ID: <20110904093029.4A6948203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47060:feecefe7686f Date: 2011-09-04 11:30 +0200 http://bitbucket.org/pypy/pypy/changeset/feecefe7686f/ Log: fix XXX in replace_box diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -111,4 +111,13 @@ tobox = newbox new_d[frombox] = tobox self.heap_cache[descr] = new_d - # XXX what about self.heap_array_cache? 
+ for descr, d in self.heap_array_cache.iteritems(): + for index, cache in d.iteritems(): + new_cache = {} + for frombox, tobox in cache.iteritems(): + if frombox is oldbox: + frombox = newbox + if tobox is oldbox: + tobox = newbox + new_cache[frombox] = tobox + d[index] = new_cache diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -8,6 +8,7 @@ box4 = object() descr1 = object() descr2 = object() +descr3 = object() index1 = ConstInt(0) index2 = ConstInt(1) @@ -202,17 +203,26 @@ h = HeapCache() h.setfield(box1, descr1, box2) h.setfield(box1, descr2, box3) + h.setfield(box2, descr3, box3) h.replace_box(box1, box4) assert h.getfield(box1, descr1) is None assert h.getfield(box1, descr2) is None assert h.getfield(box4, descr1) is box2 assert h.getfield(box4, descr2) is box3 + assert h.getfield(box2, descr3) is box3 + def test_replace_box_array(self): h = HeapCache() - h.setfield(box1, descr1, box2) - h.setfield(box1, descr2, box3) + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr2, index1, box3) + h.setarrayitem(box2, descr1, index2, box1) + h.setarrayitem(box3, descr2, index2, box1) + h.setarrayitem(box2, descr3, index2, box3) h.replace_box(box1, box4) - assert h.getfield(box1, descr1) is None - assert h.getfield(box1, descr2) is None - assert h.getfield(box4, descr1) is box2 - assert h.getfield(box4, descr2) is box3 + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box4, descr1, index1) is box2 + assert h.getarrayitem(box4, descr2, index1) is box3 + assert h.getarrayitem(box2, descr1, index2) is box4 + assert h.getarrayitem(box3, descr2, index2) is box4 + assert h.getarrayitem(box2, descr3, index2) is box3 diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- 
a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -257,6 +257,28 @@ self.check_operations_history(setarrayitem_gc=2, setfield_gc=2, getarrayitem_gc=0, getfield_gc=2) + def test_promote_changes_array_cache(self): + a1 = [0, 0] + a2 = [0, 0] + def fn(n): + if n > 0: + a = a1 + else: + a = a2 + a[0] = n + jit.hint(n, promote=True) + x1 = a[0] + jit.hint(x1, promote=True) + a[n - n] = n + 1 + return a[0] + x1 + res = self.interp_operations(fn, [7]) + assert res == 7 + 7 + 1 + self.check_operations_history(getarrayitem_gc=0, guard_value=1) + res = self.interp_operations(fn, [-7]) + assert res == -7 - 7 + 1 + self.check_operations_history(getarrayitem_gc=0, guard_value=1) + + def test_list_caching(self): a1 = [0, 0] a2 = [0, 0] From noreply at buildbot.pypy.org Sun Sep 4 12:04:33 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 12:04:33 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: fix bug in setfield Message-ID: <20110904100433.57D8D8203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47061:119fa1455e3b Date: 2011-09-04 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/119fa1455e3b/ Log: fix bug in setfield diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -65,20 +65,21 @@ def setfield(self, box, descr, fieldbox): # slightly subtle logic here d = self.heap_cache.get(descr, None) - new_d = {box: fieldbox} # a write to an arbitrary box, all other boxes can alias this one if not d or box not in self.new_boxes: # therefore we throw away the cache - self.heap_cache[descr] = new_d + self.heap_cache[descr] = {box: fieldbox} return # the object we are writing to is freshly allocated # only remove some boxes from the cache + new_d = {} for frombox, tobox in d.iteritems(): # the other box is *also* freshly 
allocated # therefore frombox and box *must* contain different objects # thus we can keep it in the cache if frombox in self.new_boxes: new_d[frombox] = tobox + new_d[box] = fieldbox self.heap_cache[descr] = new_d def getarrayitem(self, box, descr, indexbox): diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -121,6 +121,8 @@ h.setfield(box3, descr1, box4) assert h.getfield(box3, descr1) is box4 assert h.getfield(box1, descr1) is box2 # box1 and box3 cannot alias + h.setfield(box1, descr1, box3) + assert h.getfield(box1, descr1) is box3 def test_heapcache_arrays(self): From noreply at buildbot.pypy.org Sun Sep 4 12:04:34 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 12:04:34 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: implement the same logic for getarrayitem as for getfield by reusing the same code Message-ID: <20110904100434.8F55E8203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47062:d79a75d32675 Date: 2011-09-04 12:00 +0200 http://bitbucket.org/pypy/pypy/changeset/d79a75d32675/ Log: implement the same logic for getarrayitem as for getfield by reusing the same code diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -63,13 +63,16 @@ self.heap_cache.setdefault(descr, {})[box] = fieldbox def setfield(self, box, descr, fieldbox): + d = self.heap_cache.get(descr, None) + new_d = self._do_write_with_aliasing(d, box, fieldbox) + self.heap_cache[descr] = new_d + + def _do_write_with_aliasing(self, d, box, fieldbox): # slightly subtle logic here - d = self.heap_cache.get(descr, None) # a write to an arbitrary box, all other boxes can alias this one if not d or box not in self.new_boxes: # therefore we 
throw away the cache - self.heap_cache[descr] = {box: fieldbox} - return + return {box: fieldbox} # the object we are writing to is freshly allocated # only remove some boxes from the cache new_d = {} @@ -80,7 +83,7 @@ if frombox in self.new_boxes: new_d[frombox] = tobox new_d[box] = fieldbox - self.heap_cache[descr] = new_d + return new_d def getarrayitem(self, box, descr, indexbox): if not isinstance(indexbox, ConstInt): @@ -100,7 +103,8 @@ return index = indexbox.getint() cache = self.heap_array_cache.setdefault(descr, {}) - cache[index] = {box: valuebox} + indexcache = cache.get(index, None) + cache[index] = self._do_write_with_aliasing(indexcache, box, valuebox) def replace_box(self, oldbox, newbox): for descr, d in self.heap_cache.iteritems(): diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -177,6 +177,36 @@ assert h.getarrayitem(box1, descr1, index1) is None assert h.getarrayitem(box1, descr1, index2) is None + + def test_heapcache_write_fields_multiple_array(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.new(box3) + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + 
assert h.getarrayitem(box1, descr1, index1) is box2 # box1 and box3 cannot alias + h.setarrayitem(box1, descr1, index1, box3) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is box3 # box1 and box3 cannot alias + + def test_invalidate_cache(self): h = HeapCache() h.setfield(box1, descr1, box2) From noreply at buildbot.pypy.org Sun Sep 4 12:04:35 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 12:04:35 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: make the functional test pass too Message-ID: <20110904100435.D28188203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47063:b60991325598 Date: 2011-09-04 12:04 +0200 http://bitbucket.org/pypy/pypy/changeset/b60991325598/ Log: make the functional test pass too diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -395,7 +395,9 @@ @arguments("descr", "box") def opimpl_new_array(self, itemsizedescr, countbox): - return self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, countbox) + resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, countbox) + self.metainterp.heapcache.new(resbox) + return resbox @arguments("box", "descr", "box") def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox): diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -464,8 +464,8 @@ return a1[0] + a2[0] + a1[0] + a2[0] res = self.interp_operations(fn, [7]) assert res == 2 * 7 + 2 * 6 - self.check_operations_history(getarrayitem_gc=2) + self.check_operations_history(getarrayitem_gc=0) res = self.interp_operations(fn, [-7]) assert res == 2 * -7 + 2 * -8 - self.check_operations_history(getarrayitem_gc=2) + 
self.check_operations_history(getarrayitem_gc=0) From noreply at buildbot.pypy.org Sun Sep 4 12:12:38 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 12:12:38 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-perf: hg merge r15-for-exception Message-ID: <20110904101238.4EDE18203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-perf Changeset: r47064:8f606f534dc0 Date: 2011-09-04 08:45 +0200 http://bitbucket.org/pypy/pypy/changeset/8f606f534dc0/ Log: hg merge r15-for-exception diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -116,9 +116,12 @@ self.pos_exc_value = pos_exc_value self.save_exception = save_exception self.insert_stack_check = lambda: (0, 0, 0) + self.special_register = None def _setup_exception_handling_translated(self): + from pypy.rlib import register + from pypy.rlib.register import register_number def pos_exception(): addr = llop.get_exception_addr(llmemory.Address) @@ -129,6 +132,8 @@ return heaptracker.adr2int(addr) def save_exception(): + if register_number is not None: + register.store_into_reg(register.nonnull) addr = llop.get_exception_addr(llmemory.Address) addr.address[0] = llmemory.NULL addr = llop.get_exc_value_addr(llmemory.Address) @@ -153,6 +158,9 @@ self.pos_exc_value = pos_exc_value self.save_exception = save_exception self.insert_stack_check = insert_stack_check + self.special_register = register_number + self.special_register_nonnull = llmemory.cast_adr_to_int( + register.nonnull) def _setup_on_leave_jitted_untranslated(self): # assume we don't need a backend leave in this case diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -6,6 +6,8 @@ # during a malloc that needs to go via its slow path. 
import sys +from pypy.rlib.register import register_number as special_register + if sys.maxint == (2**31 - 1): WORD = 4 # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words @@ -14,19 +16,27 @@ MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False + assert special_register is None else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 - FRAME_FIXED_SIZE = 18 - FORCE_INDEX_OFS = -17*WORD - MY_COPY_OF_REGS = -16*WORD + if special_register is not None: + assert special_register == 15 + # rbp + rbx + r12 + r13 + r14 + 10 extra words + force_index = 16 + FRAME_FIXED_SIZE = 16 + FORCE_INDEX_OFS = -15*WORD + MY_COPY_OF_REGS = -14*WORD + else: + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True # The extra space has room for almost all registers, apart from eax and edx # which are used in the malloc itself. They are: # ecx, ebx, esi, edi [32 and 64 bits] -# r8, r9, r10, r12, r13, r14, r15 [64 bits only] +# r8, r9, r10, r12, r13, r14, r15? [64 bits only] # # Note that with asmgcc, the locations corresponding to callee-save registers # are never used. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -275,9 +275,14 @@ # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) # - mc.MOV(eax, heap(self.cpu.pos_exception())) - mc.TEST_rr(eax.value, eax.value) - mc.J_il8(rx86.Conditions['NZ'], 0) + if self.cpu.special_register is None: + mc.MOV(eax, heap(self.cpu.pos_exception())) + mc.TEST_rr(eax.value, eax.value) + mc.J_il8(rx86.Conditions['NZ'], 0) + else: + rnum = self.cpu.special_register + mc.TEST_rr(rnum, rnum) + mc.J_il8(rx86.Conditions['Z'], 0) jnz_location = mc.get_relative_pos() # if IS_X86_32: @@ -298,8 +303,7 @@ mc.overwrite(jnz_location-1, chr(offset)) # clear the exception from the global position mc.MOV(eax, heap(self.cpu.pos_exc_value())) - mc.MOV(heap(self.cpu.pos_exception()), imm0) - mc.MOV(heap(self.cpu.pos_exc_value()), imm0) + self.clear_current_exception(mc) # save the current exception instance into fail_boxes_ptr[0] adr = self.fail_boxes_ptr.get_addr_for_num(0) mc.MOV(heap(adr), eax) @@ -320,6 +324,13 @@ rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.stack_check_slowpath = rawstart + def clear_current_exception(self, mc): + if self.cpu.special_register is not None: + mc.MOV_ri(self.cpu.special_register, + self.cpu.special_register_nonnull) + mc.MOV(heap(self.cpu.pos_exception()), imm0) + mc.MOV(heap(self.cpu.pos_exc_value()), imm0) + @staticmethod def _release_gil_asmgcc(css): # similar to trackgcroot.py:pypy_asm_stackwalk, first part @@ -1579,8 +1590,13 @@ def genop_guard_guard_no_exception(self, ign_1, guard_op, guard_token, locs, ign_2): - self.mc.CMP(heap(self.cpu.pos_exception()), imm0) - self.implement_guard(guard_token, 'NZ') + if self.cpu.special_register is None: + self.mc.CMP(heap(self.cpu.pos_exception()), imm0) + self.implement_guard(guard_token, 'NZ') + else: + rnum = self.cpu.special_register + self.mc.TEST_rr(rnum, rnum) + 
self.implement_guard(guard_token, 'Z') def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, locs, ign_2): @@ -1591,14 +1607,11 @@ def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] - loc1 = locs[1] - self.mc.MOV(loc1, heap(self.cpu.pos_exception())) - self.mc.CMP(loc1, loc) + self.mc.CMP(heap(self.cpu.pos_exception()), loc) self.implement_guard(guard_token, 'NE') if resloc is not None: self.mc.MOV(resloc, heap(self.cpu.pos_exc_value())) - self.mc.MOV(heap(self.cpu.pos_exception()), imm0) - self.mc.MOV(heap(self.cpu.pos_exc_value()), imm0) + self.clear_current_exception(self.mc) def _gen_guard_overflow(self, guard_op, guard_token): guard_opnum = guard_op.getopnum() diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,7 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, special_register from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint @@ -56,8 +56,11 @@ not_implemented("convert_to_imm: got a %s" % c) class X86_64_RegisterManager(X86RegisterManager): - # r11 omitted because it's used as scratch - all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15] + # r11 omitted because it's used as scratch; r15 is omitted if used + # as a special register + all_regs = [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14] + if special_register is None: + all_regs.append(r15) no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] @@ -79,8 +82,9 @@ r12: MY_COPY_OF_REGS + 7 * WORD, r13: MY_COPY_OF_REGS + 8 * WORD, r14: 
MY_COPY_OF_REGS + 9 * WORD, - r15: MY_COPY_OF_REGS + 10 * WORD, } + if special_register is None: + REGLOC_TO_COPY_AREA_OFS[r15] = MY_COPY_OF_REGS + 10 * WORD class X86XMMRegisterManager(RegisterManager): @@ -518,17 +522,13 @@ def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) - box = TempBox() - args = op.getarglist() - loc1 = self.rm.force_allocate_reg(box, args) if op.result in self.longevity: # this means, is it ever used - resloc = self.rm.force_allocate_reg(op.result, args + [box]) + resloc = self.rm.force_allocate_reg(op.result) else: resloc = None - self.perform_guard(op, [loc, loc1], resloc) + self.perform_guard(op, [loc], resloc) self.rm.possibly_free_vars_for_op(op) - self.rm.possibly_free_var(box) consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -5,7 +5,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp import history, compile from pypy.jit.backend.x86.assembler import Assembler386 -from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS +from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS, special_register from pypy.jit.backend.x86.profagent import ProfileAgent from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU from pypy.jit.backend.x86 import regloc @@ -205,7 +205,9 @@ backend_name = 'x86_64' WORD = 8 NUM_REGS = 16 - CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] + CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14] + if special_register is None: + CALLEE_SAVE_REGISTERS.append(regloc.r15) def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -498,12 +498,13 
@@ @specialize.ll() def wrapper(*args): + from pypy.rpython.lltypesystem import llmemory from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import Reference # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py rffi.stackcounter.stacks_counter += 1 + saved = llop.gc_stack_bottom(llmemory.Address) # for trackgcroot.py retval = fatal_value boxed_args = () try: @@ -572,6 +573,7 @@ else: print str(e) pypy_debug_catch_fatal_exception() + llop.gc_stack_bottom_stop(lltype.Void, saved) rffi.stackcounter.stacks_counter -= 1 return retval callable._always_inline_ = True diff --git a/pypy/rlib/register.py b/pypy/rlib/register.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/register.py @@ -0,0 +1,81 @@ +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.rpython.tool import rffi_platform + +# On platforms with enough hardware registers and with gcc, we can +# (ab)use gcc to globally assign a register to a single global void* +# variable. We use it with a double meaning: +# +# - when it is NULL upon return from a function, it means that an +# exception occurred. It allows the caller to quickly check for +# exceptions. +# +# - in other cases, with --gcrootfinder=shadowstack, it points to +# the top of the shadow stack. + + +# For now, only for x86-64. Tries to use the register r15. 
+eci = ExternalCompilationInfo( + post_include_bits=[ + 'register void *pypy_r15 asm("r15");\n' + '#define PYPY_GET_SPECIAL_REG() pypy_r15\n' + '#define PYPY_SPECIAL_REG_NONNULL() (pypy_r15 != NULL)\n' + '#define PYPY_SET_SPECIAL_REG(x) (pypy_r15 = x)\n' + ], + ) + +_test_eci = eci.merge(ExternalCompilationInfo( + post_include_bits=[""" + void f(void) { + pypy_r15 = &f; + } + """])) + +try: + rffi_platform.verify_eci(_test_eci) + register_number = 15 # r15 +except rffi_platform.CompilationError: + eci = None + register_number = None +else: + + from pypy.rpython.lltypesystem import lltype, llmemory, rffi + + # use addr=load_from_reg() and store_into_reg(addr) to load and store + # an Address out of the special register. When running on top of Python, + # the behavior is emulated. + + _value_reg = None + + def _pypy_get_special_reg(): + assert _value_reg is not None + return _value_reg + + def _pypy_special_reg_nonnull(): + assert _value_reg is not None + return bool(_value_reg) + + def _pypy_set_special_reg(addr): + global _value_reg + _value_reg = addr + + load_from_reg = rffi.llexternal('PYPY_GET_SPECIAL_REG', [], + llmemory.Address, + _callable=_pypy_get_special_reg, + compilation_info=eci, + _nowrapper=True) + + reg_is_nonnull = rffi.llexternal('PYPY_SPECIAL_REG_NONNULL', [], + lltype.Bool, + _callable=_pypy_special_reg_nonnull, + compilation_info=eci, + _nowrapper=True) + + store_into_reg = rffi.llexternal('PYPY_SET_SPECIAL_REG', + [llmemory.Address], + lltype.Void, + _callable=_pypy_set_special_reg, + compilation_info=eci, + _nowrapper=True) + + # xxx temporary + nonnull = llmemory.cast_int_to_adr(-1) diff --git a/pypy/rlib/test/test_register.py b/pypy/rlib/test/test_register.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/test_register.py @@ -0,0 +1,55 @@ +import py +from pypy.rlib import register +from pypy.rpython.lltypesystem import lltype, llmemory, rffi +from pypy.translator.c.test.test_standalone import StandaloneTests + + +def 
test_register(): + # + from pypy.jit.backend.detect_cpu import autodetect + if autodetect() == 'x86_64': + assert register.eci is not None + assert register.register_number == 15 # r15 + else: + assert register.eci is None + assert register.register_number is None + + +class TestLoadStore(object): + def setup_class(cls): + if register.register_number is None: + py.test.skip("rlib/register not supported on this platform") + + def test_direct(self): + a = rffi.cast(llmemory.Address, 27) + register.store_into_reg(a) + b = register.load_from_reg() + assert lltype.typeOf(b) == llmemory.Address + assert rffi.cast(lltype.Signed, b) == 27 + + def test_llinterp(self): + from pypy.rpython.test.test_llinterp import interpret + def f(n): + a = rffi.cast(llmemory.Address, n) + register.store_into_reg(a) + b = register.load_from_reg() + return rffi.cast(lltype.Signed, b) + res = interpret(f, [41]) + assert res == 41 + + +class TestLoadStoreCompiled(StandaloneTests): + def setup_class(cls): + if register.register_number is None: + py.test.skip("rlib/register not supported on this platform") + + def test_compiled(self): + def f(argv): + a = rffi.cast(llmemory.Address, 43) + register.store_into_reg(a) + b = register.load_from_reg() + print rffi.cast(lltype.Signed, b) + return 0 + t, cbuilder = self.compile(f) + data = cbuilder.cmdexec('') + assert data.startswith('43\n') diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -891,9 +891,6 @@ def op_gc_asmgcroot_static(self, index): raise NotImplementedError("gc_asmgcroot_static") - def op_gc_stack_bottom(self): - pass # marker for trackgcroot.py - def op_gc_get_type_info_group(self): raise NotImplementedError("gc_get_type_info_group") diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -503,6 +503,7 @@ # see 
translator/c/src/mem.h for the valid indices 'gc_asmgcroot_static': LLOp(sideeffects=False), 'gc_stack_bottom': LLOp(canrun=True), + 'gc_stack_bottom_stop': LLOp(canrun=True), # NOTE NOTE NOTE! don't forget *** canunwindgc=True *** for anything that # can go through a stack unwind, in particular anything that mallocs! diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -527,7 +527,10 @@ return debug.have_debug_prints() def op_gc_stack_bottom(): - pass # marker for trackgcroot.py + return llmemory.NULL # marker for trackgcroot.py + +def op_gc_stack_bottom_stop(saved): + pass # for rlib/register.py def op_jit_force_virtualizable(*args): pass diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -250,7 +250,7 @@ """ Function creating wrappers for callbacks. Note that this is cheating as we assume constant callbacks and we just memoize wrappers """ - from pypy.rpython.lltypesystem import lltype + from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop if hasattr(callable, '_errorcode_'): errorcode = callable._errorcode_ @@ -262,13 +262,15 @@ args = ', '.join(['a%d' % i for i in range(len(TP.TO.ARGS))]) source = py.code.Source(r""" def wrapper(%s): # no *args - no GIL for mallocing the tuple - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py if aroundstate is not None: after = aroundstate.after if after: after() # from now on we hold the GIL stackcounter.stacks_counter += 1 + # marker for trackgcroot.py and for rlib/register.py: + # initialize the value of the special register + saved = llop.gc_stack_bottom(llmemory.Address) try: result = callable(%s) except Exception, e: @@ -279,6 +281,7 @@ import traceback traceback.print_exc() result = errorcode + 
llop.gc_stack_bottom_stop(lltype.Void, saved) stackcounter.stacks_counter -= 1 if aroundstate is not None: before = aroundstate.before diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -60,7 +60,8 @@ if translator is None or translator.rtyper is None: self.exctransformer = None else: - self.exctransformer = translator.getexceptiontransformer() + self.exctransformer = translator.getexceptiontransformer( + standalone=standalone) if translator is not None: self.gctransformer = self.gcpolicy.transformerclass(translator) self.completed = False diff --git a/pypy/translator/c/gc.py b/pypy/translator/c/gc.py --- a/pypy/translator/c/gc.py +++ b/pypy/translator/c/gc.py @@ -104,9 +104,6 @@ def OP_GC_ASSUME_YOUNG_POINTERS(self, funcgen, op): return '' - def OP_GC_STACK_BOTTOM(self, funcgen, op): - return '' - class RefcountingInfo: static_deallocator = None @@ -397,9 +394,6 @@ def GC_KEEPALIVE(self, funcgen, v): return 'pypy_asm_keepalive(%s);' % funcgen.expr(v) - def OP_GC_STACK_BOTTOM(self, funcgen, op): - return 'pypy_asm_stack_bottom();' - name_to_gcpolicy = { 'boehm': BoehmGcPolicy, diff --git a/pypy/translator/c/gcc/test/test_asmgcroot.py b/pypy/translator/c/gcc/test/test_asmgcroot.py --- a/pypy/translator/c/gcc/test/test_asmgcroot.py +++ b/pypy/translator/c/gcc/test/test_asmgcroot.py @@ -6,7 +6,7 @@ from pypy.annotation.listdef import s_list_of_strings from pypy import conftest from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rlib.entrypoint import entrypoint, secondary_entrypoints from pypy.rpython.lltypesystem.lloperation import llop @@ -180,9 +180,10 @@ @entrypoint("x42", [lltype.Signed, lltype.Signed], c_name='callback') def mycallback(a, b): - llop.gc_stack_bottom(lltype.Void) rffi.stackcounter.stacks_counter += 1 + saved = 
llop.gc_stack_bottom(llmemory.Address) gc.collect() + llop.gc_stack_bottom_stop(lltype.Void, saved) rffi.stackcounter.stacks_counter -= 1 return a + b diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -103,7 +103,15 @@ else: regindex = self.CALLEE_SAVE_REGISTERS.index(tag) shape[1 + regindex] = loc - if LOC_NOWHERE in shape and not self.is_stack_bottom: + # + if self.special_register is None: + shape_wo_specialreg = shape + else: + tag = self.special_register + regindex = self.CALLEE_SAVE_REGISTERS.index(tag) + shape_wo_specialreg = shape[:] + del shape_wo_specialreg[1 + regindex] + if LOC_NOWHERE in shape_wo_specialreg and not self.is_stack_bottom: reg = self.CALLEE_SAVE_REGISTERS[shape.index(LOC_NOWHERE) - 1] raise AssertionError("cannot track where register %s is saved" % (reg,)) @@ -1346,6 +1354,7 @@ def process_function(self, lines, filename): tracker = self.FunctionGcRootTracker( lines, filetag=getidentifier(filename)) + tracker.special_register = special_register if self.verbose == 1: sys.stderr.write('.') elif self.verbose > 1: @@ -1548,10 +1557,12 @@ class GcRootTracker(object): - def __init__(self, verbose=0, shuffle=False, format='elf'): + def __init__(self, verbose=0, shuffle=False, format='elf', + special_register=None): self.verbose = verbose self.shuffle = shuffle # to debug the sorting logic in asmgcroot.py self.format = format + self.special_register = special_register self.gcmaptable = [] def dump_raw_table(self, output): @@ -1897,6 +1908,7 @@ verbose = 0 shuffle = False output_raw_table = False + special_register = None if sys.platform == 'darwin': if sys.maxint > 2147483647: format = 'darwin64' @@ -1922,12 +1934,16 @@ elif sys.argv[1].startswith('-f'): format = sys.argv[1][2:] del sys.argv[1] + elif sys.argv[1].startswith('-%'): + special_register = sys.argv[1][1:] + del sys.argv[1] elif sys.argv[1].startswith('-'): 
print >> sys.stderr, "unrecognized option:", sys.argv[1] sys.exit(1) else: break - tracker = GcRootTracker(verbose=verbose, shuffle=shuffle, format=format) + tracker = GcRootTracker(verbose=verbose, shuffle=shuffle, format=format, + special_register=special_register) for fn in sys.argv[1:]: f = open(fn, 'r') firstline = f.readline() diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- a/pypy/translator/c/genc.py +++ b/pypy/translator/c/genc.py @@ -556,6 +556,12 @@ for rule in rules: mk.rule(*rule) + from pypy.rlib.register import register_number + if register_number is None: + extra_trackgcroot_arg = '' + else: + extra_trackgcroot_arg = '-%%r%d' % register_number + if self.config.translation.gcrootfinder == 'asmgcc': trackgcfiles = [cfile[:-2] for cfile in mk.cfiles] if self.translator.platform.name == 'msvc': @@ -603,7 +609,7 @@ 'cmd /c $(MASM) /nologo /Cx /Cp /Zm /coff /Fo$@ /c $< $(INCLUDEDIRS)') mk.rule('.c.gcmap', '', ['$(CC) /nologo $(ASM_CFLAGS) /c /FAs /Fa$*.s $< $(INCLUDEDIRS)', - 'cmd /c ' + python + '$(PYPYDIR)/translator/c/gcc/trackgcroot.py -fmsvc -t $*.s > $@'] + 'cmd /c ' + python + '$(PYPYDIR)/translator/c/gcc/trackgcroot.py -fmsvc -t %s $*.s > $@' % extra_trackgcroot_arg] ) mk.rule('gcmaptable.c', '$(GCMAPFILES)', 'cmd /c ' + python + '$(PYPYDIR)/translator/c/gcc/trackgcroot.py -fmsvc $(GCMAPFILES) > $@') @@ -614,7 +620,7 @@ mk.rule('%.lbl.s %.gcmap', '%.s', [python + '$(PYPYDIR)/translator/c/gcc/trackgcroot.py ' - '-t $< > $*.gctmp', + '-t %s $< > $*.gctmp' % extra_trackgcroot_arg, 'mv $*.gctmp $*.gcmap']) mk.rule('gcmaptable.s', '$(GCMAPFILES)', [python + diff --git a/pypy/translator/c/src/main.h b/pypy/translator/c/src/main.h --- a/pypy/translator/c/src/main.h +++ b/pypy/translator/c/src/main.h @@ -34,8 +34,9 @@ char *errmsg; int i, exitcode; RPyListOfString *list; + void *saved; - pypy_asm_stack_bottom(); + OP_GC_STACK_BOTTOM(saved); instrument_setup(); if (sizeof(void*) != SIZEOF_LONG) { @@ -70,6 +71,7 @@ 
pypy_debug_catch_fatal_exception(); } + OP_GC_STACK_BOTTOM_STOP(saved, /*nothing*/); return exitcode; memory_out: @@ -79,7 +81,7 @@ fprintf(stderr, "Fatal error during initialization: %s\n", errmsg); #endif abort(); - return 1; + return 1; /* not actually reachable */ } int PYPY_MAIN_FUNCTION(int argc, char *argv[]) diff --git a/pypy/translator/c/src/mem.h b/pypy/translator/c/src/mem.h --- a/pypy/translator/c/src/mem.h +++ b/pypy/translator/c/src/mem.h @@ -84,6 +84,17 @@ #endif +#ifdef PYPY_GET_SPECIAL_REG /* pypy/rlib/register.py */ +# define OP_GC_STACK_BOTTOM(r) pypy_asm_stack_bottom(); \ + r = PYPY_GET_SPECIAL_REG(); \ + PYPY_SET_SPECIAL_REG((void*)-1) +# define OP_GC_STACK_BOTTOM_STOP(v,r) PYPY_SET_SPECIAL_REG(v) +#else +# define OP_GC_STACK_BOTTOM(r) pypy_asm_stack_bottom() +# define OP_GC_STACK_BOTTOM_STOP(v,r) /* nothing */ +#endif + + /* used by pypy.rlib.rstack, but also by asmgcc */ #define OP_STACK_CURRENT(r) r = (long)&r diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -14,6 +14,7 @@ from pypy.rlib.rarithmetic import r_singlefloat from pypy.rlib.debug import ll_assert from pypy.rlib.rstackovf import _StackOverflow +from pypy.rlib import register from pypy.annotation import model as annmodel from pypy.rpython.annlowlevel import MixLevelHelperAnnotator from pypy.tool.sourcetools import func_with_new_name @@ -51,8 +52,9 @@ class BaseExceptionTransformer(object): - def __init__(self, translator): + def __init__(self, translator, standalone): self.translator = translator + self.standalone = standalone self.raise_analyzer = canraise.RaiseAnalyzer(translator) edata = translator.rtyper.getexceptiondata() self.lltype_of_exception_value = edata.lltype_of_exception_value @@ -72,9 +74,21 @@ assertion_error_ll_exc_type) self.c_n_i_error_ll_exc_type = constant_value(n_i_error_ll_exc_type) + use_special_reg = standalone and 
register.register_number is not None + self.use_special_reg = use_special_reg + if use_special_reg: + self.c_nonnull_specialregister = constant_value(register.nonnull) + self.c_load_from_reg = constant_value(register.load_from_reg) + self.c_reg_is_nonnull = constant_value(register.reg_is_nonnull) + self.c_store_into_reg = constant_value(register.store_into_reg) + def rpyexc_occured(): - exc_type = exc_data.exc_type - return bool(exc_type) + if use_special_reg: + # an exception occurred iff the special register is 0 + return register.load_from_reg() == llmemory.NULL + else: + exc_type = exc_data.exc_type + return bool(exc_type) def rpyexc_fetch_type(): return exc_data.exc_type @@ -83,6 +97,8 @@ return exc_data.exc_value def rpyexc_clear(): + if use_special_reg: + register.store_into_reg(register.nonnull) exc_data.exc_type = null_type exc_data.exc_value = null_value @@ -99,11 +115,15 @@ exc_data.exc_type = etype exc_data.exc_value = evalue lloperation.llop.debug_start_traceback(lltype.Void, etype) + if use_special_reg: + register.store_into_reg(llmemory.NULL) def rpyexc_reraise(etype, evalue): exc_data.exc_type = etype exc_data.exc_value = evalue lloperation.llop.debug_reraise_traceback(lltype.Void, etype) + if use_special_reg: + register.store_into_reg(llmemory.NULL) def rpyexc_fetch_exception(): evalue = rpyexc_fetch_value() @@ -114,6 +134,8 @@ if evalue: exc_data.exc_type = rclass.ll_inst_type(evalue) exc_data.exc_value = evalue + if use_special_reg: + register.store_into_reg(llmemory.NULL) def rpyexc_raise_stack_overflow(): rpyexc_raise(stackovf_ll_exc_type, stackovf_ll_exc) @@ -409,6 +431,8 @@ # self.gen_setfield('exc_value', self.c_null_evalue, llops) self.gen_setfield('exc_type', self.c_null_etype, llops) + if self.use_special_reg: + self.gen_setspecialregister(self.c_nonnull_specialregister, llops) excblock.operations[:] = llops newgraph.exceptblock.inputargs[0].concretetype = self.lltype_of_exception_type newgraph.exceptblock.inputargs[1].concretetype = 
self.lltype_of_exception_value @@ -432,6 +456,8 @@ if alloc_shortcut: T = spaceop.result.concretetype var_no_exc = self.gen_nonnull(spaceop.result, llops) + elif self.use_special_reg: + var_no_exc = self.gen_specialreg_no_exc(llops) else: v_exc_type = self.gen_getfield('exc_type', llops) var_no_exc = self.gen_isnull(v_exc_type, llops) @@ -527,6 +553,17 @@ def gen_nonnull(self, v, llops): return llops.genop('ptr_nonzero', [v], lltype.Bool) + def gen_getspecialregister(self, llops): + return llops.genop('direct_call', [self.c_load_from_reg], + resulttype = llmemory.Address) + + def gen_specialreg_no_exc(self, llops): + return llops.genop('direct_call', [self.c_reg_is_nonnull], + resulttype = lltype.Bool) + + def gen_setspecialregister(self, v, llops): + llops.genop('direct_call', [self.c_store_into_reg, v]) + def same_obj(self, ptr1, ptr2): return ptr1._same_obj(ptr2) @@ -613,10 +650,10 @@ def build_extra_funcs(self): pass -def ExceptionTransformer(translator): +def ExceptionTransformer(translator, standalone): type_system = translator.rtyper.type_system.name if type_system == 'lltypesystem': - return LLTypeExceptionTransformer(translator) + return LLTypeExceptionTransformer(translator, standalone) else: assert type_system == 'ootypesystem' - return OOTypeExceptionTransformer(translator) + return OOTypeExceptionTransformer(translator, standalone) diff --git a/pypy/translator/translator.py b/pypy/translator/translator.py --- a/pypy/translator/translator.py +++ b/pypy/translator/translator.py @@ -108,13 +108,14 @@ type_system = type_system) return self.rtyper - def getexceptiontransformer(self): + def getexceptiontransformer(self, standalone): if self.rtyper is None: raise ValueError("no rtyper") if self.exceptiontransformer is not None: + assert self.exceptiontransformer.standalone == standalone return self.exceptiontransformer from pypy.translator.exceptiontransform import ExceptionTransformer - self.exceptiontransformer = ExceptionTransformer(self) + 
self.exceptiontransformer = ExceptionTransformer(self, standalone) return self.exceptiontransformer def checkgraphs(self): From noreply at buildbot.pypy.org Sun Sep 4 12:12:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 12:12:39 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-perf: Start to integrate the r15-for-exception branch... Message-ID: <20110904101239.8F0138203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-perf Changeset: r47065:249453097f4f Date: 2011-09-04 10:20 +0200 http://bitbucket.org/pypy/pypy/changeset/249453097f4f/ Log: Start to integrate the r15-for-exception branch... diff --git a/pypy/rlib/register.py b/pypy/rlib/register.py --- a/pypy/rlib/register.py +++ b/pypy/rlib/register.py @@ -15,18 +15,20 @@ # For now, only for x86-64. Tries to use the register r15. eci = ExternalCompilationInfo( - post_include_bits=[ - 'register void *pypy_r15 asm("r15");\n' - '#define PYPY_GET_SPECIAL_REG() pypy_r15\n' - '#define PYPY_SPECIAL_REG_NONNULL() (pypy_r15 != NULL)\n' - '#define PYPY_SET_SPECIAL_REG(x) (pypy_r15 = x)\n' - ], + post_include_bits=[""" +register long pypy_r15 asm("r15"); +#define PYPY_GET_SPECIAL_REG() ((void *)(pypy_r15 & ~1)) +#define PYPY_SET_SPECIAL_REG(x) (pypy_r15 = (long)(x) | (pypy_r15 & 1)) +#define PYPY_INCR_SPECIAL_REG(d) (pypy_r15 += (d)) +#define PYPY_SPECIAL_REG_GETEXC() (pypy_r15 & 1) +#define PYPY_SPECIAL_REG_SETEXC(x) (pypy_r15 = (x) ? pypy_r15|1 : pypy_r15&~1) +"""], ) _test_eci = eci.merge(ExternalCompilationInfo( post_include_bits=[""" void f(void) { - pypy_r15 = &f; + pypy_r15 = 12345; } """])) @@ -45,31 +47,34 @@ # the behavior is emulated. 
_value_reg = None + _exc_marker = False def _pypy_get_special_reg(): assert _value_reg is not None return _value_reg - def _pypy_special_reg_nonnull(): - assert _value_reg is not None - return bool(_value_reg) - def _pypy_set_special_reg(addr): global _value_reg _value_reg = addr + def _pypy_incr_special_reg(delta): + global _value_reg + assert _value_reg is not None + _value_reg += delta + + def _pypy_special_reg_getexc(): + return _exc_marker + + def _pypy_special_reg_setexc(flag): + global _value_reg + _exc_marker = flag + load_from_reg = rffi.llexternal('PYPY_GET_SPECIAL_REG', [], llmemory.Address, _callable=_pypy_get_special_reg, compilation_info=eci, _nowrapper=True) - reg_is_nonnull = rffi.llexternal('PYPY_SPECIAL_REG_NONNULL', [], - lltype.Bool, - _callable=_pypy_special_reg_nonnull, - compilation_info=eci, - _nowrapper=True) - store_into_reg = rffi.llexternal('PYPY_SET_SPECIAL_REG', [llmemory.Address], lltype.Void, @@ -77,5 +82,21 @@ compilation_info=eci, _nowrapper=True) - # xxx temporary - nonnull = llmemory.cast_int_to_adr(-1) + incr_reg = rffi.llexternal('PYPY_INCR_SPECIAL_REG', + [lltype.Signed], + lltype.Void, + _callable=_pypy_set_special_reg, + compilation_info=eci, + _nowrapper=True) + + get_exception = rffi.llexternal('PYPY_SPECIAL_REG_GETEXC', [], + lltype.Bool, + _callable=_pypy_special_reg_getexc, + compilation_info=eci, + _nowrapper=True) + + set_exception = rffi.llexternal('PYPY_SPECIAL_REG_SETEXC', [lltype.Bool], + lltype.Void, + _callable=_pypy_special_reg_setexc, + compilation_info=eci, + _nowrapper=True) diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -229,6 +229,11 @@ def getfn(ll_function, args_s, s_result, inline=False, minimal_transform=True): + if isinstance(ll_function, lltype._ptr): + # assume that args_s and s_result match + from pypy.objspace.flow.model import Constant 
+ return Constant(ll_function, lltype.typeOf(ll_function)) + # graph = annhelper.getgraph(ll_function, args_s, s_result) if minimal_transform: self.need_minimal_transform(graph) @@ -250,9 +255,14 @@ [annmodel.SomeAddress()], annmodel.s_None, inline = True) + self.incr_stack_top_ptr = getfn(root_walker.incr_stack_top, + [annmodel.SomeInteger()], + annmodel.s_None, + inline = True) else: self.get_stack_top_ptr = None self.set_stack_top_ptr = None + self.incr_stack_top_ptr = None self.weakref_deref_ptr = self.inittime_helper( ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address) diff --git a/pypy/rpython/memory/gctransform/shadowstack.py b/pypy/rpython/memory/gctransform/shadowstack.py --- a/pypy/rpython/memory/gctransform/shadowstack.py +++ b/pypy/rpython/memory/gctransform/shadowstack.py @@ -2,6 +2,7 @@ from pypy.rpython.memory.gctransform.framework import BaseRootWalker from pypy.rpython.memory.gctransform.framework import sizeofaddr from pypy.rpython import rmodel +from pypy.rlib import register from pypy.rlib.debug import ll_assert from pypy.rpython.lltypesystem import lltype, llmemory from pypy.tool.algo.regalloc import perform_register_allocation @@ -22,13 +23,20 @@ # NB. 
'self' is frozen, but we can use self.gcdata to store state gcdata = self.gcdata - def get_stack_top(): - return gcdata.root_stack_top - self.get_stack_top = get_stack_top - - def set_stack_top(addr): - gcdata.root_stack_top = addr - self.set_stack_top = set_stack_top + if register.register_number is not None: + self.get_stack_top = register.load_from_reg + self.set_stack_top = register.store_into_reg + self.incr_stack_top = register.incr_reg + else: + def get_stack_top(): + return gcdata.root_stack_top + self.get_stack_top = get_stack_top + def set_stack_top(addr): + gcdata.root_stack_top = addr + self.set_stack_top = set_stack_top + def incr_stack_top(delta): + gcdata.root_stack_top += delta + self.incr_stack_top = incr_stack_top self.rootstackhook = gctransformer.root_stack_jit_hook if self.rootstackhook is None: @@ -40,12 +48,12 @@ def push_stack(self, addr): top = self.get_stack_top() + self.incr_stack_top(sizeofaddr) top.address[0] = addr - self.set_stack_top(top + sizeofaddr) def pop_stack(self): top = self.get_stack_top() - sizeofaddr - self.set_stack_top(top) + self.incr_stack_top(-sizeofaddr) return top.address[0] def allocate_stack(self): @@ -54,7 +62,7 @@ def setup_root_walker(self): stackbase = self.allocate_stack() ll_assert(bool(stackbase), "could not allocate root stack") - self.gcdata.root_stack_top = stackbase + self.set_stack_top(stackbase) self.gcdata.root_stack_base = stackbase BaseRootWalker.setup_root_walker(self) @@ -63,7 +71,7 @@ gc = self.gc rootstackhook = self.rootstackhook addr = gcdata.root_stack_base - end = gcdata.root_stack_top + end = self.get_stack_top() while addr != end: addr += rootstackhook(collect_stack_root, gc, addr) if self.collect_stacks_from_other_threads is not None: @@ -307,7 +315,9 @@ blocks_pop_roots[block] = len(llops) block.operations[:] = llops numcolors = -negnumcolors - c_framesize = rmodel.inputconst(lltype.Signed, numcolors * sizeofaddr) + framesize = numcolors * sizeofaddr + c_framesize = 
rmodel.inputconst(lltype.Signed, framesize) + c_minusframesize = rmodel.inputconst(lltype.Signed, -framesize) # # For each block, determine in which category it is: # @@ -399,10 +409,8 @@ if "stop" in blockstate[block]: # "stop" or "startstop" llops = LowLevelOpList() i = blocks_pop_roots[block] - v_topaddr = get_v_topaddr(block, firstuse=i) - v_baseaddr = llops.genop("adr_sub", [v_topaddr, c_framesize], - resulttype=llmemory.Address) - llops.genop("direct_call", [gct.set_stack_top_ptr, v_baseaddr]) + llops.genop("direct_call", [gct.incr_stack_top_ptr, + c_minusframesize]) block.operations[i:i] = llops # ^^^ important: done first, in case it's a startstop block, # otherwise the index in 'blocks_push_roots[block]' is diff --git a/pypy/translator/c/src/mem.h b/pypy/translator/c/src/mem.h --- a/pypy/translator/c/src/mem.h +++ b/pypy/translator/c/src/mem.h @@ -87,7 +87,7 @@ #ifdef PYPY_GET_SPECIAL_REG /* pypy/rlib/register.py */ # define OP_GC_STACK_BOTTOM(r) pypy_asm_stack_bottom(); \ r = PYPY_GET_SPECIAL_REG(); \ - PYPY_SET_SPECIAL_REG((void*)-1) + PYPY_SPECIAL_REG_SETEXC(0) # define OP_GC_STACK_BOTTOM_STOP(v,r) PYPY_SET_SPECIAL_REG(v) #else # define OP_GC_STACK_BOTTOM(r) pypy_asm_stack_bottom() diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -77,15 +77,12 @@ use_special_reg = standalone and register.register_number is not None self.use_special_reg = use_special_reg if use_special_reg: - self.c_nonnull_specialregister = constant_value(register.nonnull) - self.c_load_from_reg = constant_value(register.load_from_reg) - self.c_reg_is_nonnull = constant_value(register.reg_is_nonnull) - self.c_store_into_reg = constant_value(register.store_into_reg) + self.c_get_exception = constant_value(register.get_exception) + self.c_set_exception = constant_value(register.set_exception) def rpyexc_occured(): if use_special_reg: - # an exception 
occurred iff the special register is 0 - return register.load_from_reg() == llmemory.NULL + return register.get_exception() else: exc_type = exc_data.exc_type return bool(exc_type) @@ -98,7 +95,7 @@ def rpyexc_clear(): if use_special_reg: - register.store_into_reg(register.nonnull) + register.set_exception(False) exc_data.exc_type = null_type exc_data.exc_value = null_value @@ -116,14 +113,14 @@ exc_data.exc_value = evalue lloperation.llop.debug_start_traceback(lltype.Void, etype) if use_special_reg: - register.store_into_reg(llmemory.NULL) + register.set_exception(True) def rpyexc_reraise(etype, evalue): exc_data.exc_type = etype exc_data.exc_value = evalue lloperation.llop.debug_reraise_traceback(lltype.Void, etype) if use_special_reg: - register.store_into_reg(llmemory.NULL) + register.set_exception(True) def rpyexc_fetch_exception(): evalue = rpyexc_fetch_value() @@ -135,7 +132,7 @@ exc_data.exc_type = rclass.ll_inst_type(evalue) exc_data.exc_value = evalue if use_special_reg: - register.store_into_reg(llmemory.NULL) + register.set_exception(True) def rpyexc_raise_stack_overflow(): rpyexc_raise(stackovf_ll_exc_type, stackovf_ll_exc) @@ -432,7 +429,7 @@ self.gen_setfield('exc_value', self.c_null_evalue, llops) self.gen_setfield('exc_type', self.c_null_etype, llops) if self.use_special_reg: - self.gen_setspecialregister(self.c_nonnull_specialregister, llops) + self.gen_setexception(False, llops) excblock.operations[:] = llops newgraph.exceptblock.inputargs[0].concretetype = self.lltype_of_exception_type newgraph.exceptblock.inputargs[1].concretetype = self.lltype_of_exception_value @@ -553,16 +550,14 @@ def gen_nonnull(self, v, llops): return llops.genop('ptr_nonzero', [v], lltype.Bool) - def gen_getspecialregister(self, llops): - return llops.genop('direct_call', [self.c_load_from_reg], - resulttype = llmemory.Address) + def gen_specialreg_no_exc(self, llops): + v = llops.genop('direct_call', [self.c_get_exception], + resulttype = lltype.Bool) + return 
llops.genop('bool_not', [v], resulttype=lltype.Bool) - def gen_specialreg_no_exc(self, llops): - return llops.genop('direct_call', [self.c_reg_is_nonnull], - resulttype = lltype.Bool) - - def gen_setspecialregister(self, v, llops): - llops.genop('direct_call', [self.c_store_into_reg, v]) + def gen_setexception(self, flag, llops): + v = inputconst(lltype.Bool, flag) + llops.genop('direct_call', [self.c_set_exception, v]) def same_obj(self, ptr1, ptr2): return ptr1._same_obj(ptr2) From noreply at buildbot.pypy.org Sun Sep 4 12:12:40 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 12:12:40 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-perf: Tweaks. Message-ID: <20110904101240.C6CF78203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-perf Changeset: r47066:f2bf94943a41 Date: 2011-09-04 10:47 +0200 http://bitbucket.org/pypy/pypy/changeset/f2bf94943a41/ Log: Tweaks. diff --git a/pypy/rlib/register.py b/pypy/rlib/register.py --- a/pypy/rlib/register.py +++ b/pypy/rlib/register.py @@ -17,9 +17,10 @@ eci = ExternalCompilationInfo( post_include_bits=[""" register long pypy_r15 asm("r15"); -#define PYPY_GET_SPECIAL_REG() ((void *)(pypy_r15 & ~1)) -#define PYPY_SET_SPECIAL_REG(x) (pypy_r15 = (long)(x) | (pypy_r15 & 1)) -#define PYPY_INCR_SPECIAL_REG(d) (pypy_r15 += (d)) +#define PYPY_GET_SPECIAL_REG_NOMARK() ((void *)pypy_r15) +#define PYPY_SET_SPECIAL_REG_NOMARK(x) (pypy_r15 = (long)(x)) +#define PYPY_GET_SPECIAL_REG_MARK() ((void *)(pypy_r15 & ~1)) +#define PYPY_INCR_SPECIAL_REG_MARK(d) (pypy_r15 += (d)) #define PYPY_SPECIAL_REG_GETEXC() (pypy_r15 & 1) #define PYPY_SPECIAL_REG_SETEXC(x) (pypy_r15 = (x) ? 
pypy_r15|1 : pypy_r15&~1) """], @@ -49,15 +50,25 @@ _value_reg = None _exc_marker = False - def _pypy_get_special_reg(): + def _pypy_get_special_reg_nomark(): + # this must not be called if _exc_marker is set + assert _value_reg is not None + assert not _exc_marker + return _value_reg + + def _pypy_set_special_reg_nomark(addr): + # this must not be called if _exc_marker is set + global _value_reg + assert not _exc_marker + _value_reg = addr + + def _pypy_get_special_reg_mark(): + # this can be called if _exc_marker is set assert _value_reg is not None return _value_reg - def _pypy_set_special_reg(addr): - global _value_reg - _value_reg = addr - - def _pypy_incr_special_reg(delta): + def _pypy_incr_special_reg_mark(delta): + # this can be called if _exc_marker is set global _value_reg assert _value_reg is not None _value_reg += delta @@ -69,26 +80,32 @@ global _value_reg _exc_marker = flag - load_from_reg = rffi.llexternal('PYPY_GET_SPECIAL_REG', [], - llmemory.Address, - _callable=_pypy_get_special_reg, + load_from_reg_nomark = rffi.llexternal('PYPY_GET_SPECIAL_REG_NOMARK', [], + llmemory.Address, + _callable=_pypy_get_special_reg_nomark, + compilation_info=eci, + _nowrapper=True) + + store_into_reg_nomark = rffi.llexternal('PYPY_SET_SPECIAL_REG_NOMARK', + [llmemory.Address], + lltype.Void, + _callable=_pypy_set_special_reg_nomark, + compilation_info=eci, + _nowrapper=True) + + load_from_reg_mark = rffi.llexternal('PYPY_GET_SPECIAL_REG_MARK', [], + llmemory.Address, + _callable=_pypy_get_special_reg_mark, + compilation_info=eci, + _nowrapper=True) + + incr_reg_mark = rffi.llexternal('PYPY_INCR_SPECIAL_REG_MARK', + [lltype.Signed], + lltype.Void, + _callable=_pypy_incr_special_reg_mark, compilation_info=eci, _nowrapper=True) - store_into_reg = rffi.llexternal('PYPY_SET_SPECIAL_REG', - [llmemory.Address], - lltype.Void, - _callable=_pypy_set_special_reg, - compilation_info=eci, - _nowrapper=True) - - incr_reg = rffi.llexternal('PYPY_INCR_SPECIAL_REG', - 
[lltype.Signed], - lltype.Void, - _callable=_pypy_set_special_reg, - compilation_info=eci, - _nowrapper=True) - get_exception = rffi.llexternal('PYPY_SPECIAL_REG_GETEXC', [], lltype.Bool, _callable=_pypy_special_reg_getexc, diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -229,11 +229,6 @@ def getfn(ll_function, args_s, s_result, inline=False, minimal_transform=True): - if isinstance(ll_function, lltype._ptr): - # assume that args_s and s_result match - from pypy.objspace.flow.model import Constant - return Constant(ll_function, lltype.typeOf(ll_function)) - # graph = annhelper.getgraph(ll_function, args_s, s_result) if minimal_transform: self.need_minimal_transform(graph) @@ -246,23 +241,7 @@ # for tests self.frameworkgc__teardown_ptr = getfn(frameworkgc__teardown, [], annmodel.s_None) - - if root_walker.need_root_stack: - self.get_stack_top_ptr = getfn(root_walker.get_stack_top, - [], annmodel.SomeAddress(), - inline = True) - self.set_stack_top_ptr = getfn(root_walker.set_stack_top, - [annmodel.SomeAddress()], - annmodel.s_None, - inline = True) - self.incr_stack_top_ptr = getfn(root_walker.incr_stack_top, - [annmodel.SomeInteger()], - annmodel.s_None, - inline = True) - else: - self.get_stack_top_ptr = None - self.set_stack_top_ptr = None - self.incr_stack_top_ptr = None + self.weakref_deref_ptr = self.inittime_helper( ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address) @@ -1196,7 +1175,7 @@ return livevars def push_roots(self, hop, keep_current_args=False): - if self.get_stack_top_ptr is None: + if not self.root_walker.need_root_stack: return livevars = self.get_livevars_for_roots(hop, keep_current_args) if livevars: @@ -1205,7 +1184,7 @@ return livevars def pop_roots(self, hop, livevars): - if self.get_stack_top_ptr is None: + if not self.root_walker.need_root_stack: return if livevars: 
hop.genop("gc_pop_roots", livevars) diff --git a/pypy/rpython/memory/gctransform/shadowstack.py b/pypy/rpython/memory/gctransform/shadowstack.py --- a/pypy/rpython/memory/gctransform/shadowstack.py +++ b/pypy/rpython/memory/gctransform/shadowstack.py @@ -24,9 +24,17 @@ gcdata = self.gcdata if register.register_number is not None: - self.get_stack_top = register.load_from_reg - self.set_stack_top = register.store_into_reg - self.incr_stack_top = register.incr_reg + self.get_stack_top_nomark = register.load_from_reg_nomark + self.get_stack_top = register.load_from_reg_mark + self.set_stack_top = register.store_into_reg_nomark + self.incr_stack_top = register.incr_reg_mark + for name in ['get_stack_top_nomark', + 'get_stack_top', + 'set_stack_top', + 'incr_stack_top']: + fptr = getattr(self, name) + c = Constant(fptr, lltype.typeOf(fptr)) + setattr(self, name + '_ptr', c) else: def get_stack_top(): return gcdata.root_stack_top @@ -38,6 +46,19 @@ gcdata.root_stack_top += delta self.incr_stack_top = incr_stack_top + self.get_stack_top_ptr = getfn(root_walker.get_stack_top, + [], annmodel.SomeAddress(), + inline = True) + self.set_stack_top_ptr = getfn(root_walker.set_stack_top, + [annmodel.SomeAddress()], + annmodel.s_None, + inline = True) + self.incr_stack_top_ptr = getfn(root_walker.incr_stack_top, + [annmodel.SomeInteger()], + annmodel.s_None, + inline = True) + self.get_stack_top_nomark_ptr = self.get_stack_top_ptr + self.rootstackhook = gctransformer.root_stack_jit_hook if self.rootstackhook is None: def collect_stack_root(callback, gc, addr): @@ -306,7 +327,7 @@ c_k, v]) else: v_topaddr = llops.genop("direct_call", - [gct.get_stack_top_ptr], + [self.get_stack_top_ptr], resulttype=llmemory.Address) v_newaddr = llops.genop("raw_load", [v_topaddr, c_type, c_k], @@ -409,7 +430,7 @@ if "stop" in blockstate[block]: # "stop" or "startstop" llops = LowLevelOpList() i = blocks_pop_roots[block] - llops.genop("direct_call", [gct.incr_stack_top_ptr, + 
llops.genop("direct_call", [self.incr_stack_top_ptr, c_minusframesize]) block.operations[i:i] = llops # ^^^ important: done first, in case it's a startstop block, @@ -418,11 +439,12 @@ if "start" in blockstate[block]: # "start" or "startstop" llops = LowLevelOpList() v_topaddr = get_v_topaddr(block) - v_baseaddr = llops.genop("direct_call",[gct.get_stack_top_ptr], + v_baseaddr = llops.genop("direct_call", + [self.get_stack_top_nomark_ptr], resulttype=llmemory.Address) llops.genop("adr_add", [v_baseaddr, c_framesize]) llops[-1].result = v_topaddr - llops.genop("direct_call", [gct.set_stack_top_ptr, v_topaddr]) + llops.genop("direct_call", [self.set_stack_top_ptr, v_topaddr]) c_null = rmodel.inputconst(llmemory.Address, llmemory.NULL) for k in range(numcolors): c_k = rmodel.inputconst(lltype.Signed, ~k) @@ -434,7 +456,7 @@ # we need to get the current stack top for this block i, topaddr_v = topaddrs_v[block] llops = LowLevelOpList() - llops.genop("direct_call", [gct.get_stack_top_ptr]) + llops.genop("direct_call", [self.get_stack_top_nomark_ptr]) llops[-1].result = topaddr_v block.operations[i:i] = llops # From noreply at buildbot.pypy.org Sun Sep 4 12:31:54 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 12:31:54 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: merge default Message-ID: <20110904103154.39B9F8203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47067:304d094f3f3c Date: 2011-09-04 12:31 +0200 http://bitbucket.org/pypy/pypy/changeset/304d094f3f3c/ Log: merge default diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py --- a/lib-python/modified-2.7/sqlite3/test/regression.py +++ b/lib-python/modified-2.7/sqlite3/test/regression.py @@ -274,6 +274,18 @@ cur.execute("UPDATE foo SET id = 3 WHERE id = 1") self.assertEqual(cur.description, None) + def CheckStatementCache(self): + cur = 
self.con.cursor() + cur.execute("CREATE TABLE foo (id INTEGER)") + values = [(i,) for i in xrange(5)] + cur.executemany("INSERT INTO foo (id) VALUES (?)", values) + + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + self.con.commit() + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -54,7 +54,8 @@ def get_ffi_argtype(self): if self._ffiargtype: return self._ffiargtype - return _shape_to_ffi_type(self._ffiargshape) + self._ffiargtype = _shape_to_ffi_type(self._ffiargshape) + return self._ffiargtype def _CData_output(self, resbuffer, base=None, index=-1): #assert isinstance(resbuffer, _rawffi.ArrayInstance) @@ -225,6 +226,7 @@ 'Z' : _ffi.types.void_p, 'X' : _ffi.types.void_p, 'v' : _ffi.types.sshort, + '?' 
: _ffi.types.ubyte, } diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -293,7 +293,7 @@ # if stat.in_use: stat = Statement(self.connection, sql) - stat.set_cursor_and_factory(cursor, row_factory) + stat.set_row_factory(row_factory) return stat @@ -705,6 +705,8 @@ from sqlite3.dump import _iterdump return _iterdump(self) +DML, DQL, DDL = range(3) + class Cursor(object): def __init__(self, con): if not isinstance(con, Connection): @@ -735,9 +737,9 @@ self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: - if self.statement.kind == "DDL": + if self.statement.kind == DDL: self.connection.commit() - elif self.statement.kind == "DML": + elif self.statement.kind == DML: self.connection._begin() self.statement.set_params(params) @@ -748,18 +750,18 @@ self.statement.reset() raise self.connection._get_exception(ret) - if self.statement.kind == "DQL"and ret == SQLITE_ROW: + if self.statement.kind == DQL and ret == SQLITE_ROW: self.statement._build_row_cast_map() - self.statement._readahead() + self.statement._readahead(self) else: self.statement.item = None self.statement.exhausted = True - if self.statement.kind in ("DML", "DDL"): + if self.statement.kind == DML or self.statement.kind == DDL: self.statement.reset() self.rowcount = -1 - if self.statement.kind == "DML": + if self.statement.kind == DML: self.rowcount = sqlite.sqlite3_changes(self.connection.db) return self @@ -771,8 +773,8 @@ sql = sql.encode("utf-8") self._check_closed() self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) - - if self.statement.kind == "DML": + + if self.statement.kind == DML: self.connection._begin() else: raise ProgrammingError, "executemany is only for DML statements" @@ -824,7 +826,7 @@ return self def __iter__(self): - return self.statement + return iter(self.fetchone, None) def _check_reset(self): if self.reset: @@ 
-841,7 +843,7 @@ return None try: - return self.statement.next() + return self.statement.next(self) except StopIteration: return None @@ -855,7 +857,7 @@ if size is None: size = self.arraysize lst = [] - for row in self.statement: + for row in self: lst.append(row) if len(lst) == size: break @@ -866,7 +868,7 @@ self._check_reset() if self.statement is None: return [] - return list(self.statement) + return list(self) def _getdescription(self): if self._description is None: @@ -904,16 +906,15 @@ self.sql = sql # DEBUG ONLY first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): - self.kind = "DML" + self.kind = DML elif first_word in ("SELECT", "PRAGMA"): - self.kind = "DQL" + self.kind = DQL else: - self.kind = "DDL" + self.kind = DDL self.exhausted = False self.in_use = False # - # set by set_cursor_and_factory - self.cur = None + # set by set_row_factory self.row_factory = None self.statement = c_void_p() @@ -923,7 +924,7 @@ if ret == SQLITE_OK and self.statement.value is None: # an empty statement, we work around that, as it's the least trouble ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char)) - self.kind = "DQL" + self.kind = DQL if ret != SQLITE_OK: raise self.con._get_exception(ret) @@ -935,8 +936,7 @@ self._build_row_cast_map() - def set_cursor_and_factory(self, cur, row_factory): - self.cur = weakref.ref(cur) + def set_row_factory(self, row_factory): self.row_factory = row_factory def _build_row_cast_map(self): @@ -1039,10 +1039,7 @@ raise ProgrammingError("missing parameter '%s'" %param) self.set_param(idx, param) - def __iter__(self): - return self - - def next(self): + def next(self, cursor): self.con._check_closed() self.con._check_thread() if self.exhausted: @@ -1058,10 +1055,10 @@ sqlite.sqlite3_reset(self.statement) raise exc - self._readahead() + self._readahead(cursor) return item - def _readahead(self): + def 
_readahead(self, cursor): self.column_count = sqlite.sqlite3_column_count(self.statement) row = [] for i in xrange(self.column_count): @@ -1096,13 +1093,14 @@ row = tuple(row) if self.row_factory is not None: - row = self.row_factory(self.cur(), row) + row = self.row_factory(cursor, row) self.item = row def reset(self): self.row_cast_map = None ret = sqlite.sqlite3_reset(self.statement) self.in_use = False + self.exhausted = False return ret def finalize(self): @@ -1118,7 +1116,7 @@ self.statement = None def _get_description(self): - if self.kind == "DML": + if self.kind == DML: return None desc = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -59,7 +59,12 @@ # while not target: if not target.__started: - _continulet.__init__(target, _greenlet_start, *args) + if unbound_method != _continulet.throw: + greenlet_func = _greenlet_start + else: + greenlet_func = _greenlet_throw + _continulet.__init__(target, greenlet_func, *args) + unbound_method = _continulet.switch args = () target.__started = True break @@ -136,3 +141,11 @@ if greenlet.parent is not _tls.main: _continuation.permute(greenlet, greenlet.parent) return (res,) + +def _greenlet_throw(greenlet, exc, value, tb): + _tls.current = greenlet + try: + raise exc, value, tb + finally: + if greenlet.parent is not _tls.main: + _continuation.permute(greenlet, greenlet.parent) diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py --- a/pypy/interpreter/pyparser/future.py +++ b/pypy/interpreter/pyparser/future.py @@ -109,25 +109,19 @@ self.getc() == self.getc(+2)): self.pos += 3 while 1: # Deal with a triple quoted docstring - if self.getc() == '\\': - self.pos += 2 + c = self.getc() + if c == '\\': + self.pos += 1 + self._skip_next_char_from_docstring() + elif c != endchar: + self._skip_next_char_from_docstring() else: - c = self.getc() - if c != endchar: - 
self.pos += 1 - if c == '\n': - self.atbol() - elif c == '\r': - if self.getc() == '\n': - self.pos += 1 - self.atbol() - else: - self.pos += 1 - if (self.getc() == endchar and - self.getc(+1) == endchar): - self.pos += 2 - self.consume_empty_line() - break + self.pos += 1 + if (self.getc() == endchar and + self.getc(+1) == endchar): + self.pos += 2 + self.consume_empty_line() + break else: # Deal with a single quoted docstring self.pos += 1 @@ -138,17 +132,21 @@ self.consume_empty_line() return elif c == '\\': - # Deal with linefeeds - if self.getc() != '\r': - self.pos += 1 - else: - self.pos += 1 - if self.getc() == '\n': - self.pos += 1 + self._skip_next_char_from_docstring() elif c in '\r\n': # Syntax error return + def _skip_next_char_from_docstring(self): + c = self.getc() + self.pos += 1 + if c == '\n': + self.atbol() + elif c == '\r': + if self.getc() == '\n': + self.pos += 1 + self.atbol() + def consume_continuation(self): c = self.getc() if c in '\n\r': diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py --- a/pypy/interpreter/pyparser/test/test_futureautomaton.py +++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py @@ -221,6 +221,14 @@ assert f.lineno == 3 assert f.col_offset == 0 +def test_lots_of_continuation_lines(): + s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n" + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_WITH_STATEMENT + assert f.lineno == 8 + assert f.col_offset == 0 + # This looks like a bug in cpython parser # and would require extensive modifications # to future.py in order to emulate the same behaviour @@ -239,3 +247,19 @@ raise AssertionError('IndentationError not raised') assert f.lineno == 2 assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_single_quoted(): + s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == 
fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_triple_quoted(): + s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -130,8 +130,15 @@ results = _find_jit_marker(graphs, 'jit_merge_point') if not results: raise Exception("no jit_merge_point found!") + seen = set([graph for graph, block, pos in results]) + assert len(seen) == len(results), ( + "found several jit_merge_points in the same graph") return results +def locate_jit_merge_point(graph): + [(graph, block, pos)] = find_jit_merge_points([graph]) + return block, pos, block.operations[pos] + def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') @@ -235,7 +242,7 @@ def split_graph_and_record_jitdriver(self, graph, block, pos): op = block.operations[pos] jd = JitDriverStaticData() - jd._jit_merge_point_pos = (graph, op) + jd._jit_merge_point_in = graph args = op.args[2:] s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] @@ -504,7 +511,8 @@ self.make_args_specification(jd) def make_args_specification(self, jd): - graph, op = jd._jit_merge_point_pos + graph = jd._jit_merge_point_in + _, _, op = locate_jit_merge_point(graph) greens_v, reds_v = support.decode_hp_hint_args(op) ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] @@ -552,7 +560,7 @@ assert jitdriver in sublists, \ "can_enter_jit with no matching jit_merge_point" jd, sublist = sublists[jitdriver] - origportalgraph = jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in if graph is not origportalgraph: sublist.append((graph, block, index)) 
jd.no_loop_header = False @@ -582,7 +590,7 @@ can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)] for graph, block, index in can_enter_jits: - if graph is jd._jit_merge_point_pos[0]: + if graph is jd._jit_merge_point_in: continue op = block.operations[index] @@ -640,7 +648,7 @@ # while 1: # more stuff # - origportalgraph = jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in portalgraph = jd.portal_graph PORTALFUNC = jd._PORTAL_FUNCTYPE @@ -794,14 +802,7 @@ # ____________________________________________________________ # Now mutate origportalgraph to end with a call to portal_runner_ptr # - _, op = jd._jit_merge_point_pos - for origblock in origportalgraph.iterblocks(): - if op in origblock.operations: - break - else: - assert False, "lost the operation %r in the graph %r" % ( - op, origportalgraph) - origindex = origblock.operations.index(op) + origblock, origindex, op = locate_jit_merge_point(origportalgraph) assert op.opname == 'jit_marker' assert op.args[0].value == 'jit_merge_point' greens_v, reds_v = support.decode_hp_hint_args(op) diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -43,11 +43,11 @@ def switch(self, w_to): to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None: - if self is to: # double-switch to myself: no-op - return get_result() if to.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") + if self is to: # double-switch to myself: no-op + return get_result() if self.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -8,24 +8,12 @@ 
class WeakrefLifeline(W_Root): + cached_weakref_index = -1 + cached_proxy_index = -1 + def __init__(self, space): self.space = space self.refs_weak = [] - self.cached_weakref_index = -1 - self.cached_proxy_index = -1 - - def __del__(self): - """This runs when the interp-level object goes away, and allows - its lifeline to go away. The purpose of this is to activate the - callbacks even if there is no __del__ method on the interp-level - W_Root subclass implementing the object. - """ - for i in range(len(self.refs_weak) - 1, -1, -1): - w_ref = self.refs_weak[i]() - if w_ref is not None and w_ref.w_callable is not None: - w_ref.enqueue_for_destruction(self.space, - W_WeakrefBase.activate_callback, - 'weakref callback of ') def clear_all_weakrefs(self): """Clear all weakrefs. This is called when an app-level object has @@ -39,12 +27,11 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. - @jit.dont_look_inside - def get_or_make_weakref(self, space, w_subtype, w_obj, w_callable): + def get_or_make_weakref(self, w_subtype, w_obj): + space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) is_weakreftype = space.is_w(w_weakreftype, w_subtype) - can_reuse = space.is_w(w_callable, space.w_None) - if is_weakreftype and can_reuse and self.cached_weakref_index >= 0: + if is_weakreftype and self.cached_weakref_index >= 0: w_cached = self.refs_weak[self.cached_weakref_index]() if w_cached is not None: return w_cached @@ -52,16 +39,15 @@ self.cached_weakref_index = -1 w_ref = space.allocate_instance(W_Weakref, w_subtype) index = len(self.refs_weak) - W_Weakref.__init__(w_ref, space, w_obj, w_callable) + W_Weakref.__init__(w_ref, space, w_obj, None) self.refs_weak.append(weakref.ref(w_ref)) - if is_weakreftype and can_reuse: + if is_weakreftype: self.cached_weakref_index = index return w_ref - @jit.dont_look_inside - def get_or_make_proxy(self, space, w_obj, w_callable): - can_reuse = space.is_w(w_callable, 
space.w_None) - if can_reuse and self.cached_proxy_index >= 0: + def get_or_make_proxy(self, w_obj): + space = self.space + if self.cached_proxy_index >= 0: w_cached = self.refs_weak[self.cached_proxy_index]() if w_cached is not None: return w_cached @@ -69,12 +55,11 @@ self.cached_proxy_index = -1 index = len(self.refs_weak) if space.is_true(space.callable(w_obj)): - w_proxy = W_CallableProxy(space, w_obj, w_callable) + w_proxy = W_CallableProxy(space, w_obj, None) else: - w_proxy = W_Proxy(space, w_obj, w_callable) + w_proxy = W_Proxy(space, w_obj, None) self.refs_weak.append(weakref.ref(w_proxy)) - if can_reuse: - self.cached_proxy_index = index + self.cached_proxy_index = index return w_proxy def get_any_weakref(self, space): @@ -90,6 +75,45 @@ return w_ref return space.w_None + +class WeakrefLifelineWithCallbacks(WeakrefLifeline): + + def __init__(self, space, oldlifeline=None): + self.space = space + if oldlifeline is None: + self.refs_weak = [] + else: + self.refs_weak = oldlifeline.refs_weak + + def __del__(self): + """This runs when the interp-level object goes away, and allows + its lifeline to go away. The purpose of this is to activate the + callbacks even if there is no __del__ method on the interp-level + W_Root subclass implementing the object. 
+ """ + for i in range(len(self.refs_weak) - 1, -1, -1): + w_ref = self.refs_weak[i]() + if w_ref is not None and w_ref.w_callable is not None: + w_ref.enqueue_for_destruction(self.space, + W_WeakrefBase.activate_callback, + 'weakref callback of ') + + def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): + space = self.space + w_ref = space.allocate_instance(W_Weakref, w_subtype) + W_Weakref.__init__(w_ref, space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_ref)) + return w_ref + + def make_proxy_with_callback(self, w_obj, w_callable): + space = self.space + if space.is_true(space.callable(w_obj)): + w_proxy = W_CallableProxy(space, w_obj, w_callable) + else: + w_proxy = W_Proxy(space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_proxy)) + return w_proxy + # ____________________________________________________________ class Dummy: @@ -103,8 +127,7 @@ class W_WeakrefBase(Wrappable): def __init__(w_self, space, w_obj, w_callable): - if space.is_w(w_callable, space.w_None): - w_callable = None + assert w_callable is not space.w_None # should be really None w_self.space = space assert w_obj is not None w_self.w_obj_weak = weakref.ref(w_obj) @@ -177,16 +200,39 @@ def descr__ne__(self, space, w_ref2): return space.not_(space.eq(self, w_ref2)) +def getlifeline(space, w_obj): + lifeline = w_obj.getweakref() + if lifeline is None: + lifeline = WeakrefLifeline(space) + w_obj.setweakref(space, lifeline) + return lifeline + +def getlifelinewithcallbacks(space, w_obj): + lifeline = w_obj.getweakref() + if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None + oldlifeline = lifeline + lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) + w_obj.setweakref(space, lifeline) + return lifeline + + at jit.dont_look_inside +def get_or_make_weakref(space, w_subtype, w_obj): + return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) + + at jit.dont_look_inside +def make_weakref_with_callback(space, w_subtype, 
w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: raise OperationError(space.w_TypeError, space.wrap( "__new__ expected at most 2 arguments")) - lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_weakref(space, w_subtype, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + return get_or_make_weakref(space, w_subtype, w_obj) + else: + return make_weakref_with_callback(space, w_subtype, w_obj, w_callable) W_Weakref.typedef = TypeDef("weakref", __doc__ = """A weak reference to an object 'obj'. A 'callback' can be given, @@ -239,15 +285,23 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) + at jit.dont_look_inside +def get_or_make_proxy(space, w_obj): + return getlifeline(space, w_obj).get_or_make_proxy(w_obj) + + at jit.dont_look_inside +def make_proxy_with_callback(space, w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' is about to be finalized.""" - lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_proxy(space, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + return get_or_make_proxy(space, w_obj) + else: + return make_proxy_with_callback(space, w_obj, w_callable) def descr__new__proxy(space, w_subtype, w_obj, w_callable=None): raise OperationError( diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -369,6 +369,26 @@ return A raises(TypeError, tryit) + def test_proxy_to_dead_object(self): + import _weakref, gc + class A(object): + pass + p = _weakref.proxy(A()) + gc.collect() + raises(ReferenceError, "p + 1") + + def test_proxy_with_callback(self): + import _weakref, gc + class A(object): + pass + a2 = A() + def callback(proxy): + a2.seen = proxy + p = _weakref.proxy(A(), callback) + gc.collect() + raises(ReferenceError, "p + 1") + assert a2.seen is p + def test_repr(self): import _weakref, gc for kind in ('ref', 'proxy'): diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -8,7 +8,8 @@ modname == '__builtin__.interp_classobj' or modname == '__builtin__.functional' or modname == '__builtin__.descriptor' or - modname == 'thread.os_local'): + modname == 'thread.os_local' or + modname == 'thread.os_thread'): return True if '.' 
in modname: modname, _ = modname.split('.', 1) diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -34,7 +34,9 @@ def test_thread_local(): from pypy.module.thread.os_local import Local + from pypy.module.thread.os_thread import get_ident assert pypypolicy.look_inside_function(Local.getdict.im_func) + assert pypypolicy.look_inside_function(get_ident) def test_pypy_module(): from pypy.module._collections.interp_deque import W_Deque diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -23,6 +23,4 @@ guard_not_invalidated(descr=...) p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - p22 = getfield_gc(ConstPtr(ptr21), descr=) - guard_nonnull(p22, descr=...) - """) + """) \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -181,8 +181,7 @@ assert loop.match_by_id("contains", """ guard_not_invalidated(descr=...) i11 = force_token() - i12 = int_add_ovf(i5, i7) - guard_no_overflow(descr=...) 
+ i12 = int_add(i5, 1) """) def test_id_compare_optimization(self): diff --git a/pypy/module/sys/test/test_encoding.py b/pypy/module/sys/test/test_encoding.py new file mode 100644 --- /dev/null +++ b/pypy/module/sys/test/test_encoding.py @@ -0,0 +1,30 @@ +import os, py +from pypy.rlib import rlocale +from pypy.module.sys.interp_encoding import _getfilesystemencoding +from pypy.module.sys.interp_encoding import base_encoding + + +def test__getfilesystemencoding(space): + if not (rlocale.HAVE_LANGINFO and rlocale.CODESET): + py.test.skip("requires HAVE_LANGINFO and CODESET") + + def clear(): + for key in os.environ.keys(): + if key == 'LANG' or key.startswith('LC_'): + del os.environ[key] + + def get(**env): + original_env = os.environ.copy() + try: + clear() + os.environ.update(env) + return _getfilesystemencoding(space) + finally: + clear() + os.environ.update(original_env) + + assert get() in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='foobar') in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='en_US.UTF-8') == 'UTF-8' + assert get(LC_ALL='en_US.UTF-8') == 'UTF-8' + assert get(LC_CTYPE='en_US.UTF-8') == 'UTF-8' diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -231,3 +231,13 @@ assert res == "next step" res = g2.switch("goes to f1 instead") assert res == "all ok" + + def test_throw_in_not_started_yet(self): + from greenlet import greenlet + # + def f1(): + never_reached + # + g1 = greenlet(f1) + raises(ValueError, g1.throw, ValueError) + assert g1.dead diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py rename from pypy/module/test_lib_pypy/test_stackless.py rename to pypy/module/test_lib_pypy/test_stackless_pickle.py diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ 
b/pypy/objspace/std/celldict.py @@ -65,6 +65,10 @@ if isinstance(cell, ModuleCell): cell.w_value = w_value return + # If the new value and the current value are the same, don't create a + # level of indirection, or mutate are version. + if self.space.is_w(w_value, cell): + return if cell is not None: w_value = ModuleCell(w_value) self.mutated() diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -355,9 +355,13 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) - if (mod and ((y < 0.0) != (mod < 0.0))): - mod += y + try: + mod = math.fmod(x, y) + except ValueError: + mod = rfloat.NAN + else: + if (mod and ((y < 0.0) != (mod < 0.0))): + mod += y return W_FloatObject(mod) @@ -366,7 +370,10 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) + try: + mod = math.fmod(x, y) + except ValueError: + return [W_FloatObject(rfloat.NAN), W_FloatObject(rfloat.NAN)] # fmod is typically exact, so vx-mod is *mathematically* an # exact multiple of wx. 
But this is fp arithmetic, and fp # vx - mod is an approximation; the result is that div may diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -39,6 +39,20 @@ assert d.getitem("a") is None assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + def test_same_key_set_twice(self): + strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + d = W_DictMultiObject(space, strategy, storage) + + v1 = strategy.version + x = object() + d.setitem("a", x) + v2 = strategy.version + assert v1 is not v2 + d.setitem("a", x) + v3 = strategy.version + assert v2 is v3 + class AppTestModuleDict(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -767,3 +767,19 @@ def test_invalid(self): raises(ValueError, float.fromhex, "0P") + + def test_division_edgecases(self): + import math + + # inf + inf = float("inf") + assert math.isnan(inf % 3) + assert math.isnan(inf // 3) + x, y = divmod(inf, 3) + assert math.isnan(x) + assert math.isnan(y) + + # divide by 0 + raises(ZeroDivisionError, lambda: inf % 0) + raises(ZeroDivisionError, lambda: inf // 0) + raises(ZeroDivisionError, divmod, inf, 0) \ No newline at end of file diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -134,20 +134,24 @@ def test_custom_metaclass(self): import __pypy__ - class MetaA(type): - def __getattribute__(self, x): - return 1 - def f(self): - return 42 - A = type.__new__(MetaA, "A", (), {"f": f}) - l = [type.__getattribute__(A, "__new__")(A)] * 10 - 
__pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 5 - assert cache_counter[1] >= 1 # should be (27, 3) - assert sum(cache_counter) == 10 + for j in range(20): + class MetaA(type): + def __getattribute__(self, x): + return 1 + def f(self): + return 42 + A = type.__new__(MetaA, "A", (), {"f": f}) + l = [type.__getattribute__(A, "__new__")(A)] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + cache_counter = __pypy__.method_cache_counter("f") + assert sum(cache_counter) == 10 + if cache_counter == (9, 1): + break + #else the moon is misaligned, try again + else: + raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): import __pypy__ diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -206,6 +206,7 @@ _immutable_fields_ = ['funcsym'] argtypes = [] restype = lltype.nullptr(clibffi.FFI_TYPE_P.TO) + flags = 0 funcsym = lltype.nullptr(rffi.VOIDP.TO) def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL, diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -1461,6 +1461,7 @@ # We will fix such references to point to the copy of the young # objects when we walk 'old_objects_pointing_to_young'. self.old_objects_pointing_to_young.append(newobj) + _trace_drag_out._always_inline_ = True def _visit_young_rawmalloced_object(self, obj): # 'obj' points to a young, raw-malloced object. 
diff --git a/pypy/rpython/memory/gctypelayout.py b/pypy/rpython/memory/gctypelayout.py --- a/pypy/rpython/memory/gctypelayout.py +++ b/pypy/rpython/memory/gctypelayout.py @@ -459,7 +459,7 @@ if t._hints.get('immutable'): return if 'immutable_fields' in t._hints: - skip = t._hints['immutable_fields'].fields + skip = t._hints['immutable_fields'].all_immutable_fields() for n, t2 in t._flds.iteritems(): if isinstance(t2, lltype.Ptr) and t2.TO._gckind == 'gc': if n not in skip: diff --git a/pypy/rpython/memory/test/test_gctypelayout.py b/pypy/rpython/memory/test/test_gctypelayout.py --- a/pypy/rpython/memory/test/test_gctypelayout.py +++ b/pypy/rpython/memory/test/test_gctypelayout.py @@ -4,7 +4,7 @@ from pypy.rpython.memory.gctypelayout import gc_pointers_inside from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.test.test_llinterp import get_interpreter -from pypy.rpython.rclass import IR_IMMUTABLE +from pypy.rpython.rclass import IR_IMMUTABLE, IR_QUASIIMMUTABLE from pypy.objspace.flow.model import Constant class FakeGC: @@ -102,7 +102,7 @@ accessor = rclass.FieldListAccessor() S3 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': IR_IMMUTABLE}) + accessor.initialize(S3, {'x': IR_IMMUTABLE, 'y': IR_QUASIIMMUTABLE}) # s1 = lltype.malloc(S1) adr = llmemory.cast_ptr_to_adr(s1) diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -16,6 +16,13 @@ for x in fields.itervalues(): assert isinstance(x, ImmutableRanking) + def all_immutable_fields(self): + result = set() + for key, value in self.fields.iteritems(): + if value in (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY): + result.add(key) + return result + def __repr__(self): return '' % getattr(self, 'TYPE', '?') diff --git a/pypy/tool/py.cleanup b/pypy/tool/py.cleanup --- a/pypy/tool/py.cleanup +++ b/pypy/tool/py.cleanup @@ -1,16 +1,31 @@ #!/usr/bin/env python -import py, sys 
+import sys, os, stat -def shouldremove(p): - return p.ext == '.pyc' +def clean(path): + global count + try: + content = os.listdir(path) + except OSError: + print >> sys.stderr, "skipping", path + return + for fn in content: + filename = os.path.join(path, fn) + st = os.lstat(filename) + if stat.S_ISDIR(st.st_mode): + clean(filename) + if fn == '__pycache__': + try: + os.rmdir(filename) + except OSError: + pass + elif fn.endswith('.pyc') or fn.endswith('.pyo'): + os.unlink(filename) + count += 1 count = 0 for arg in sys.argv[1:] or ['.']: - path = py.path.local(arg) - print "cleaning path", path, "of .pyc files" - for x in path.visit(shouldremove, lambda x: x.check(dotfile=0, link=0)): - x.remove() - count += 1 + print "cleaning path", arg, "of .pyc/.pyo/__pycache__ files" + clean(arg) print "%d files removed" % (count,) diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -260,6 +260,8 @@ try: import _file except ImportError: + if sys.version_info < (2, 7): + return import ctypes # HACK: while running on top of CPython set_file_encoding = ctypes.pythonapi.PyFile_SetEncodingAndErrors set_file_encoding.argtypes = [ctypes.py_object, ctypes.c_char_p, ctypes.c_char_p] @@ -479,7 +481,8 @@ print >> sys.stderr, "'import site' failed" readenv = not ignore_environment - io_encoding = readenv and os.getenv("PYTHONIOENCODING") + io_encoding = ((readenv and os.getenv("PYTHONIOENCODING")) + or sys.getfilesystemencoding()) if io_encoding: set_io_encoding(io_encoding) diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -739,6 +739,19 @@ data = self.run(p + os.sep) assert data == p + os.sep + '\n' + def test_getfilesystemencoding(self): + if sys.version_info < (2, 7): + skip("test requires Python >= 2.7") + p = 
getscript_in_dir(""" + import sys + sys.stdout.write(u'15\u20ac') + sys.stdout.flush() + """) + env = os.environ.copy() + env["LC_CTYPE"] = 'en_US.UTF-8' + data = self.run(p, env=env) + assert data == '15\xe2\x82\xac' + def test_pythonioencoding(self): if sys.version_info < (2, 7): skip("test requires Python >= 2.7") From noreply at buildbot.pypy.org Sun Sep 4 12:56:34 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 12:56:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill the deprecated module/_stackless, as well as rlib/rcoroutine, Message-ID: <20110904105634.70E868203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47068:b1456845b665 Date: 2011-09-04 12:45 +0200 http://bitbucket.org/pypy/pypy/changeset/b1456845b665/ Log: Kill the deprecated module/_stackless, as well as rlib/rcoroutine, and a few references left behind. diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py --- a/pypy/config/test/test_config.py +++ b/pypy/config/test/test_config.py @@ -281,11 +281,11 @@ def test_underscore_in_option_name(): descr = OptionDescription("opt", "", [ - BoolOption("_stackless", "", default=False), + BoolOption("_foobar", "", default=False), ]) config = Config(descr) parser = to_optparse(config) - assert parser.has_option("--_stackless") + assert parser.has_option("--_foobar") def test_none(): dummy1 = BoolOption('dummy1', 'doc dummy', default=False, cmdline=None) diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._stackless.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._stackless.txt +++ /dev/null @@ -1,1 +0,0 @@ -Deprecated. 
diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -639,33 +639,6 @@ a1.free() cb.free() - def test_another_callback_in_stackless(self): - try: - import _stackless - except ImportError: - skip("only valid in a stackless pypy-c") - - import _rawffi - lib = _rawffi.CDLL(self.lib_name) - runcallback = lib.ptr('runcallback', ['P'], 'q') - def callback(): - co = _stackless.coroutine() - def f(): - pass - try: - co.bind(f) - co.switch() - except RuntimeError: - return 1<<42 - return -5 - - cb = _rawffi.CallbackPtr(callback, [], 'q') - a1 = cb.byptr() - res = runcallback(a1) - assert res[0] == 1<<42 - a1.free() - cb.free() - def test_raising_callback(self): import _rawffi, sys import StringIO diff --git a/pypy/module/_stackless/__init__.py b/pypy/module/_stackless/__init__.py deleted file mode 100644 --- a/pypy/module/_stackless/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Package initialisation -from pypy.interpreter.mixedmodule import MixedModule - -class Module(MixedModule): - """ - This module implements Stackless for applications. 
- """ - - appleveldefs = { - 'GreenletExit' : 'app_greenlet.GreenletExit', - 'GreenletError' : 'app_greenlet.GreenletError', - } - - interpleveldefs = { - 'tasklet' : 'interp_stackless.tasklet', - 'coroutine' : 'interp_coroutine.AppCoroutine', - 'greenlet' : 'interp_greenlet.AppGreenlet', - 'usercostate': 'interp_composable_coroutine.W_UserCoState', - '_return_main' : 'interp_coroutine.return_main', - 'get_stack_depth_limit': 'interp_coroutine.get_stack_depth_limit', - 'set_stack_depth_limit': 'interp_coroutine.set_stack_depth_limit', - } - - def setup_after_space_initialization(self): - # post-installing classmethods/staticmethods which - # are not yet directly supported - from pypy.module._stackless.interp_coroutine import post_install as post_install_coro - post_install_coro(self) - from pypy.module._stackless.interp_greenlet import post_install as post_install_greenlet - post_install_greenlet(self) - - if self.space.config.translation.gc == 'marksweep': - from pypy.module._stackless.interp_clonable import post_install as post_install_clonable - self.extra_interpdef('clonable', 'interp_clonable.AppClonableCoroutine') - self.extra_interpdef('fork', 'interp_clonable.fork') - post_install_clonable(self) diff --git a/pypy/module/_stackless/app_greenlet.py b/pypy/module/_stackless/app_greenlet.py deleted file mode 100644 --- a/pypy/module/_stackless/app_greenlet.py +++ /dev/null @@ -1,5 +0,0 @@ -class GreenletExit(Exception): - pass - -class GreenletError(Exception): - pass diff --git a/pypy/module/_stackless/interp_clonable.py b/pypy/module/_stackless/interp_clonable.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_clonable.py +++ /dev/null @@ -1,106 +0,0 @@ -from pypy.interpreter.error import OperationError -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app -from pypy.module._stackless.interp_coroutine import AppCoroutine, AppCoState -from pypy.module._stackless.interp_coroutine import makeStaticMethod 
-from pypy.module._stackless.rcoroutine import AbstractThunk -from pypy.module._stackless.rclonable import InterpClonableMixin - - -class AppClonableCoroutine(AppCoroutine, InterpClonableMixin): - - def newsubctx(self): - self.hello_local_pool() - AppCoroutine.newsubctx(self) - self.goodbye_local_pool() - - def hello(self): - self.hello_local_pool() - AppCoroutine.hello(self) - - def goodbye(self): - AppCoroutine.goodbye(self) - self.goodbye_local_pool() - - def descr_method__new__(space, w_subtype): - co = space.allocate_instance(AppClonableCoroutine, w_subtype) - costate = AppClonableCoroutine._get_state(space) - AppClonableCoroutine.__init__(co, space, state=costate) - return space.wrap(co) - - def _get_state(space): - return space.fromcache(AppClonableCoState) - _get_state = staticmethod(_get_state) - - def w_getcurrent(space): - return space.wrap(AppClonableCoroutine._get_state(space).current) - w_getcurrent = staticmethod(w_getcurrent) - - def w_clone(self): - space = self.space - costate = self.costate - if costate.current is self: - raise OperationError(space.w_RuntimeError, - space.wrap("clone() cannot clone the " - "current coroutine" - "; use fork() instead")) - copy = AppClonableCoroutine(space, state=costate) - copy.subctx = self.clone_into(copy, self.subctx) - return space.wrap(copy) - - def descr__reduce__(self, space): - raise OperationError(space.w_TypeError, - space.wrap("_stackless.clonable instances are " - "not picklable")) - - -AppClonableCoroutine.typedef = TypeDef("clonable", AppCoroutine.typedef, - __new__ = interp2app(AppClonableCoroutine.descr_method__new__.im_func), - getcurrent = interp2app(AppClonableCoroutine.w_getcurrent), - clone = interp2app(AppClonableCoroutine.w_clone), - __reduce__ = interp2app(AppClonableCoroutine.descr__reduce__), -) - -class AppClonableCoState(AppCoState): - def post_install(self): - self.current = self.main = AppClonableCoroutine(self.space, state=self) - self.main.subctx.clear_framestack() # wack - -def 
post_install(module): - makeStaticMethod(module, 'clonable', 'getcurrent') - space = module.space - AppClonableCoroutine._get_state(space).post_install() - -# ____________________________________________________________ - -class ForkThunk(AbstractThunk): - def __init__(self, coroutine): - self.coroutine = coroutine - self.newcoroutine = None - def call(self): - oldcoro = self.coroutine - self.coroutine = None - newcoro = AppClonableCoroutine(oldcoro.space, state=oldcoro.costate) - newcoro.subctx = oldcoro.clone_into(newcoro, oldcoro.subctx) - newcoro.parent = oldcoro - self.newcoroutine = newcoro - -def fork(space): - """Fork, as in the Unix fork(): the call returns twice, and the return - value of the call is either the new 'child' coroutine object (if returning - into the parent), or None (if returning into the child). This returns - into the parent first, which can switch to the child later. - """ - costate = AppClonableCoroutine._get_state(space) - current = costate.current - if current is costate.main: - raise OperationError(space.w_RuntimeError, - space.wrap("cannot fork() in the main " - "clonable coroutine")) - thunk = ForkThunk(current) - coro_fork = AppClonableCoroutine(space, state=costate) - coro_fork.bind(thunk) - coro_fork.switch() - # we resume here twice. The following would need explanations about - # why it returns the correct thing in both the parent and the child... 
- return space.wrap(thunk.newcoroutine) diff --git a/pypy/module/_stackless/interp_composable_coroutine b/pypy/module/_stackless/interp_composable_coroutine deleted file mode 100644 --- a/pypy/module/_stackless/interp_composable_coroutine +++ /dev/null @@ -1,33 +0,0 @@ -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef, interp2app -from pypy.module._stackless.coroutine import AppCoState, AppCoroutine - - -class W_UserCoState(Wrappable): - def __init__(self, space): - self.costate = AppCoState(space) - self.costate.post_install() - - def descr_method__new__(space, w_subtype): - costate = space.allocate_instance(W_UserCoState, w_subtype) - W_UserCoState.__init__(costate, space) - return space.wrap(costate) - - def w_getcurrent(self): - space = self.costate.space - return space.wrap(self.costate.current) - - def w_spawn(self, w_subtype=None): - space = self.costate.space - if space.is_w(w_subtype, space.w_None): - w_subtype = space.gettypeobject(AppCoroutine.typedef) - co = space.allocate_instance(AppCoroutine, w_subtype) - AppCoroutine.__init__(co, space, state=self.costate) - return space.wrap(co) - -W_UserCoState.typedef = TypeDef("usercostate", - __new__ = interp2app(W_UserCoState.descr_method__new__.im_func), - __module__ = '_stackless', - getcurrent = interp2app(W_UserCoState.w_getcurrent), - spawn = interp2app(W_UserCoState.w_spawn), -) diff --git a/pypy/module/_stackless/interp_composable_coroutine.py b/pypy/module/_stackless/interp_composable_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_composable_coroutine.py +++ /dev/null @@ -1,34 +0,0 @@ -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef, interp2app -from pypy.module._stackless.interp_coroutine import AppCoState, AppCoroutine - - -class W_UserCoState(Wrappable): - def __init__(self, space): - self.costate = AppCoState(space) - self.costate.post_install() - - def 
descr_method__new__(space, w_subtype): - costate = space.allocate_instance(W_UserCoState, w_subtype) - W_UserCoState.__init__(costate, space) - return space.wrap(costate) - - def w_getcurrent(self): - space = self.costate.space - return space.wrap(self.costate.current) - - def w_spawn(self, w_subtype=None): - space = self.costate.space - if space.is_w(w_subtype, space.w_None): - w_subtype = space.gettypeobject(AppCoroutine.typedef) - co = space.allocate_instance(AppCoroutine, w_subtype) - AppCoroutine.__init__(co, space, state=self.costate) - return space.wrap(co) - -W_UserCoState.typedef = TypeDef("usercostate", - __new__ = interp2app(W_UserCoState.descr_method__new__.im_func), - __module__ = '_stackless', - getcurrent = interp2app(W_UserCoState.w_getcurrent), - spawn = interp2app(W_UserCoState.w_spawn), -) -W_UserCoState.acceptable_as_base_class = False diff --git a/pypy/module/_stackless/interp_coroutine.py b/pypy/module/_stackless/interp_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_coroutine.py +++ /dev/null @@ -1,403 +0,0 @@ -""" -Coroutine implementation for application level on top -of the internal coroutines. -This is an extensible concept. Multiple implementations -of concurrency can exist together, if they follow the -basic concept of maintaining their own costate. - -There is also some diversification possible by using -multiple costates for the same type. This leads to -disjoint switchable sets within the same type. - -I'm not so sure to what extent the opposite is possible, too. -I.e., merging the costate of tasklets and greenlets would -allow them to be parents of each other. Needs a bit more -experience to decide where to set the limits. 
-""" - -from pypy.interpreter.argument import Arguments -from pypy.interpreter.typedef import GetSetProperty, TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError, operationerrfmt - -from pypy.module._stackless.stackless_flags import StacklessFlags -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState, AbstractThunk, CoroutineExit - -from pypy.module.exceptions.interp_exceptions import W_SystemExit, _new_exception - -from pypy.rlib import rstack, jit # for resume points -from pypy.tool import stdlib_opcode as pythonopcode - -class _AppThunk(AbstractThunk): - - def __init__(self, space, costate, w_obj, args): - self.space = space - self.costate = costate - if not space.is_true(space.callable(w_obj)): - raise operationerrfmt( - space.w_TypeError, - "'%s' object is not callable", - space.type(w_obj).getname(space)) - self.w_func = w_obj - self.args = args - - def call(self): - costate = self.costate - w_result = self.space.call_args(self.w_func, self.args) - costate.w_tempval = w_result - -class _ResumeThunk(AbstractThunk): - def __init__(self, space, costate, w_frame): - self.space = space - self.costate = costate - self.w_frame = w_frame - - def call(self): - w_result = resume_frame(self.space, self.w_frame) - # costate.w_tempval = w_result #XXX? - - -W_CoroutineExit = _new_exception('CoroutineExit', W_SystemExit, - """Coroutine killed manually.""") - -# Should be moved to interp_stackless.py if it's ever implemented... Currently -# used by pypy/lib/stackless.py. 
-W_TaskletExit = _new_exception('TaskletExit', W_SystemExit, - """Tasklet killed manually.""") - -class AppCoroutine(Coroutine): # XXX, StacklessFlags): - - def __init__(self, space, state=None): - self.space = space - if state is None: - state = AppCoroutine._get_state(space) - Coroutine.__init__(self, state) - self.flags = 0 - self.newsubctx() - - def newsubctx(self): - ec = self.space.getexecutioncontext() - self.subctx = ec.Subcontext() - - def descr_method__new__(space, w_subtype): - co = space.allocate_instance(AppCoroutine, w_subtype) - AppCoroutine.__init__(co, space) - return space.wrap(co) - - def _get_state(space): - return space.fromcache(AppCoState) - _get_state = staticmethod(_get_state) - - def w_bind(self, w_func, __args__): - space = self.space - if self.frame is not None: - raise OperationError(space.w_ValueError, space.wrap( - "cannot bind a bound Coroutine")) - state = self.costate - thunk = _AppThunk(space, state, w_func, __args__) - self.bind(thunk) - - def w_switch(self): - space = self.space - if self.frame is None: - raise OperationError(space.w_ValueError, space.wrap( - "cannot switch to an unbound Coroutine")) - state = self.costate - self.switch() - w_ret, state.w_tempval = state.w_tempval, space.w_None - return w_ret - - def switch(self): - space = self.space - try: - Coroutine.switch(self) - except CoroutineExit: - raise OperationError(self.costate.w_CoroutineExit, space.w_None) - - def w_finished(self, w_excinfo): - pass - - def finish(self, operror=None): - space = self.space - if isinstance(operror, OperationError): - w_exctype = operror.w_type - w_excvalue = operror.get_w_value(space) - w_exctraceback = operror.get_traceback() - w_excinfo = space.newtuple([w_exctype, w_excvalue, w_exctraceback]) - - if w_exctype is self.costate.w_CoroutineExit: - self.coroutine_exit = True - else: - w_N = space.w_None - w_excinfo = space.newtuple([w_N, w_N, w_N]) - - return space.call_method(space.wrap(self),'finished', w_excinfo) - - def 
hello(self): - ec = self.space.getexecutioncontext() - self.subctx.enter(ec) - - def goodbye(self): - ec = self.space.getexecutioncontext() - self.subctx.leave(ec) - - def w_kill(self): - self.kill() - - def w_throw(self, w_type, w_value=None, w_traceback=None): - space = self.space - - operror = OperationError(w_type, w_value) - operror.normalize_exception(space) - - if not space.is_w(w_traceback, space.w_None): - from pypy.interpreter import pytraceback - tb = space.interpclass_w(w_traceback) - if tb is None or not space.is_true(space.isinstance(tb, - space.gettypeobject(pytraceback.PyTraceback.typedef))): - raise OperationError(space.w_TypeError, - space.wrap("throw: arg 3 must be a traceback or None")) - operror.set_traceback(tb) - - self._kill(operror) - - def _userdel(self): - if self.get_is_zombie(): - return - self.set_is_zombie(True) - self.space.userdel(self.space.wrap(self)) - - def w_getcurrent(space): - return space.wrap(AppCoroutine._get_state(space).current) - w_getcurrent = staticmethod(w_getcurrent) - - def w_getmain(space): - return space.wrap(AppCoroutine._get_state(space).main) - w_getmain = staticmethod(w_getmain) - - # pickling interface - def descr__reduce__(self, space): - # this is trying to be simplistic at the moment. - # we neither allow to pickle main (which can become a mess - # since it has some deep anchestor frames) - # nor we allow to pickle the current coroutine. - # rule: switch before pickling. - # you cannot construct the tree that you are climbing. 
- from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_stackless') - mod = space.interp_w(MixedModule, w_mod) - w_mod2 = space.getbuiltinmodule('_pickle_support') - mod2 = space.interp_w(MixedModule, w_mod2) - w_new_inst = mod.get('coroutine') - w = space.wrap - nt = space.newtuple - ec = self.space.getexecutioncontext() - - if self is self.costate.main: - return nt([mod.get('_return_main'), nt([])]) - - thunk = self.thunk - if isinstance(thunk, _AppThunk): - w_args, w_kwds = thunk.args.topacked() - w_thunk = nt([thunk.w_func, w_args, w_kwds]) - else: - w_thunk = space.w_None - - tup_base = [ - ] - tup_state = [ - w(self.flags), - self.subctx.getstate(space), - w_thunk, - w(self.parent), - ] - - return nt([w_new_inst, nt(tup_base), nt(tup_state)]) - - def descr__setstate__(self, space, w_args): - w_flags, w_state, w_thunk, w_parent = space.unpackiterable(w_args, - expected_length=4) - self.flags = space.int_w(w_flags) - if space.is_w(w_parent, space.w_None): - w_parent = self.w_getmain(space) - self.parent = space.interp_w(AppCoroutine, w_parent) - ec = self.space.getexecutioncontext() - self.subctx.setstate(space, w_state) - if space.is_w(w_thunk, space.w_None): - if space.is_w(w_state, space.w_None): - self.thunk = None - else: - self.bind(_ResumeThunk(space, self.costate, self.subctx.topframe)) - else: - w_func, w_args, w_kwds = space.unpackiterable(w_thunk, - expected_length=3) - args = Arguments.frompacked(space, w_args, w_kwds) - self.bind(_AppThunk(space, self.costate, w_func, args)) - - -# _mixin_ did not work -for methname in StacklessFlags.__dict__: - meth = getattr(StacklessFlags, methname) - if hasattr(meth, 'im_func'): - setattr(AppCoroutine, meth.__name__, meth.im_func) -del meth, methname - -def w_get_is_zombie(self, space): - return space.wrap(self.get_is_zombie()) -AppCoroutine.w_get_is_zombie = w_get_is_zombie - -def w_get_is_alive(self, space): - return space.wrap(self.is_alive()) -AppCoroutine.w_get_is_alive = 
w_get_is_alive - -def w_descr__framestack(self, space): - assert isinstance(self, AppCoroutine) - counter = 0 - f = self.subctx.topframe - while f is not None: - counter += 1 - f = f.f_backref() - items = [None] * counter - f = self.subctx.topframe - while f is not None: - counter -= 1 - assert counter >= 0 - items[counter] = space.wrap(f) - f = f.f_backref() - assert counter == 0 - return space.newtuple(items) - -def makeStaticMethod(module, classname, funcname): - "NOT_RPYTHON" - space = module.space - w_klass = space.getattr(space.wrap(module), space.wrap(classname)) - # HACK HACK HACK - # make the typeobject mutable for a while - from pypy.objspace.std.typeobject import W_TypeObject - assert isinstance(w_klass, W_TypeObject) - old_flag = w_klass.flag_heaptype - w_klass.flag_heaptype = True - - space.appexec([w_klass, space.wrap(funcname)], """ - (klass, funcname): - func = getattr(klass, funcname) - setattr(klass, funcname, staticmethod(func.im_func)) - """) - w_klass.flag_heaptype = old_flag - -def post_install(module): - makeStaticMethod(module, 'coroutine', 'getcurrent') - makeStaticMethod(module, 'coroutine', 'getmain') - space = module.space - AppCoroutine._get_state(space).post_install() - -# space.appexec("""() : - -# maybe use __spacebind__ for postprocessing - -AppCoroutine.typedef = TypeDef("coroutine", - __new__ = interp2app(AppCoroutine.descr_method__new__.im_func), - bind = interp2app(AppCoroutine.w_bind), - switch = interp2app(AppCoroutine.w_switch), - kill = interp2app(AppCoroutine.w_kill), - throw = interp2app(AppCoroutine.w_throw), - finished = interp2app(AppCoroutine.w_finished), - is_alive = GetSetProperty(AppCoroutine.w_get_is_alive), - is_zombie = GetSetProperty(AppCoroutine.w_get_is_zombie, - doc=AppCoroutine.get_is_zombie.__doc__), #--- this flag is a bit obscure - # and not useful (it's totally different from Coroutine.is_zombie(), too) - # but lib/stackless.py uses it - _framestack = GetSetProperty(w_descr__framestack), - getcurrent = 
interp2app(AppCoroutine.w_getcurrent), - getmain = interp2app(AppCoroutine.w_getmain), - __reduce__ = interp2app(AppCoroutine.descr__reduce__), - __setstate__ = interp2app(AppCoroutine.descr__setstate__), - __module__ = '_stackless', -) - -class AppCoState(BaseCoState): - def __init__(self, space): - BaseCoState.__init__(self) - self.w_tempval = space.w_None - self.space = space - - # XXX Workaround: for now we need to instantiate these classes - # explicitly for translation to work - W_CoroutineExit(space) - W_TaskletExit(space) - - # Exporting new exception to space - self.w_CoroutineExit = space.gettypefor(W_CoroutineExit) - space.setitem( - space.exceptions_module.w_dict, - space.new_interned_str('CoroutineExit'), - self.w_CoroutineExit) - space.setitem(space.builtin.w_dict, - space.new_interned_str('CoroutineExit'), - self.w_CoroutineExit) - - # Should be moved to interp_stackless.py if it's ever implemented... - self.w_TaskletExit = space.gettypefor(W_TaskletExit) - space.setitem( - space.exceptions_module.w_dict, - space.new_interned_str('TaskletExit'), - self.w_TaskletExit) - space.setitem(space.builtin.w_dict, - space.new_interned_str('TaskletExit'), - self.w_TaskletExit) - - def post_install(self): - self.current = self.main = AppCoroutine(self.space, state=self) - self.main.subctx.clear_framestack() # wack - -def return_main(space): - return AppCoroutine._get_state(space).main - -def get_stack_depth_limit(space): - return space.wrap(rstack.get_stack_depth_limit()) - - at unwrap_spec(limit=int) -def set_stack_depth_limit(space, limit): - rstack.set_stack_depth_limit(limit) - - -# ___________________________________________________________________ -# unpickling trampoline - -def resume_frame(space, w_frame): - from pypy.interpreter.pyframe import PyFrame - frame = space.interp_w(PyFrame, w_frame, can_be_None=True) - w_result = space.w_None - operr = None - executioncontext = frame.space.getexecutioncontext() - while frame is not None: - code = 
frame.pycode.co_code - instr = frame.last_instr - opcode = ord(code[instr]) - map = pythonopcode.opmap - call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], map['CALL_FUNCTION_VAR'], - map['CALL_FUNCTION_VAR_KW'], map['CALL_METHOD']] - assert opcode in call_ops - instr += 1 - oparg = ord(code[instr]) | ord(code[instr + 1]) << 8 - nargs = oparg & 0xff - nkwds = (oparg >> 8) & 0xff - if nkwds == 0: # only positional arguments - # fast paths leaves things on the stack, pop them - if space.config.objspace.opcodes.CALL_METHOD and opcode == map['CALL_METHOD']: - frame.dropvalues(nargs + 2) - elif opcode == map['CALL_FUNCTION']: - frame.dropvalues(nargs + 1) - - # small hack: unlink frame out of the execution context, because - # execute_frame will add it there again - executioncontext.topframeref = jit.non_virtual_ref(frame.f_backref()) - frame.last_instr = instr + 1 # continue after the call - try: - w_result = frame.execute_frame(w_result, operr) - except OperationError, operr: - pass - frame = frame.f_backref() - if operr: - raise operr - return w_result diff --git a/pypy/module/_stackless/interp_greenlet.py b/pypy/module/_stackless/interp_greenlet.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_greenlet.py +++ /dev/null @@ -1,238 +0,0 @@ -from pypy.interpreter.argument import Arguments -from pypy.interpreter.typedef import GetSetProperty, TypeDef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.gateway import NoneNotWrapped -from pypy.interpreter.error import OperationError - -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState -from pypy.module._stackless.rcoroutine import AbstractThunk, syncstate -from pypy.module._stackless.interp_coroutine import makeStaticMethod - - -class GreenletThunk(AbstractThunk): - - def __init__(self, greenlet): - self.greenlet = greenlet - - def call(self): - greenlet = self.greenlet - greenlet.active = True - try: - space = greenlet.space - args_w = greenlet.costate.args_w - 
__args__ = Arguments(space, args_w) - try: - w_run = space.getattr(space.wrap(greenlet), space.wrap('run')) - greenlet.w_callable = None - w_result = space.call_args(w_run, __args__) - except OperationError, operror: - if not operror.match(space, greenlet.costate.w_GreenletExit): - raise - w_result = operror.get_w_value(space) - finally: - greenlet.active = False - greenlet.costate.args_w = [w_result] - -class AppGreenletCoState(BaseCoState): - def __init__(self, space): - BaseCoState.__init__(self) - self.args_w = None - self.space = space - self.w_GreenletExit = get(space, "GreenletExit") - self.w_GreenletError = get(space, "GreenletError") - - def post_install(self): - self.current = self.main = AppGreenlet(self.space, is_main=True) - -class AppGreenlet(Coroutine): - def __init__(self, space, w_callable=None, is_main=False): - Coroutine.__init__(self, self._get_state(space)) - self.space = space - self.w_callable = w_callable - self.active = is_main - self.subctx = space.getexecutioncontext().Subcontext() - if is_main: - self.subctx.clear_framestack() # wack - else: - self.bind(GreenletThunk(self)) - - def descr_method__new__(space, w_subtype, __args__): - co = space.allocate_instance(AppGreenlet, w_subtype) - AppGreenlet.__init__(co, space) - return space.wrap(co) - - def descr_method__init__(self, w_run=NoneNotWrapped, - w_parent=NoneNotWrapped): - if w_run is not None: - self.set_run(w_run) - if w_parent is not None: - self.set_parent(w_parent) - - def _get_state(space): - return space.fromcache(AppGreenletCoState) - _get_state = staticmethod(_get_state) - - def hello(self): - ec = self.space.getexecutioncontext() - self.subctx.enter(ec) - - def goodbye(self): - ec = self.space.getexecutioncontext() - self.subctx.leave(ec) - - def w_getcurrent(space): - return space.wrap(AppGreenlet._get_state(space).current) - w_getcurrent = staticmethod(w_getcurrent) - - def w_switch(self, args_w): - # Find the switch target - it might be a parent greenlet - space = 
self.space - costate = self.costate - target = self - while target.isdead(): - target = target.parent - assert isinstance(target, AppGreenlet) - # Switch to it - costate.args_w = args_w - if target is not costate.current: - target.switch() - else: - # case not handled in Coroutine.switch() - syncstate._do_things_to_do() - result_w = costate.args_w - costate.args_w = None - # costate.args_w can be set to None above for throw(), but then - # switch() should have raised. At this point cosstate.args_w != None. - assert result_w is not None - # Return the result of a switch, packaging it in a tuple if - # there is more than one value. - if len(result_w) == 1: - return result_w[0] - return space.newtuple(result_w) - - def w_throw(self, w_type=None, w_value=None, w_traceback=None): - space = self.space - if space.is_w(w_type, space.w_None): - w_type = self.costate.w_GreenletExit - # Code copied from RAISE_VARARGS but slightly modified. Not too nice. - operror = OperationError(w_type, w_value) - operror.normalize_exception(space) - if not space.is_w(w_traceback, space.w_None): - from pypy.interpreter import pytraceback - tb = space.interpclass_w(w_traceback) - if tb is None or not space.is_true(space.isinstance(tb, - space.gettypeobject(pytraceback.PyTraceback.typedef))): - raise OperationError(space.w_TypeError, - space.wrap("throw: arg 3 must be a traceback or None")) - operror.set_traceback(tb) - # Dead greenlet: turn GreenletExit into a regular return - if self.isdead() and operror.match(space, self.costate.w_GreenletExit): - args_w = [operror.get_w_value(space)] - else: - syncstate.push_exception(operror) - args_w = None - return self.w_switch(args_w) - - def _userdel(self): - self.space.userdel(self.space.wrap(self)) - - def isdead(self): - return self.thunk is None and not self.active - - def w_get_is_dead(self, space): - return space.newbool(self.isdead()) - - def descr__nonzero__(self): - return self.space.newbool(self.active) - - def w_get_run(self, space): - 
w_run = self.w_callable - if w_run is None: - raise OperationError(space.w_AttributeError, space.wrap("run")) - return w_run - - def set_run(self, w_run): - space = self.space - if self.thunk is None: - raise OperationError(space.w_AttributeError, - space.wrap("run cannot be set " - "after the start of the greenlet")) - self.w_callable = w_run - - def w_set_run(self, space, w_run): - self.set_run(w_run) - - def w_del_run(self, space): - if self.w_callable is None: - raise OperationError(space.w_AttributeError, space.wrap("run")) - self.w_callable = None - - def w_get_parent(self, space): - return space.wrap(self.parent) - - def set_parent(self, w_parent): - space = self.space - newparent = space.interp_w(AppGreenlet, w_parent) - if newparent.costate is not self.costate: - raise OperationError(self.costate.w_GreenletError, - space.wrap("invalid foreign parent")) - curr = newparent - while curr: - if curr is self: - raise OperationError(space.w_ValueError, - space.wrap("cyclic parent chain")) - curr = curr.parent - self.parent = newparent - - def w_set_parent(self, space, w_parent): - self.set_parent(w_parent) - - def w_get_frame(self, space): - if not self.active or self.costate.current is self: - f = None - else: - f = self.subctx.topframe - return space.wrap(f) - -def get(space, name): - w_module = space.getbuiltinmodule('_stackless') - return space.getattr(w_module, space.wrap(name)) - -def post_install(module): - "NOT_RPYTHON" - makeStaticMethod(module, 'greenlet', 'getcurrent') - space = module.space - state = AppGreenlet._get_state(space) - state.post_install() - w_greenlet = get(space, 'greenlet') - # HACK HACK HACK - # make the typeobject mutable for a while - from pypy.objspace.std.typeobject import W_TypeObject - assert isinstance(w_greenlet, W_TypeObject) - old_flag = w_greenlet.flag_heaptype - w_greenlet.flag_heaptype = True - space.appexec([w_greenlet, - state.w_GreenletExit, - state.w_GreenletError], """ - (greenlet, exit, error): - 
greenlet.GreenletExit = exit - greenlet.error = error - """) - w_greenlet.flag_heaptype = old_flag - -AppGreenlet.typedef = TypeDef("greenlet", - __new__ = interp2app(AppGreenlet.descr_method__new__.im_func), - __init__ = interp2app(AppGreenlet.descr_method__init__), - switch = interp2app(AppGreenlet.w_switch), - dead = GetSetProperty(AppGreenlet.w_get_is_dead), - run = GetSetProperty(AppGreenlet.w_get_run, - AppGreenlet.w_set_run, - AppGreenlet.w_del_run), - parent = GetSetProperty(AppGreenlet.w_get_parent, - AppGreenlet.w_set_parent), - getcurrent = interp2app(AppGreenlet.w_getcurrent), - throw = interp2app(AppGreenlet.w_throw), - gr_frame = GetSetProperty(AppGreenlet.w_get_frame), - __nonzero__ = interp2app(AppGreenlet.descr__nonzero__), - __module__ = '_stackless', -) diff --git a/pypy/module/_stackless/interp_stackless.py b/pypy/module/_stackless/interp_stackless.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_stackless.py +++ /dev/null @@ -1,28 +0,0 @@ -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app -import os - - -class tasklet(Wrappable): - - def __init__(self, space): - self.space = space - self.flags = 0 - self.state = None - - def descr_method__new__(space, w_subtype): - t = space.allocate_instance(tasklet, w_subtype) - tasklet.__init__(t, space) - return space.wrap(t) - - def w_demo(self): - output("42") - -tasklet.typedef = TypeDef("tasklet", - __new__ = interp2app(tasklet.descr_method__new__.im_func), - demo = interp2app(tasklet.w_demo), -) - -def output(stuff): - os.write(2, stuff + '\n') diff --git a/pypy/module/_stackless/rclonable.py b/pypy/module/_stackless/rclonable.py deleted file mode 100644 --- a/pypy/module/_stackless/rclonable.py +++ /dev/null @@ -1,87 +0,0 @@ -from pypy.module._stackless.interp_coroutine import AbstractThunk, Coroutine -from pypy.rlib.rgc import gc_swap_pool, gc_clone -from pypy.rlib.objectmodel import 
we_are_translated - - -class InterpClonableMixin: - local_pool = None - _mixin_ = True - - def hello_local_pool(self): - if we_are_translated(): - self.saved_pool = gc_swap_pool(self.local_pool) - - def goodbye_local_pool(self): - if we_are_translated(): - self.local_pool = gc_swap_pool(self.saved_pool) - self.saved_pool = None - - def clone_into(self, copy, extradata=None): - if not we_are_translated(): - raise NotImplementedError - # cannot gc_clone() directly self, because it is not in its own - # local_pool. Moreover, it has a __del__, which cloning doesn't - # support properly at the moment. - copy.parent = self.parent - # the hello/goodbye pair has two purposes: it forces - # self.local_pool to be computed even if it was None up to now, - # and it puts the 'data' tuple in the correct pool to be cloned. - self.hello_local_pool() - data = (self.frame, extradata) - self.goodbye_local_pool() - # clone! - data, copy.local_pool = gc_clone(data, self.local_pool) - copy.frame, extradata = data - copy.thunk = self.thunk # in case we haven't switched to self yet - return extradata - - -class InterpClonableCoroutine(Coroutine, InterpClonableMixin): - - def hello(self): - self.hello_local_pool() - - def goodbye(self): - self.goodbye_local_pool() - - def clone(self): - # hack, this is overridden in AppClonableCoroutine - if self.getcurrent() is self: - raise RuntimeError("clone() cannot clone the current coroutine; " - "use fork() instead") - copy = InterpClonableCoroutine(self.costate) - self.clone_into(copy) - return copy - - -class ForkThunk(AbstractThunk): - def __init__(self, coroutine): - self.coroutine = coroutine - self.newcoroutine = None - def call(self): - oldcoro = self.coroutine - self.coroutine = None - newcoro = oldcoro.clone() - newcoro.parent = oldcoro - self.newcoroutine = newcoro - -def fork(): - """Fork, as in the Unix fork(): the call returns twice, and the return - value of the call is either the new 'child' coroutine object (if returning - into the 
parent), or None (if returning into the child). This returns - into the parent first, which can switch to the child later. - """ - current = InterpClonableCoroutine.getcurrent() - if not isinstance(current, InterpClonableCoroutine): - raise RuntimeError("fork() in a non-clonable coroutine") - thunk = ForkThunk(current) - coro_fork = InterpClonableCoroutine() - coro_fork.bind(thunk) - coro_fork.switch() - # we resume here twice. The following would need explanations about - # why it returns the correct thing in both the parent and the child... - return thunk.newcoroutine - -## from pypy.rpython.lltypesystem import lltype, lloperation -## lloperation.llop.debug_view(lltype.Void, current, thunk, -## lloperation.llop.gc_x_size_header(lltype.Signed)) diff --git a/pypy/module/_stackless/rcoroutine.py b/pypy/module/_stackless/rcoroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/rcoroutine.py +++ /dev/null @@ -1,10 +0,0 @@ -from pypy.rlib.rcoroutine import make_coroutine_classes -from pypy.interpreter.baseobjspace import Wrappable - -d = make_coroutine_classes(Wrappable) - -Coroutine = d['Coroutine'] -BaseCoState = d['BaseCoState'] -AbstractThunk = d['AbstractThunk'] -syncstate = d['syncstate'] -CoroutineExit = d['CoroutineExit'] diff --git a/pypy/module/_stackless/stackless_flags.py b/pypy/module/_stackless/stackless_flags.py deleted file mode 100644 --- a/pypy/module/_stackless/stackless_flags.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -basic definitions for tasklet flags. -For simplicity and compatibility, -they are defined the same for coroutines, -even if they are not used. - -taken from tasklet_structs.h ----------------------------- - -/*************************************************************************** - - Tasklet Flag Definition - ----------------------- - - blocked: The tasklet is either waiting in a channel for - writing (1) or reading (-1) or not blocked (0). - Maintained by the channel logic. Do not change. 
- - atomic: If true, schedulers will never switch. Driven by - the code object or dynamically, see below. - - ignore_nesting: Allows auto-scheduling, even if nesting_level - is not zero. - - autoschedule: The tasklet likes to be auto-scheduled. User driven. - - block_trap: Debugging aid. Whenever the tasklet would be - blocked by a channel, an exception is raised. - - is_zombie: This tasklet is almost dead, its deallocation has - started. The tasklet *must* die at some time, or the - process can never end. - - pending_irq: If set, an interrupt was issued during an atomic - operation, and should be handled when possible. - - - Policy for atomic/autoschedule and switching: - --------------------------------------------- - A tasklet switch can always be done explicitly by calling schedule(). - Atomic and schedule are concerned with automatic features. - - atomic autoschedule - - 1 any Neither a scheduler nor a watchdog will - try to switch this tasklet. - - 0 0 The tasklet can be stopped on desire, or it - can be killed by an exception. - - 0 1 Like above, plus auto-scheduling is enabled. - - Default settings: - ----------------- - All flags are zero by default. 
- - ***************************************************************************/ - -typedef struct _tasklet_flags { - int blocked: 2; - unsigned int atomic: 1; - unsigned int ignore_nesting: 1; - unsigned int autoschedule: 1; - unsigned int block_trap: 1; - unsigned int is_zombie: 1; - unsigned int pending_irq: 1; -} PyTaskletFlagStruc; -""" - -from pypy.rlib.rarithmetic import LONG_BIT, intmask - -class BitSetDef(object): - __slots__ = "_names __dict__ _attrname".split() - - def __init__(self, _attrname): - self._names = [] - self._attrname = _attrname - - def __setattr__(self, key, value): - if key not in self.__slots__: - assert key not in self.__dict__ - self._names.append(key) - object.__setattr__(self, key, value) - - def __iter__(self): - return self._enum_objects() - - def _enum_objects(self): - for name in self._names: - yield name, getattr(self, name) - -# negative values are user-writable -flags = BitSetDef("flags") -flags.blocked = 2, """writing (1) or reading (-1) or not blocked (0)""" -flags.atomic = -1, """If true, schedulers will never switch""" -flags.ignore_nesting = -1, """allow auto-scheduling in nested interpreters""" -flags.autoschedule = -1, """enable auto-scheduling""" -flags.block_trap = -1, """raise an exception instead of blocking""" -flags.is_zombie = 1, """__del__ is in progress""" -flags.pending_irq = 1, """an interrupt occured while being atomic""" - -def make_get_bits(name, bits, shift): - """ return a bool for single bits, signed int otherwise """ - signmask = 1 << (bits - 1 + shift) - lshift = bits + shift - rshift = bits - if bits == 1: - return "bool(%s & 0x%x)" % (name, signmask) - else: - return "intmask(%s << (LONG_BIT-%d)) >> (LONG_BIT-%d)" % (name, lshift, rshift) - -def make_set_bits(name, bits, shift): - datamask = int('1' * bits, 2) - clearmask = datamask << shift - return "%s & ~0x%x | (value & 0x%x) << %d" % (name, clearmask, datamask, shift) - -def gen_code(): - from cStringIO import StringIO - f = StringIO() - print 
>> f, "class StacklessFlags(object):" - print >> f, " _mixin_ = True" - shift = 0 - field = "self.%s" % flags._attrname - for name, (bits, doc) in flags: - write, bits = bits < 0, abs(bits) - print >> f - print >> f, ' def get_%s(self):' % name - print >> f, ' """%s"""' % doc - print >> f, ' return %s' % make_get_bits(field, bits, shift) - print >> f, ' def set_%s(self, value):' % name - print >> f, ' """%s"""' % doc - print >> f, ' %s = %s' % (field, make_set_bits(field, bits, shift)) - print >> f, ' set_%s._public = %s' % (name, write) - shift += bits - return f.getvalue() - -# BEGIN generated code -class StacklessFlags(object): - _mixin_ = True - - def get_blocked(self): - """writing (1) or reading (-1) or not blocked (0)""" - return intmask(self.flags << (LONG_BIT-2)) >> (LONG_BIT-2) - def set_blocked(self, value): - """writing (1) or reading (-1) or not blocked (0)""" - self.flags = self.flags & ~0x3 | (value & 0x3) << 0 - set_blocked._public = False - - def get_atomic(self): - """If true, schedulers will never switch""" - return bool(self.flags & 0x4) - def set_atomic(self, value): - """If true, schedulers will never switch""" - self.flags = self.flags & ~0x4 | (value & 0x1) << 2 - set_atomic._public = True - - def get_ignore_nesting(self): - """allow auto-scheduling in nested interpreters""" - return bool(self.flags & 0x8) - def set_ignore_nesting(self, value): - """allow auto-scheduling in nested interpreters""" - self.flags = self.flags & ~0x8 | (value & 0x1) << 3 - set_ignore_nesting._public = True - - def get_autoschedule(self): - """enable auto-scheduling""" - return bool(self.flags & 0x10) - def set_autoschedule(self, value): - """enable auto-scheduling""" - self.flags = self.flags & ~0x10 | (value & 0x1) << 4 - set_autoschedule._public = True - - def get_block_trap(self): - """raise an exception instead of blocking""" - return bool(self.flags & 0x20) - def set_block_trap(self, value): - """raise an exception instead of blocking""" - self.flags = 
self.flags & ~0x20 | (value & 0x1) << 5 - set_block_trap._public = True - - def get_is_zombie(self): - """__del__ is in progress""" - return bool(self.flags & 0x40) - def set_is_zombie(self, value): - """__del__ is in progress""" - self.flags = self.flags & ~0x40 | (value & 0x1) << 6 - set_is_zombie._public = False - - def get_pending_irq(self): - """an interrupt occured while being atomic""" - return bool(self.flags & 0x80) - def set_pending_irq(self, value): - """an interrupt occured while being atomic""" - self.flags = self.flags & ~0x80 | (value & 0x1) << 7 - set_pending_irq._public = False - -# END generated code - -if __name__ == '__main__': - # paste this into the file - print gen_code() diff --git a/pypy/module/_stackless/test/__init__.py b/pypy/module/_stackless/test/__init__.py deleted file mode 100644 --- a/pypy/module/_stackless/test/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# \ No newline at end of file diff --git a/pypy/module/_stackless/test/conftest.py b/pypy/module/_stackless/test/conftest.py deleted file mode 100644 --- a/pypy/module/_stackless/test/conftest.py +++ /dev/null @@ -1,8 +0,0 @@ -import sys -import py.test - -def pytest_runtest_setup(item): - py.test.importorskip('greenlet') - if sys.platform == 'win32': - py.test.skip("stackless tests segfault on Windows") - diff --git a/pypy/module/_stackless/test/slp_test_pickle.py b/pypy/module/_stackless/test/slp_test_pickle.py deleted file mode 100644 --- a/pypy/module/_stackless/test/slp_test_pickle.py +++ /dev/null @@ -1,35 +0,0 @@ -from pypy.conftest import gettestobjspace - -# app-level testing of coroutine pickling - -class AppTest_Pickle: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_simple_ish(self): - - output = [] - import _stackless - def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x) - - def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = 
_stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - - example() - assert output == [16, 8, 4, 2, 1] diff --git a/pypy/module/_stackless/test/test_choicepoint.py b/pypy/module/_stackless/test/test_choicepoint.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_choicepoint.py +++ /dev/null @@ -1,85 +0,0 @@ -import py; py.test.skip("clonable coroutines not really maintained any more") - -from pypy.rlib.rcoroutine import AbstractThunk -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine - -class ChoicePointHolder(object): - def __init__(self): - self.choicepoints = [] - self.clone_me = False - self.answer = 0 - self.solutions_count = 0 - - def next_choice(self): - return self.choicepoints.pop() - - def add(self, choice, answer=0): - self.choicepoints.append((choice, answer)) - - def more_choices(self): - return bool(self.choicepoints) - - def choice(self): - #os.write(1, "choice\n") - self.clone_me = True - self.g_main.switch() - #os.write(1, "answer: %d\n" % (self.answer,)) - return self.answer - - def fail(self): - self.g_main.switch() - assert False - -choicepoints = ChoicePointHolder() - -# ____________________________________________________________ - -class SearchTask(AbstractThunk): - def call(self): - path = [] - for i in range(10): - res = choicepoints.choice() - assert len(path) == i - path.append(res) - #os.write(1, "{%x} trying: %s\n" % (id(path), path)) - if i == 3: - import gc; gc.collect() - #os.write(1, "{%x} found a solution: %s\n" % (id(path), path)) - choicepoints.solutions_count += 1 - -# ____________________________________________________________ - - -class SearchAllTask(AbstractThunk): - def call(self): - search_coro = ClonableCoroutine() - search_coro.bind(SearchTask()) - choicepoints.add(search_coro) - - #os.write(1, "starting\n") - while 
choicepoints.more_choices(): - searcher, nextvalue = choicepoints.next_choice() - choicepoints.clone_me = False - choicepoints.answer = nextvalue - #os.write(1, '<<< {%x} %d\n' % (id(searcher), nextvalue)) - searcher.switch() - #os.write(1, '>>> %d\n' % (choicepoints.clone_me,)) - if choicepoints.clone_me: - searcher2 = searcher.clone() - #os.write(1, 'searcher = {%x}, searcher2 = {%x}\n' % ( - # id(searcher), id(searcher2))) - choicepoints.add(searcher, 5) - choicepoints.add(searcher2, 4) - -def entry_point(): - choicepoints.g_main = ClonableCoroutine() - choicepoints.g_main.bind(SearchAllTask()) - choicepoints.g_main.switch() - return choicepoints.solutions_count - -def test_choicepoint(): - from pypy.translator.c.test import test_newgc - tester = test_newgc.TestUsingStacklessFramework() - fn = tester.getcompiled(entry_point) - res = fn() - assert res == 2 ** 10 diff --git a/pypy/module/_stackless/test/test_clonable.py b/pypy/module/_stackless/test/test_clonable.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_clonable.py +++ /dev/null @@ -1,187 +0,0 @@ -import py; py.test.skip("clonable coroutines not really maintained any more") - -from pypy.conftest import gettestobjspace, option -import py, sys - -# app-level testing of coroutine cloning - -class AppTestClonable: - - def setup_class(cls): - if not option.runappdirect: - py.test.skip('pure appdirect test (run with -A)') - cls.space = space = gettestobjspace(usemodules=('_stackless',)) - if not space.is_true(space.appexec([], """(): - import _stackless - return hasattr(_stackless, 'clonable') - """)): - py.test.skip('no _stackless.clonable') - - - def test_solver(self): - import _stackless - - class Fail(Exception): - pass - - class Success(Exception): - pass - - def first_solution(func): - global next_answer - co = _stackless.clonable() - co.bind(func) - pending = [(co, None)] - while pending: - co, next_answer = pending.pop() - try: - co.switch() - except Fail: - pass - except Success, e: - 
return e.args[0] - else: - # zero_or_one() called, clone the coroutine - co2 = co.clone() - pending.append((co2, 1)) - pending.append((co, 0)) - raise Fail("no solution") - - pending = [] - main = _stackless.clonable.getcurrent() - - def zero_or_one(): - main.switch() - return next_answer - - # ____________________________________________________________ - - invalid_prefixes = { - (0, 0): True, - (0, 1, 0): True, - (0, 1, 1): True, - (1, 0): True, - (1, 1, 0, 0): True, - } - - def example(): - test = [] - for n in range(5): - test.append(zero_or_one()) - if tuple(test) in invalid_prefixes: - raise Fail - raise Success(test) - - res = first_solution(example) - assert res == [1, 1, 0, 1, 0] - - - def test_myself_may_not_be_me_any_more(self): - import gc - from _stackless import clonable - - counter = [0] - - def runner(): - while 1: - assert clonable.getcurrent() is coro - counter[0] += 1 - main.switch() - - main = clonable.getcurrent() - coro = clonable() - coro.bind(runner) - - coro.switch() - assert counter == [1] - - assert clonable.getcurrent() is main - coro1 = coro.clone() - assert counter == [1] - assert clonable.getcurrent() is main - coro.switch() - assert counter == [2] - coro.switch() - assert counter == [3] - assert clonable.getcurrent() is main - del coro1 - gc.collect() - #print "collected!" 
- assert clonable.getcurrent() is main - assert counter == [3] - coro.switch() - assert clonable.getcurrent() is main - assert counter == [4] - - - def test_fork(self): - import _stackless - - class Fail(Exception): - pass - - class Success(Exception): - pass - - def first_solution(func): - global next_answer - co = _stackless.clonable() - co.bind(func) - try: - co.switch() - except Success, e: - return e.args[0] - - def zero_or_one(): - sub = _stackless.fork() - if sub is not None: - # in the parent: run the child first - try: - sub.switch() - except Fail: - pass - # then proceed with answer '1' - return 1 - else: - # in the child: answer '0' - return 0 - - # ____________________________________________________________ - - invalid_prefixes = { - (0, 0): True, - (0, 1, 0): True, - (0, 1, 1): True, - (1, 0): True, - (1, 1, 0, 0): True, - } - - def example(): - test = [] - for n in range(5): - test.append(zero_or_one()) - if tuple(test) in invalid_prefixes: - raise Fail - raise Success(test) - - res = first_solution(example) - assert res == [1, 1, 0, 1, 0] - - def test_clone_before_start(self): - """Tests that a clonable coroutine can be - cloned before it is started - (this used to fail with a segmentation fault) - """ - import _stackless - - counter = [0] - def simple_coro(): - print "hello" - counter[0] += 1 - - s = _stackless.clonable() - s.bind(simple_coro) - t = s.clone() - s.switch() - t.switch() - assert counter[0] == 2 diff --git a/pypy/module/_stackless/test/test_composable_coroutine.py b/pypy/module/_stackless/test/test_composable_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_composable_coroutine.py +++ /dev/null @@ -1,133 +0,0 @@ -""" a faith is the connection between past and future that divides the - application into switch-compatible chunks. 
- -- stakkars -""" -from pypy.conftest import gettestobjspace -from py.test import skip - -class AppTest_ComposableCoroutine: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - cls.w_generator_ = space.appexec([], """(): - import _stackless - - generators_costate = _stackless.usercostate() - main = generators_costate.getcurrent() - - class generator_iterator(_stackless.coroutine): - - def __iter__(self): - return self - - def next(self): - if self.gi_answer is not None: - raise ValueError('stackless-generator' - ' already executing') - self.gi_answer = [] - self.gi_caller = generators_costate.getcurrent() - self.switch() - answer = self.gi_answer - self.gi_answer = None - if answer: - return answer[0] - else: - raise StopIteration - - def generator(f): - def myfunc(*args, **kwds): - g = generators_costate.spawn(generator_iterator) - g.gi_answer = None - g.bind(f, *args, **kwds) - return g - return myfunc - - def Yield(value): - g = generators_costate.getcurrent() - if g is main: - raise ValueError('Yield() outside any stackless-generator') - assert isinstance(g, generator_iterator) - assert g.gi_answer == [] - g.gi_answer.append(value) - g.gi_caller.switch() - - generator.Yield = Yield - generator._costate = generators_costate - return (generator,) - """) - - def test_simple_costate(self): - import _stackless - costate = _stackless.usercostate() - main = costate.getcurrent() - - result = [] - def f(): - result.append(costate.getcurrent()) - co = costate.spawn() - co.bind(f) - co.switch() - assert result == [co] - - def test_generator(self): - generator, = self.generator_ - - def squares(n): - for i in range(n): - generator.Yield(i*i) - squares = generator(squares) - - lst1 = [i*i for i in range(10)] - for got in squares(10): - expected = lst1.pop(0) - assert got == expected - assert lst1 == [] - - def test_multiple_costates(self): - """Test that two independent costates mix transparently: - - - compute_costate, 
used for a coroutine that fills a list with - some more items each time it is switched to - - - generators_costate, used interally by self.generator (see above) - """ - - import _stackless - generator, = self.generator_ - - # you can see how it fails if we don't have two different costates - # by setting compute_costate to generator._costate instead - compute_costate = _stackless.usercostate() - compute_main = compute_costate.getcurrent() - lst = [] - - def filler(): # -> 0, 1, 2, 100, 101, 102, 200, 201, 202, 300 ... - for k in range(5): - for j in range(3): - lst.append(100 * k + j) - compute_main.switch() - - filler_co = compute_costate.spawn() - filler_co.bind(filler) - - def grab_next_value(): - while not lst: - #print 'filling more...' - filler_co.switch() - #print 'now lst =', lst - #print 'grabbing', lst[0] - return lst.pop(0) - - def squares(n): - for i in range(n): - #print 'square:', i - generator.Yield(i*grab_next_value()) - squares = generator(squares) - - lst1 = [0, 1, 4, 300, 404, 510, 1200, 1407, 1616, 2700] - for got in squares(10): - expected = lst1.pop(0) - assert got == expected - assert lst1 == [] diff --git a/pypy/module/_stackless/test/test_coroutine.py b/pypy/module/_stackless/test/test_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_coroutine.py +++ /dev/null @@ -1,168 +0,0 @@ -from pypy.conftest import gettestobjspace, option -from py.test import skip - - -class AppTest_Coroutine: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_raise_propagate(self): - import _stackless as stackless - co = stackless.coroutine() - def f(): - return 1/0 - co.bind(f) - try: - co.switch() - except ZeroDivisionError: - pass - else: - raise AssertionError("exception not propagated") - - def test_strange_test(self): - from _stackless import coroutine - def f(): - print "in new coro" - return 42 - def create(): - b = coroutine() - b.bind(f) - print "bound" - b.switch() - 
print "switched" - return b - a = coroutine() - a.bind(create) - b = a.switch() - # now b.parent = a - def nothing(): - pass - a.bind(nothing) - def kill(): - # this sets a.parent = b - a.kill() - b.bind(kill) - b.switch() - - def test_kill(self): - import _stackless as stackless - co = stackless.coroutine() - def f(): - pass - co.bind(f) - assert co.is_alive - co.kill() - assert not co.is_alive - - def test_kill_running(self): - coroutineexit = [] - import _stackless as stackless - main = stackless.coroutine.getcurrent() - result = [] - co = stackless.coroutine() - def f(): - x = 2 - try: - result.append(1) - main.switch() - x = 3 - except CoroutineExit: - coroutineexit.append(True) - raise - finally: - result.append(x) - result.append(4) - co.bind(f) - assert co.is_alive - co.switch() - assert co.is_alive - assert result == [1] - co.kill() - assert not co.is_alive - assert result == [1, 2] - assert coroutineexit == [True] - - def test_bogus_bind(self): - import _stackless as stackless - co = stackless.coroutine() - def f(): - pass - co.bind(f) - raises(ValueError, co.bind, f) - - def test__framestack(self): - import _stackless as stackless - main = stackless.coroutine.getmain() - co = stackless.coroutine() - def g(): - return co._framestack - def f(): - return g() - - co.bind(f) - stack = co.switch() - assert stack == () # running corountine, _framestack is empty - - co = stackless.coroutine() - def g(): - return main.switch() - def f(): - return g() - - co.bind(f) - co.switch() - stack = co._framestack - assert len(stack) == 2 - assert stack[0].f_code is f.func_code - assert stack[1].f_code is g.func_code - - co = stackless.coroutine() - - - -class AppTestDirect: - def setup_class(cls): - if not option.runappdirect: - skip('pure appdirect test (run with -A)') - cls.space = gettestobjspace(usemodules=('_stackless',)) - - def test_stack_depth_limit(self): - import sys - import _stackless as stackless - st = stackless.get_stack_depth_limit() - try: - 
stackless.set_stack_depth_limit(1) - assert stackless.get_stack_depth_limit() == 1 - try: - co = stackless.coroutine() - def f(): - pass - co.bind(f) - co.switch() - except RuntimeError: - pass - finally: - stackless.set_stack_depth_limit(st) - -class TestRandomThings: - def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_stackless',)) - - def test___del___handling(self): - space = self.space - w_l = space.newlist([]) - coro = space.appexec([w_l], """(l): - from _stackless import coroutine - class MyCoroutine(coroutine): - def __del__(self): - l.append(self.is_zombie) - return MyCoroutine() - """) - coro.__del__() - space.user_del_action.perform(space.getexecutioncontext(), None) - coro._kill_finally() - assert space.len_w(w_l) == 1 - res = space.is_true(space.getitem(w_l, space.wrap(0))) - assert res diff --git a/pypy/module/_stackless/test/test_greenlet.py b/pypy/module/_stackless/test/test_greenlet.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_greenlet.py +++ /dev/null @@ -1,643 +0,0 @@ -from pypy.conftest import gettestobjspace, skip_on_missing_buildoption - -class AppTest_Greenlet: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_very_simple(self): - from _stackless import greenlet - lst = [] - def f(x): - lst.append(x) - return x + 10 - g = greenlet(f) - assert not g - res = g.switch(20) - assert res == 30 - assert lst == [20] - assert g.dead - assert not g - - def test_switch_back_to_main(self): - from _stackless import greenlet - lst = [] - main = greenlet.getcurrent() - def f(x): - lst.append(x) - x = main.switch(x + 10) - return 40 + x - g = greenlet(f) - res = g.switch(20) - assert res == 30 - assert lst == [20] - assert not g.dead - res = g.switch(2) - assert res == 42 - assert g.dead - - def test_simple(self): - from _stackless import greenlet - lst = [] - gs = [] - def f(): - lst.append(1) - greenlet.getcurrent().parent.switch() - lst.append(3) - g = 
greenlet(f) - lst.append(0) - g.switch() - lst.append(2) - g.switch() - lst.append(4) - assert lst == range(5) - - def test_exception_simple(self): - from _stackless import greenlet - def f(): - raise ValueError - g1 = greenlet(f) - raises(ValueError, g1.switch) - - def test_exception_propagate(self): - from _stackless import greenlet - def f(): - raise ValueError - def g(): - return g1.switch() - g1 = greenlet(f) - g2 = greenlet(g) - raises(ValueError, g1.switch) - g1 = greenlet(f) - raises(ValueError, g2.switch) - - - def test_exc_info_save_restore(self): - from _stackless import greenlet - import sys - def f(): - try: - raise ValueError('fun') - except: - exc_info = sys.exc_info() - greenlet(h).switch() - assert exc_info == sys.exc_info() - - def h(): - assert sys.exc_info() == (None, None, None) - - greenlet(f).switch() - - def test_exception(self): - from _stackless import greenlet - import sys - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise ValueError - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - raises(TypeError, "g2.parent = 1") - g2.parent = g1 - assert seen == [] - raises(ValueError, g2.switch) - assert seen == [ValueError] - g2.switch() - assert seen == [ValueError] - - def test_send_exception(self): - from _stackless import greenlet - import sys - def send_exception(g, exc): - # note: send_exception(g, exc) can be now done with g.throw(exc). - # the purpose of this test is to explicitely check the propagation rules. 
- def crasher(exc): - raise exc - g1 = greenlet(crasher) - g1.parent = g - g1.switch(exc) - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise ValueError - - seen = [] - g1 = greenlet(fmain) - g1.switch(seen) - raises(KeyError, "send_exception(g1, KeyError)") - assert seen == [KeyError] - seen = [] - g1 = greenlet(fmain) - g1.switch(seen) - raises(KeyError, "g1.throw(KeyError)") - assert seen == [KeyError] - assert g1.dead - - def test_frame(self): - from _stackless import greenlet - import sys - def f1(): - f = sys._getframe(0) - assert f.f_back is None - greenlet.getcurrent().parent.switch(f) - return "meaning of life" - g = greenlet(f1) - frame = g.switch() - assert frame is g.gr_frame - assert g - next = g.switch() - assert not g - assert next == "meaning of life" - assert g.gr_frame is None - - def test_mixing_greenlet_coroutine(self): - from _stackless import greenlet, coroutine - lst = [] - def f(): - lst.append(1) - greenlet.getcurrent().parent.switch() - lst.append(3) - def make_h(c): - def h(): - g = greenlet(f) - lst.append(0) - g.switch() - c.switch() - lst.append(2) - g.switch() - c.switch() - lst.append(4) - c.switch() - return h - c1 = coroutine.getcurrent() - c2 = coroutine() - c3 = coroutine() - c2.bind(make_h(c3)) - c3.bind(make_h(c2)) - c2.switch() - assert lst == [0, 1, 0, 1, 2, 3, 2, 3, 4, 4] - - def test_dealloc(self): - skip("not working yet") - from _stackless import greenlet - import sys - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise ValueError - seen = [] - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - assert seen == [] - del g1 - assert seen == [greenlet.GreenletExit] - del g2 - assert seen == [greenlet.GreenletExit, greenlet.GreenletExit] - - -# ____________________________________________________________ -# -# The tests from greenlets. 
-# For now, without the ones that involve threads -# -class AppTest_PyMagicTestGreenlet: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - cls.w_glob = space.appexec([], """(): - import sys - from _stackless import greenlet - - class SomeError(Exception): - pass - - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise SomeError - - class Glob: pass - glob = Glob() - glob.__dict__.update(locals()) - return glob - """) - - def test_simple(self): - greenlet = self.glob.greenlet - lst = [] - def f(): - lst.append(1) - greenlet.getcurrent().parent.switch() - lst.append(3) - g = greenlet(f) - lst.append(0) - g.switch() - lst.append(2) - g.switch() - lst.append(4) - assert lst == range(5) - - def test_exception(self): - greenlet = self.glob.greenlet - fmain = self.glob.fmain - SomeError = self.glob.SomeError - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - g2.parent = g1 - assert seen == [] - raises(SomeError, g2.switch) - assert seen == [SomeError] - g2.switch() - assert seen == [SomeError] - - def test_send_exception(self): - greenlet = self.glob.greenlet - fmain = self.glob.fmain - def send_exception(g, exc): - # note: send_exception(g, exc) can be now done with g.throw(exc). - # the purpose of this test is to explicitely check the - # propagation rules. 
- def crasher(exc): - raise exc - g1 = greenlet(crasher, parent=g) - g1.switch(exc) - - seen = [] - g1 = greenlet(fmain) - g1.switch(seen) - raises(KeyError, "send_exception(g1, KeyError)") - assert seen == [KeyError] - - def test_dealloc(self): - skip("XXX in-progress: GC handling of greenlets") - import gc - greenlet = self.glob.greenlet - fmain = self.glob.fmain - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - assert seen == [] - del g1 - gc.collect() - assert seen == [greenlet.GreenletExit] - del g2 - gc.collect() - assert seen == [greenlet.GreenletExit, greenlet.GreenletExit] - - def test_frame(self): - import sys - greenlet = self.glob.greenlet - def f1(): - f = sys._getframe(0) - assert f.f_back is None - greenlet.getcurrent().parent.switch(f) - return "meaning of life" - g = greenlet(f1) - frame = g.switch() - assert frame is g.gr_frame - assert g - next = g.switch() - assert not g - assert next == "meaning of life" - assert g.gr_frame is None - - -class AppTest_PyMagicTestThrow: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_class(self): - from _stackless import greenlet - def switch(*args): - return greenlet.getcurrent().parent.switch(*args) - - def f(): - try: - switch("ok") - except RuntimeError: - switch("ok") - return - switch("fail") - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw(RuntimeError) - assert res == "ok" - - def test_val(self): - from _stackless import greenlet - def switch(*args): - return greenlet.getcurrent().parent.switch(*args) - - def f(): - try: - switch("ok") - except RuntimeError, val: - if str(val) == "ciao": - switch("ok") - return - switch("fail") - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw(RuntimeError("ciao")) - assert res == "ok" - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw(RuntimeError, "ciao") - assert res == "ok" - - def 
test_kill(self): - from _stackless import greenlet - def switch(*args): - return greenlet.getcurrent().parent.switch(*args) - - def f(): - switch("ok") - switch("fail") - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw() - assert isinstance(res, greenlet.GreenletExit) - assert g.dead - res = g.throw() # immediately eaten by the already-dead greenlet - assert isinstance(res, greenlet.GreenletExit) - - def test_throw_goes_to_original_parent(self): - from _stackless import greenlet - main = greenlet.getcurrent() - def f1(): - try: - main.switch("f1 ready to catch") - except IndexError: - return "caught" - else: - return "normal exit" - def f2(): - main.switch("from f2") - - g1 = greenlet(f1) - g2 = greenlet(f2, parent=g1) - raises(IndexError, g2.throw, IndexError) - assert g2.dead - assert g1.dead - - g1 = greenlet(f1) - g2 = greenlet(f2, parent=g1) - res = g1.switch() - assert res == "f1 ready to catch" - res = g2.throw(IndexError) - assert res == "caught" - assert g2.dead - assert g1.dead - - g1 = greenlet(f1) - g2 = greenlet(f2, parent=g1) - res = g1.switch() - assert res == "f1 ready to catch" - res = g2.switch() - assert res == "from f2" - res = g2.throw(IndexError) - assert res == "caught" - assert g2.dead - assert g1.dead - - -class AppTest_PyMagicTestGenerator: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_generator(self): - from _stackless import greenlet - - class genlet(greenlet): - - def __init__(self, *args, **kwds): - self.args = args - self.kwds = kwds - - def run(self): - fn, = self.fn - fn(*self.args, **self.kwds) - - def __iter__(self): - return self - - def next(self): - self.parent = greenlet.getcurrent() - result = self.switch() - if self: - return result - else: - raise StopIteration - - def Yield(value): - g = greenlet.getcurrent() - while not isinstance(g, genlet): - if g is None: - raise RuntimeError, 'yield outside a genlet' - g = g.parent - 
g.parent.switch(value) - - def generator(func): - class generator(genlet): - fn = (func,) - return generator - - # ___ test starts here ___ - seen = [] - def g(n): - for i in range(n): - seen.append(i) - Yield(i) - g = generator(g) - for k in range(3): - for j in g(5): - seen.append(j) - assert seen == 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4] - - -class AppTest_PyMagicTestGeneratorNested: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - cls.w_glob = space.appexec([], """(): - from _stackless import greenlet - - class genlet(greenlet): - - def __init__(self, *args, **kwds): - self.args = args - self.kwds = kwds - self.child = None - - def run(self): - fn, = self.fn - fn(*self.args, **self.kwds) - - def __iter__(self): - return self - - def set_child(self, child): - self.child = child - - def next(self): - if self.child: - child = self.child - while child.child: - tmp = child - child = child.child - tmp.child = None - - result = child.switch() - else: - self.parent = greenlet.getcurrent() - result = self.switch() - - if self: - return result - else: - raise StopIteration - - def Yield(value, level = 1): - g = greenlet.getcurrent() - - while level != 0: - if not isinstance(g, genlet): - raise RuntimeError, 'yield outside a genlet' - if level > 1: - g.parent.set_child(g) - g = g.parent - level -= 1 - - g.switch(value) - - def Genlet(func): - class Genlet(genlet): - fn = (func,) - return Genlet - - class Glob: pass - glob = Glob() - glob.__dict__.update(locals()) - return glob - """) - - def test_genlet_1(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - - def g1(n, seen): - for i in range(n): - seen.append(i+1) - yield i - - def g2(n, seen): - for i in range(n): - seen.append(i+1) - Yield(i) - - g2 = Genlet(g2) - - def nested(i): - Yield(i) - - def g3(n, seen): - for i in range(n): - seen.append(i+1) - nested(i) - g3 = Genlet(g3) - - raises(RuntimeError, Yield, 10) - for g in [g1, g2, g3]: - seen = [] - for k 
in range(3): - for j in g(5, seen): - seen.append(j) - assert seen == 3 * [1, 0, 2, 1, 3, 2, 4, 3, 5, 4] - raises(RuntimeError, Yield, 10) - - def test_nested_genlets(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - def a(n): - if n == 0: - return - for ii in ax(n-1): - Yield(ii) - Yield(n) - ax = Genlet(a) - seen = [] - for ii in ax(5): - seen.append(ii) - assert seen == [1, 2, 3, 4, 5] - - def test_perms(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - def perms(l): - if len(l) > 1: - for e in l: - # No syntactical sugar for generator expressions - [Yield([e] + p) for p in perms([x for x in l if x!=e])] - else: - Yield(l) - perms = Genlet(perms) - gen_perms = perms(range(4)) - permutations = list(gen_perms) - assert len(permutations) == 4*3*2*1 - assert [0,1,2,3] in permutations - assert [3,2,1,0] in permutations - - def test_layered_genlets(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - def gr1(n): - for ii in range(1, n): - Yield(ii) - Yield(ii * ii, 2) - gr1 = Genlet(gr1) - def gr2(n, seen): - for ii in gr1(n): - seen.append(ii) - gr2 = Genlet(gr2) - seen = [] - for ii in gr2(5, seen): - seen.append(ii) - assert seen == [1, 1, 2, 4, 3, 9, 4, 16] diff --git a/pypy/module/_stackless/test/test_interp_clonable.py b/pypy/module/_stackless/test/test_interp_clonable.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_interp_clonable.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -testing cloning -""" -import py; py.test.skip("clonable coroutines not really maintained any more") - -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect() -from pypy.translator.c import gc -from pypy.rpython.memory.gctransform import stacklessframework -from pypy.rpython.memory.test import test_transformed_gc -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine -from pypy.module._stackless.rclonable import AbstractThunk, fork - -class 
TestClonableCoroutine(test_transformed_gc.GCTest): - - gcname = "marksweep" - stacklessgc = True - class gcpolicy(gc.StacklessFrameworkGcPolicy): - class transformerclass(stacklessframework.StacklessFrameworkGCTransformer): - GC_PARAMS = {'start_heap_size': 4096 } - - def test_clone(self): - class T(AbstractThunk): - def __init__(self, result): - self.result = result - def call(self): - self.result.append(2) - ClonableCoroutine.getmain().switch() - self.result.append(4) - def f(): - result = [] - coro = ClonableCoroutine() - coro.bind(T(result)) - result.append(1) - coro.switch() - coro2 = coro.clone() - result.append(3) - coro2.switch() - result.append(5) - coro.switch() - result.append(6) - n = 0 - for i in result: - n = n*10 + i - return n - - run = self.runner(f) - res = run([]) - assert res == 1234546 - - def test_clone_local_state(self): - class T(AbstractThunk): - def __init__(self, result): - self.result = result - def call(self): - localstate = [] - localstate.append(10) - self.result.append(2) - ClonableCoroutine.getmain().switch() - localstate.append(20) - if localstate == [10, 20]: - self.result.append(4) - else: - self.result.append(0) - def f(): - result = [] - coro = ClonableCoroutine() - coro.bind(T(result)) - result.append(1) - coro.switch() - coro2 = coro.clone() - result.append(3) - coro2.switch() - result.append(5) - coro.switch() - result.append(6) - n = 0 - for i in result: - n = n*10 + i - return n - - run = self.runner(f) - res = run([]) - assert res == 1234546 - - def test_fork(self): - class T(AbstractThunk): - def __init__(self, result): - self.result = result - def call(self): - localdata = [10] - self.result.append(2) - newcoro = fork() - localdata.append(20) - if newcoro is not None: - # in the parent - self.result.append(3) - newcoro.switch() - self.result.append(5) - else: - # in the child - self.result.append(4) - localdata.append(30) - self.result.append(localdata != [10, 20, 30]) - def f(): - result = [] - coro = 
ClonableCoroutine() - coro.bind(T(result)) - result.append(1) - coro.switch() - result.append(6) - n = 0 - for i in result: - n = n*10 + i - return n - - run = self.runner(f) - res = run([]) - assert res == 12340506 diff --git a/pypy/module/_stackless/test/test_pickle.py b/pypy/module/_stackless/test/test_pickle.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_pickle.py +++ /dev/null @@ -1,487 +0,0 @@ -from pypy.conftest import gettestobjspace, option -import py - -# app-level testing of coroutine pickling - - -class AppTestBasic: - def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_stackless',)) - - def test_pickle_main(self): - import _stackless, pickle - main = _stackless.coroutine.getcurrent() - s = pickle.dumps(main) - c = pickle.loads(s) - assert c is main - - -class AppTestPickle: - - def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_stackless',), CALL_METHOD=True) - - def test_pickle_coroutine_empty(self): - # this test is limited to basic pickling. - # real stacks can only tested with a stackless pypy build. 
- import _stackless as stackless - co = stackless.coroutine() - import pickle - pckl = pickle.dumps(co) - co2 = pickle.loads(pckl) - # the empty unpickled coroutine can still be used: - result = [] - co2.bind(result.append, 42) - co2.switch() - assert result == [42] - - def test_pickle_coroutine_bound(self): - import pickle - import _stackless - lst = [4] - co = _stackless.coroutine() - co.bind(lst.append, 2) - pckl = pickle.dumps((co, lst)) - - (co2, lst2) = pickle.loads(pckl) - assert lst2 == [4] - co2.switch() - assert lst2 == [4, 2] - - - def test_simple_ish(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_pickle_again(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - pckl = pickle.dumps(new_coro) - newer_coro = pickle.loads(pckl) - - newer_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_kwargs(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import 
_stackless -def f(coro, n, x, step=4): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x, step=1) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_starstarargs(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x, step=4): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x, **{'step': 1}) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_closure(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - y = 3 - def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x+y) - - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [19, 11, 7, 5, 4] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_exception(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro): - try: - raise ValueError - except: - coro.switch() - import 
sys - t, v, tb = sys.exc_info() - output.append(t) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [ValueError] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_exception_after_unpickling(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x): - if n == 0: - coro.switch() - raise ValueError - try: - f(coro, n-1, 2*x) - finally: - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - try: - sub_coro.switch() - except ValueError: - pass - else: - assert 0 - try: - new_coro.switch() - except ValueError: - pass - else: - assert 0 - -example() -assert output == [16, 8, 4, 2, 1] * 2 -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_loop(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro): - for x in (1,2,3): - coro.switch() - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - new_coro.switch() - new_coro.switch() - -example() -assert output == [1, 2, 3] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_valstack(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro): - r = 
1+g(coro)+3 - output.append(r) - -def g(coro): - coro.switch() - return 2 - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - - -example() -assert output == [6] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - - def test_exec_and_locals(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless - -def f(coro): - x = None - exec "x = 9" - coro.switch() - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [9] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - - def test_solver(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -import _stackless, pickle - -class Fail(Exception): - pass - -class Success(Exception): - pass - -def first_solution(func): - global next_answer - co = _stackless.coroutine() - co.bind(func) - pending = [(co, None)] - while pending: - co, next_answer = pending.pop() - try: - co.switch() - except Fail: - pass - except Success, e: - return e.args[0] - else: - # zero_or_one() called, clone the coroutine - # NB. 
this seems to be quite slow - co2 = pickle.loads(pickle.dumps(co)) - pending.append((co2, 1)) - pending.append((co, 0)) - raise Fail("no solution") - -pending = [] -main = _stackless.coroutine.getcurrent() - -def zero_or_one(): - main.switch() - return next_answer - -# ____________________________________________________________ - -invalid_prefixes = { - (0, 0): True, - (0, 1, 0): True, - (0, 1, 1): True, - (1, 0): True, - (1, 1, 0, 0): True, - } - -def example(): - test = [] - for n in range(5): - test.append(zero_or_one()) - if tuple(test) in invalid_prefixes: - raise Fail - raise Success(test) - -res = first_solution(example) -assert res == [1, 1, 0, 1, 0] -''' in mod.__dict__ - finally: - del sys.modules['mod'] diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py @@ -7,8 +7,7 @@ class AppTestDistributed(object): def setup_class(cls): - cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + cls.space = gettestobjspace(**{"objspace.std.withtproxy": True}) def test_init(self): import distributed @@ -89,15 +88,12 @@ assert len(item) == 11 class AppTestDistributedTasklets(object): - spaceconfig = {"objspace.std.withtproxy": True, - "objspace.usemodules._stackless": True} + spaceconfig = {"objspace.std.withtproxy": True} reclimit = sys.getrecursionlimit() def setup_class(cls): import py.test py.test.importorskip('greenlet') - #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - # "usemodules":("_stackless",)}) cls.w_test_env_ = cls.space.appexec([], """(): from distributed import test_env return (test_env,) diff --git a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py --- 
a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py @@ -9,8 +9,7 @@ def setup_class(cls): if not option.runappdirect: py.test.skip("Cannot run this on top of py.py because of PopenGateway") - cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + cls.space = gettestobjspace(**{"objspace.std.withtproxy": True}) cls.w_remote_side_code = cls.space.appexec([], """(): import sys sys.path.insert(0, '%s') diff --git a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py --- a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py @@ -9,7 +9,7 @@ class AppTestSocklayer: def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless","_socket", "select")}) + "usemodules":("_socket", "select")}) def test_socklayer(self): class X(object): diff --git a/pypy/module/test_lib_pypy/test_stackless_pickle.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py --- a/pypy/module/test_lib_pypy/test_stackless_pickle.py +++ b/pypy/module/test_lib_pypy/test_stackless_pickle.py @@ -1,3 +1,4 @@ +import py; py.test.skip("XXX port me") from pypy.conftest import gettestobjspace, option class AppTest_Stackless: diff --git a/pypy/rlib/rcoroutine.py b/pypy/rlib/rcoroutine.py deleted file mode 100644 --- a/pypy/rlib/rcoroutine.py +++ /dev/null @@ -1,357 +0,0 @@ -""" -Basic Concept: --------------- - -All concurrency is expressed by some means of coroutines. -This is the lowest possible exposable interface. - -A coroutine is a structure that controls a sequence -of continuations in time. It contains a frame object -that is a restartable stack chain. This frame object -is updated on every switch. - -The frame can be None. 
Either the coroutine is not yet -bound, or it is the current coroutine of some costate. -See below. XXX rewrite a definition of these terms. - -There is always a notation of a "current" and a "last" -coroutine. Current has no frame and represents the -running program. last is needed to keep track of the -coroutine that receives a new frame chain after a switch. - -A costate object holds last and current. -There are different coroutine concepts existing in -parallel, like plain interp-level coroutines and -app-level structures like coroutines, greenlets and -tasklets. -Every concept is associated with its own costate object. -This allows for peaceful co-existence of many concepts. -The type of a switch is determined by the target's costate. -""" - -import py; py.test.skip("fixme: rewrite using rlib.rstacklet") -# XXX ^^^ the reason it is not done is that pypy.rlib.rcoroutine -# plus pypy/module/_stackless look like faaaaaar too much code -# to me :-( - -from pypy.rlib.rstack import yield_current_frame_to_caller -from pypy.rlib.objectmodel import we_are_translated - -from pypy.interpreter.error import OperationError - -try: - from greenlet import greenlet - main_greenlet = greenlet.getcurrent() -except (ImportError, ValueError): - def greenlet(*args, **kwargs): - raise NotImplementedError("need either greenlets or a translated version of pypy") - -class FrameChain(object): - """Greenlet-based emulation of the primitive rstack 'frames' of RPython""" - - def __init__(self, thunk=None): - if thunk: - self.greenlet = greenlet(thunk) - else: - self.greenlet = greenlet.getcurrent() - - def switch(self): - last = FrameChain() - return self.greenlet.switch(last) - -import sys, os - -def make_coroutine_classes(baseclass): - class BaseCoState(object): - def __init__(self): - self.current = self.main = None - - def __repr__(self): - "NOT_RPYTHON" - # for debugging only - return '<%s current=%r>' % (self.__class__.__name__, self.current) - - def update(self, new): - 
syncstate.leaving = self.current - syncstate.entering = new - self.current = new - frame, new.frame = new.frame, None - return frame - - - class CoState(BaseCoState): - def __init__(self): - BaseCoState.__init__(self) - self.current = self.main = Coroutine(self) - - class CoroutineDamage(SystemError): - pass - - - class SyncState(object): - def __init__(self): - self.reset() - - def reset(self): - self.default_costate = None - self.leaving = None - self.entering = None - self.things_to_do = False - self.temp_exc = None - self.to_delete = [] - - def switched(self, incoming_frame): - left = syncstate.leaving - entered = syncstate.entering - syncstate.leaving = syncstate.entering = None - if left is not None: # mostly to work around an annotation problem; - # should not really be None - left.frame = incoming_frame - left.goodbye() - if entered is not None: - entered.hello() - if self.things_to_do: - self._do_things_to_do() - - def push_exception(self, exc): - self.things_to_do = True - self.temp_exc = exc - - def check_for_zombie(self, obj): - return obj in self.to_delete - - def postpone_deletion(self, obj): - self.to_delete.append(obj) - self.things_to_do = True - - def _do_things_to_do(self): - if self.temp_exc is not None: - # somebody left an unhandled exception and switched to us. - # this both provides default exception handling and the - # way to inject an exception, like CoroutineExit. - e, self.temp_exc = self.temp_exc, None - self.things_to_do = bool(self.to_delete) - raise e - while self.to_delete: - delete, self.to_delete = self.to_delete, [] - for obj in delete: - obj.parent = obj.costate.current - obj._kill_finally() - else: - self.things_to_do = False - - def _freeze_(self): - self.reset() - return False - - syncstate = SyncState() - - - class CoroutineExit(SystemExit): - # XXX SystemExit's __init__ creates problems in bookkeeper. 
- def __init__(self): - pass - - class AbstractThunk(object): - def call(self): - raise NotImplementedError("abstract base class") - - - class Coroutine(baseclass): - def __init__(self, state=None): - self.frame = None - if state is None: - state = self._get_default_costate() - self.costate = state - self.parent = None - self.thunk = None - self.coroutine_exit = False - - def __repr__(self): - 'NOT_RPYTHON' - # just for debugging - if hasattr(self, '__name__'): - return '' % (self.__name__, self.frame, self.thunk is not None) - else: - return '' % (self.frame, self.thunk is not None) - - def _get_default_costate(): - state = syncstate.default_costate - if state is None: - state = syncstate.default_costate = CoState() - return state - _get_default_costate = staticmethod(_get_default_costate) - - def _get_default_parent(self): - return self.costate.current - - def bind(self, thunk): - assert isinstance(thunk, AbstractThunk) - if self.frame is not None: - raise CoroutineDamage - if self.parent is None: - self.parent = self._get_default_parent() - assert self.parent is not None - self.thunk = thunk - if we_are_translated(): - self.frame = self._bind() - else: - self.frame = self._greenlet_bind() - - def _greenlet_bind(self): - weak = [self] - def _greenlet_execute(incoming_frame): - try: - chain2go2next = weak[0]._execute(incoming_frame) - except: - # no exception is supposed to get out of _execute() - # better report it directly into the main greenlet then, - # and hidden to prevent catching - main_greenlet.throw(AssertionError( - "unexpected exception out of Coroutine._execute()", - *sys.exc_info())) - assert 0 - del weak[0] - greenlet.getcurrent().parent = chain2go2next.greenlet - return None # as the result of the FrameChain.switch() - chain = FrameChain(_greenlet_execute) - return chain - - def _bind(self): - state = self.costate - incoming_frame = yield_current_frame_to_caller() - self = state.current - return self._execute(incoming_frame) - - def _execute(self, 
incoming_frame): - state = self.costate - try: - try: - try: - exc = None - thunk = self.thunk - self.thunk = None - syncstate.switched(incoming_frame) - thunk.call() - except Exception, e: - exc = e - raise - finally: - # warning! we must reload the 'self' from the costate, - # because after a clone() the 'self' of both copies - # point to the original! - self = state.current - self.finish(exc) - except CoroutineExit: - pass - except Exception, e: - if self.coroutine_exit is False: - # redirect all unhandled exceptions to the parent - syncstate.push_exception(e) - - while self.parent is not None and self.parent.frame is None: - # greenlet behavior is fine - self.parent = self.parent.parent - return state.update(self.parent) - - def switch(self): - if self.frame is None: - # considered a programming error. - # greenlets and tasklets have different ideas about this. - raise CoroutineDamage - state = self.costate - incoming_frame = state.update(self).switch() - syncstate.switched(incoming_frame) - - def kill(self): - self._kill(CoroutineExit()) - - def _kill(self, exc): - if self.frame is None: - return - state = self.costate - syncstate.push_exception(exc) - # careful here - if setting self.parent to state.current would - # create a loop, break it. The assumption is that 'self' - # will die, so that state.current's chain of parents can be - # modified to skip 'self' without too many people noticing. - p = state.current - if p is self or self.parent is None: - pass # killing the current of the main - don't change any parent - else: - while p.parent is not None: - if p.parent is self: - p.parent = self.parent - break - p = p.parent - self.parent = state.current - self.switch() - - def _kill_finally(self): - try: - self._userdel() - except Exception: - pass # maybe print a warning? 
- self.kill() - - __already_postponed = False - - def __del__(self): - # provide the necessary clean-up - # note that AppCoroutine has to take care about this - # as well, including a check for user-supplied __del__. - # Additionally note that in the context of __del__, we are - # not in the position to issue a switch. - # we defer it completely. - - # it is necessary to check whether syncstate is None because CPython - # sets it to None when it cleans up the modules, which will lead to - # very strange effects - - if not we_are_translated(): - # we need to make sure that we postpone each coroutine only once on - # top of CPython, because this resurrects the coroutine and CPython - # calls __del__ again, thus postponing and resurrecting the - # coroutine once more :-( - if self.__already_postponed: - return - self.__already_postponed = True - if syncstate is not None: - syncstate.postpone_deletion(self) - - # coroutines need complete control over their __del__ behaviour. In - # particular they need to care about calling space.userdel themselves - handle_del_manually = True - - def _userdel(self): - # override this for exposed coros - pass - - def is_alive(self): - return self.frame is not None or self is self.costate.current - - def is_zombie(self): - return self.frame is not None and syncstate.check_for_zombie(self) - - def getcurrent(): - costate = Coroutine._get_default_costate() - return costate.current - getcurrent = staticmethod(getcurrent) - - def getmain(): - costate = Coroutine._get_default_costate() - return costate.main - getmain = staticmethod(getmain) - - def hello(self): - "Called when execution is transferred into this coroutine." - - def goodbye(self): - "Called just after execution is transferred away from this coroutine." 
- - def finish(self, exc=None): - "stephan forgot me" - - return locals() - -# _________________________________________________ diff --git a/pypy/rlib/test/test_rcoroutine.py b/pypy/rlib/test/test_rcoroutine.py deleted file mode 100644 --- a/pypy/rlib/test/test_rcoroutine.py +++ /dev/null @@ -1,348 +0,0 @@ -""" -testing coroutines at interprepter level -""" -import py -import os -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect() -from pypy.rlib.rcoroutine import make_coroutine_classes -from pypy.translator.c.test.test_stackless import StacklessTest -from pypy.translator.c import gc - -def setup_module(mod): - py.test.importorskip('greenlet') - -d = make_coroutine_classes(object) -syncstate = d['syncstate'] -Coroutine = d['Coroutine'] -AbstractThunk = d['AbstractThunk'] - -def output(stuff): - os.write(2, stuff + '\n') - -class _TestCoroutine(StacklessTest): - backendopt = True - Coroutine = Coroutine - - def setup_method(self, method): - syncstate.reset() - - def _freeze_(self): # for 'self.Coroutine' - return True - - def test_coroutine1(self): - - def g(lst, coros): - coro_f, coro_g, coro_h = coros - lst.append(2) - output('g appended 2') - coro_h.switch() - lst.append(5) - output('g appended 5') - - def h(lst, coros): - coro_f, coro_g, coro_h = coros - lst.append(3) - output('h appended 3') - coro_f.switch() - lst.append(7) - output('h appended 7') - - class T(AbstractThunk): - def __init__(self, func, arg1, arg2): - self.func = func - self.arg1 = arg1 - self.arg2 = arg2 - def call(self): - self.func(self.arg1, self.arg2) - - def f(): - lst = [1] - coro_f = Coroutine.getcurrent() - coro_g = self.Coroutine() - coro_h = self.Coroutine() - coros = [coro_f, coro_g, coro_h] - thunk_g = T(g, lst, coros) - output('binding g after f set 1') - coro_g.bind(thunk_g) - thunk_h = T(h, lst, coros) - output('binding h after f set 1') - coro_h.bind(thunk_h) - output('switching to g') - coro_g.switch() - lst.append(4) - output('f appended 4') - 
coro_g.switch() - lst.append(6) - output('f appended 6') - coro_h.switch() - lst.append(8) - output('f appended 8') - n = 0 - for i in lst: - n = n*10 + i - return n - - data = self.wrap_stackless_function(f) - assert data == 12345678 - - def test_coroutine2(self): - - class TBase(AbstractThunk): - def call(self): - pass - - class T(TBase): - def __init__(self, func, arg1, arg2): - self.func = func - self.arg1 = arg1 - self.arg2 = arg2 - def call(self): - self.res = self.func(self.arg1, self.arg2) - - class T1(TBase): - def __init__(self, func, arg1): - self.func = func - self.arg1 = arg1 - def call(self): - self.res = self.func(self.arg1) - - def g(lst, coros): - coro_f1, coro_g, coro_h = coros - lst.append(2) - output('g appended 2') - coro_h.switch() - lst.append(5) - output('g appended 5') - output('exiting g') - - def h(lst, coros): - coro_f1, coro_g, coro_h = coros - lst.append(3) - output('h appended 3') - coro_f1.switch() - lst.append(7) - output('h appended 7') - output('exiting h') - - def f1(coro_f1): - lst = [1] - coro_g = self.Coroutine() - coro_g.__name__ = 'coro_g' - coro_h = self.Coroutine() - coro_h.__name__ = 'coro_h' - coros = [coro_f1, coro_g, coro_h] - thunk_g = T(g, lst, coros) - output('binding g after f1 set 1') - coro_g.bind(thunk_g) - thunk_h = T(h, lst, coros) - output('binding h after f1 set 1') - coro_h.bind(thunk_h) - output('switching to g') - coro_g.switch() - lst.append(4) - output('f1 appended 4') - coro_g.switch() - lst.append(6) - output('f1 appended 6') - coro_h.switch() - lst.append(8) - output('f1 appended 8') - n = 0 - for i in lst: - n = n*10 + i - output('exiting f1') - return n - - def f(): - coro_f = Coroutine.getcurrent() - coro_f.__name__ = 'coro_f' - coro_f1 = self.Coroutine() - coro_f1.__name__ = 'coro_f1' - thunk_f1 = T1(f1, coro_f1) - output('binding f1 after f set 1') - coro_f1.bind(thunk_f1) - coro_f1.switch() - output('return to main :-(') - return thunk_f1.res - - data = self.wrap_stackless_function(f) - assert 
data == 12345678 - - def test_kill_raise_del_coro(self): - class T(AbstractThunk): - def __init__(self, func, arg): - self.func = func - self.arg = arg - def call(self): - self.func(self.arg, self) - - def g(nrec, t, count=0): - t.count = count - if nrec < 0: - raise ValueError - if nrec: - g(nrec-1, t, count+1) - Coroutine.getmain().switch() - - def f(): - assert Coroutine.getmain().frame is None - coro_g = self.Coroutine() - coro_g.__name__ = 'coro_g' - thunk_g = T(g, 42) - coro_g.bind(thunk_g) - coro_g.switch() - res = thunk_g.count - res *= 10 - res |= coro_g.frame is not None - # testing kill - coro_g.kill() - res *= 10 - res |= coro_g.frame is None - coro_g = self.Coroutine() - # see what happens if we __del__ - thunk_g = T(g, -42) - coro_g.bind(thunk_g) - try: - coro_g.switch() - except ValueError: - res += 500 - return res - - data = self.wrap_stackless_function(f) - assert data == 4711 - - def test_tree_compare(self): - class Node: - def __init__(self, value, left=None, right=None): - self.value = value - self.left = left - self.right = right - def __repr__(self): - return 'Node(%r, %r, %r)'%(self.value, self.left, self.right) - - tree1 = Node(1, Node(2, Node(3))) - tree2 = Node(1, Node(3, Node(2))) - tree3 = Node(1, Node(2), Node(3)) - - class Producer(AbstractThunk): - def __init__(self, tree, objects, consumer): - self.tree = tree - self.objects = objects - self.consumer = consumer - def produce(self, t): - if t is None: - return - self.objects.append(t.value) - self.consumer.switch() - self.produce(t.left) - self.produce(t.right) - def call(self): - self.produce(self.tree) - while 1: - self.consumer.switch() - class Consumer(AbstractThunk): - def __init__(self, tree, objects, producer): - self.tree = tree - self.objects = objects - self.producer = producer - def consume(self, t): - if t is None: - return True - self.producer.switch() - if not self.objects: - return False - if self.objects.pop(0) != t.value: - return False - if not self.consume(t.left): 
- return False - return self.consume(t.right) - - def call(self): - self.result = self.consume(self.tree) - Coroutine.getmain().switch() - - def pre_order_eq(t1, t2): - objects = [] - producer = self.Coroutine() - consumer = self.Coroutine() - - producer.bind(Producer(t1, objects, consumer)) - cons = Consumer(t2, objects, producer) - consumer.bind(cons) - - consumer.switch() - - return cons.result - - def ep(): - return int("%d%d%d%d"%(pre_order_eq(tree1, tree2), - pre_order_eq(tree1, tree1), - pre_order_eq(tree1, tree3), - pre_order_eq(tree2, tree1), - )) - - output = self.wrap_stackless_function(ep) - assert output == int('0110') - - def test_hello_goodbye(self): - - class C(Coroutine): - n = 2 - def __init__(self, n): - Coroutine.__init__(self) - self.n = n - def hello(self): - costate.hello_goodbye *= 10 - costate.hello_goodbye += self.n - def goodbye(self): - costate.hello_goodbye *= 10 - costate.hello_goodbye += self.n + 1 - - class T(AbstractThunk): - def call(self): - pass - - costate = Coroutine._get_default_costate() - costate.current.__class__ = C - costate.hello_goodbye = 0 - - def ep(): - syncstate.default_costate = costate - costate.hello_goodbye = 0 - c1 = C(4) - c1.bind(T()) - c1.switch() - return costate.hello_goodbye - - output = self.wrap_stackless_function(ep) - # expected result: - # goodbye main 3 - # hello c1 4 - # goodbye c1 5 - # hello main 2 - assert output == 3452 - - def test_raise_propagate(self): - class T(AbstractThunk): - def call(self): - raise ValueError - - def ep(): - c = self.Coroutine() - c.bind(T()) - try: - c.switch() - except ValueError: - return 100 - else: - return -5 - - output = self.wrap_stackless_function(ep) - assert output == 100 - - -TestCoroutine = _TestCoroutine # to activate -class TestCoroutineOnCPython(_TestCoroutine): - def wrap_stackless_function(self, func): - return func() - diff --git a/pypy/translator/goal/targetpreimportedpypy.py b/pypy/translator/goal/targetpreimportedpypy.py deleted file mode 100644 
--- a/pypy/translator/goal/targetpreimportedpypy.py +++ /dev/null @@ -1,239 +0,0 @@ -import py - -import os, sys -sys.setrecursionlimit(17000) - -from pypy.interpreter import gateway -from pypy.interpreter.error import OperationError -from pypy.translator.goal.ann_override import PyPyAnnotatorPolicy -from pypy.config.config import Config, to_optparse, make_dict, SUPPRESS_USAGE -from pypy.config.config import ConflictConfigError -from pypy.tool.option import make_objspace -from pypy.translator.goal.nanos import setup_nanos - -EXTRA_MODULES = [ - #"os", - #"decimal", - #"difflib", - #"tarfile", - #"cookielib", - #"optparse", - "inspect", - "random", -] - -thisdir = py.path.local(__file__).dirpath() - -try: - this_dir = os.path.dirname(__file__) -except NameError: - this_dir = os.path.dirname(sys.argv[0]) - -def debug(msg): - os.write(2, "debug: " + msg + '\n') - -# __________ Entry point __________ - -def create_entry_point(space, w_dict): - w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) - w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) - w_os = setup_nanos(space) - - def entry_point(argv): - space.timer.start("Entrypoint") - #debug("entry point starting") - #for arg in argv: - # debug(" argv -> " + arg) - if len(argv) > 2 and argv[1] == '--heapsize': - # Undocumented option, handled at interp-level. - # It has silently no effect with some GCs. - # It works in Boehm and in the semispace or generational GCs - # (but see comments in semispace.py:set_max_heap_size()). - # At the moment this option exists mainly to support sandboxing. 
- from pypy.rlib import rgc - rgc.set_max_heap_size(int(argv[2])) - argv = argv[:1] + argv[3:] - try: - try: - space.timer.start("space.startup") - space.call_function(w_run_toplevel, w_call_startup_gateway) - space.timer.stop("space.startup") - w_executable = space.wrap(argv[0]) - w_argv = space.newlist([space.wrap(s) for s in argv[1:]]) - space.timer.start("w_entry_point") - w_exitcode = space.call_function(w_entry_point, w_executable, w_argv, w_os) - space.timer.stop("w_entry_point") - exitcode = space.int_w(w_exitcode) - # try to pull it all in - ## from pypy.interpreter import main, interactive, error - ## con = interactive.PyPyConsole(space) - ## con.interact() - except OperationError, e: - debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) - debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 - finally: - try: - space.timer.start("space.finish") - space.call_function(w_run_toplevel, w_call_finish_gateway) - space.timer.stop("space.finish") - except OperationError, e: - debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) - debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 - space.timer.stop("Entrypoint") - space.timer.dump() - return exitcode - return entry_point - -def call_finish(space): - space.finish() - -def call_startup(space): - space.startup() - -# _____ Define and setup target ___ - -# for now this will do for option handling - -class PyPyTarget(object): - - usage = SUPPRESS_USAGE - - take_options = True - - def opt_parser(self, config): - parser = to_optparse(config, useoptions=["objspace.*"], - parserkwargs={'usage': self.usage}) - return parser - - def handle_config(self, config, translateconfig): - self.translateconfig = translateconfig - # set up the objspace optimizations based on the --opt argument - from pypy.config.pypyoption import set_pypy_opt_level - set_pypy_opt_level(config, translateconfig.opt) - - # as of revision 
27081, multimethod.py uses the InstallerVersion1 by default - # because it is much faster both to initialize and run on top of CPython. - # The InstallerVersion2 is optimized for making a translator-friendly - # structure for low level backends. However, InstallerVersion1 is still - # preferable for high level backends, so we patch here. - - from pypy.objspace.std import multimethod - if config.objspace.std.multimethods == 'mrd': - assert multimethod.InstallerVersion1.instance_counter == 0,\ - 'The wrong Installer version has already been instatiated' - multimethod.Installer = multimethod.InstallerVersion2 - elif config.objspace.std.multimethods == 'doubledispatch': - # don't rely on the default, set again here - assert multimethod.InstallerVersion2.instance_counter == 0,\ - 'The wrong Installer version has already been instatiated' - multimethod.Installer = multimethod.InstallerVersion1 - - def print_help(self, config): - self.opt_parser(config).print_help() - - def get_additional_config_options(self): - from pypy.config.pypyoption import pypy_optiondescription - return pypy_optiondescription - - def target(self, driver, args): - driver.exe_name = 'pypy-%(backend)s' - - config = driver.config - parser = self.opt_parser(config) - - parser.parse_args(args) - - # expose the following variables to ease debugging - global space, entry_point - - if config.objspace.allworkingmodules: - from pypy.config.pypyoption import enable_allworkingmodules - enable_allworkingmodules(config) - - if config.translation.thread: - config.objspace.usemodules.thread = True - elif config.objspace.usemodules.thread: - try: - config.translation.thread = True - except ConflictConfigError: - # If --allworkingmodules is given, we reach this point - # if threads cannot be enabled (e.g. they conflict with - # something else). In this case, we can try setting the - # usemodules.thread option to False again. 
It will - # cleanly fail if that option was set to True by the - # command-line directly instead of via --allworkingmodules. - config.objspace.usemodules.thread = False - - if config.translation.stackless: - config.objspace.usemodules._stackless = True - elif config.objspace.usemodules._stackless: - try: - config.translation.stackless = True - except ConflictConfigError: - raise ConflictConfigError("please use the --stackless option " - "to translate.py instead of " - "--withmod-_stackless directly") - - if not config.translation.rweakref: - config.objspace.usemodules._weakref = False - - if self.translateconfig.goal_options.jit: - config.objspace.usemodules.pypyjit = True - elif config.objspace.usemodules.pypyjit: - self.translateconfig.goal_options.jit = True - - if config.translation.backend == "cli": - config.objspace.usemodules.clr = True - # XXX did it ever work? - #elif config.objspace.usemodules.clr: - # config.translation.backend == "cli" - - config.objspace.nofaking = True - config.objspace.compiler = "ast" - config.translating = True - - import translate - translate.log_config(config.objspace, "PyPy config object") - - # obscure hack to stuff the translation options into the translated PyPy - import pypy.module.sys - options = make_dict(config) - wrapstr = 'space.wrap(%r)' % (options) - pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr - - return self.get_entry_point(config) - - def portal(self, driver): - from pypy.module.pypyjit.portal import get_portal - return get_portal(driver) - - def get_entry_point(self, config): - space = make_objspace(config) - - # manually imports app_main.py - filename = os.path.join(this_dir, 'app_main.py') - w_dict = space.newdict() - space.exec_(open(filename).read(), w_dict, w_dict) - for modulename in EXTRA_MODULES: - print 'pre-importing', modulename - space.exec_("import " + modulename, w_dict, w_dict) - print 'phew, ready' - entry_point = create_entry_point(space, w_dict) - - return 
entry_point, None, PyPyAnnotatorPolicy(single_space = space) - - def interface(self, ns): - for name in ['take_options', 'handle_config', 'print_help', 'target', - 'portal', - 'get_additional_config_options']: - ns[name] = getattr(self, name) - - -PyPyTarget().interface(globals()) - From noreply at buildbot.pypy.org Sun Sep 4 12:56:35 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 12:56:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix tests. Message-ID: <20110904105635.A8E918203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47069:8fa344e25292 Date: 2011-09-04 12:48 +0200 http://bitbucket.org/pypy/pypy/changeset/8fa344e25292/ Log: Fix tests. diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py @@ -7,7 +7,8 @@ class AppTestDistributed(object): def setup_class(cls): - cls.space = gettestobjspace(**{"objspace.std.withtproxy": True}) + cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, + "usemodules":("_continuation",)}) def test_init(self): import distributed @@ -88,7 +89,8 @@ assert len(item) == 11 class AppTestDistributedTasklets(object): - spaceconfig = {"objspace.std.withtproxy": True} + spaceconfig = {"objspace.std.withtproxy": True, + "objspace.usemodules._continuation": True} reclimit = sys.getrecursionlimit() def setup_class(cls): diff --git a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py --- a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py @@ -9,7 +9,8 @@ def setup_class(cls): if not option.runappdirect: py.test.skip("Cannot run this on top of py.py because of PopenGateway") - cls.space = 
gettestobjspace(**{"objspace.std.withtproxy": True}) + cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, + "usemodules":("_continuation",)}) cls.w_remote_side_code = cls.space.appexec([], """(): import sys sys.path.insert(0, '%s') diff --git a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py --- a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py @@ -9,7 +9,8 @@ class AppTestSocklayer: def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_socket", "select")}) + "usemodules":("_continuation", + "_socket", "select")}) def test_socklayer(self): class X(object): From noreply at buildbot.pypy.org Sun Sep 4 12:56:36 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 12:56:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Not understanding why we have two slightly different copies of these Message-ID: <20110904105636.DBA788203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47070:a572ffba61fe Date: 2011-09-04 12:53 +0200 http://bitbucket.org/pypy/pypy/changeset/a572ffba61fe/ Log: Not understanding why we have two slightly different copies of these tests, fixing them too. At some point please kill one or both copies... 
diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py --- a/lib_pypy/distributed/test/test_distributed.py +++ b/lib_pypy/distributed/test/test_distributed.py @@ -9,7 +9,7 @@ class AppTestDistributed(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) def test_init(self): import distributed @@ -91,10 +91,8 @@ class AppTestDistributedTasklets(object): spaceconfig = {"objspace.std.withtproxy": True, - "objspace.usemodules._stackless": True} + "objspace.usemodules._continuation": True} def setup_class(cls): - #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - # "usemodules":("_stackless",)}) cls.w_test_env = cls.space.appexec([], """(): from distributed import test_env return test_env diff --git a/lib_pypy/distributed/test/test_greensock.py b/lib_pypy/distributed/test/test_greensock.py --- a/lib_pypy/distributed/test/test_greensock.py +++ b/lib_pypy/distributed/test/test_greensock.py @@ -10,7 +10,7 @@ if not option.runappdirect: py.test.skip("Cannot run this on top of py.py because of PopenGateway") cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) cls.w_remote_side_code = cls.space.appexec([], """(): import sys sys.path.insert(0, '%s') diff --git a/lib_pypy/distributed/test/test_socklayer.py b/lib_pypy/distributed/test/test_socklayer.py --- a/lib_pypy/distributed/test/test_socklayer.py +++ b/lib_pypy/distributed/test/test_socklayer.py @@ -9,7 +9,8 @@ class AppTestSocklayer: def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless","_socket", "select")}) + "usemodules":("_continuation", + "_socket", "select")}) def test_socklayer(self): class X(object): From noreply at buildbot.pypy.org Sun Sep 4 12:56:38 2011 From: noreply at buildbot.pypy.org (arigo) 
Date: Sun, 4 Sep 2011 12:56:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix? Message-ID: <20110904105638.1B8D48203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47071:68371db86590 Date: 2011-09-04 12:55 +0200 http://bitbucket.org/pypy/pypy/changeset/68371db86590/ Log: Fix? diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py --- a/lib_pypy/pypy_test/test_coroutine.py +++ b/lib_pypy/pypy_test/test_coroutine.py @@ -2,7 +2,7 @@ from py.test import skip, raises try: - from lib_pypy.stackless import coroutine, CoroutineExit + from stackless import coroutine, CoroutineExit except ImportError, e: skip('cannot import stackless: %s' % (e,)) @@ -20,10 +20,6 @@ assert not co.is_zombie def test_is_zombie_del_without_frame(self): - try: - import _stackless # are we on pypy with a stackless build? - except ImportError: - skip("only works on pypy-c-stackless") import gc res = [] class MyCoroutine(coroutine): @@ -45,10 +41,6 @@ assert res[0], "is_zombie was False in __del__" def test_is_zombie_del_with_frame(self): - try: - import _stackless # are we on pypy with a stackless build? 
- except ImportError: - skip("only works on pypy-c-stackless") import gc res = [] class MyCoroutine(coroutine): From noreply at buildbot.pypy.org Sun Sep 4 12:56:39 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 12:56:39 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110904105639.4FA5C8203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47072:7e34f7b0c4c1 Date: 2011-09-04 12:56 +0200 http://bitbucket.org/pypy/pypy/changeset/7e34f7b0c4c1/ Log: merge heads diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py --- a/lib-python/modified-2.7/sqlite3/test/regression.py +++ b/lib-python/modified-2.7/sqlite3/test/regression.py @@ -274,6 +274,18 @@ cur.execute("UPDATE foo SET id = 3 WHERE id = 1") self.assertEqual(cur.description, None) + def CheckStatementCache(self): + cur = self.con.cursor() + cur.execute("CREATE TABLE foo (id INTEGER)") + values = [(i,) for i in xrange(5)] + cur.executemany("INSERT INTO foo (id) VALUES (?)", values) + + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + self.con.commit() + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1100,6 +1100,7 @@ self.row_cast_map = None ret = sqlite.sqlite3_reset(self.statement) self.in_use = False + self.exhausted = False return ret def finalize(self): From noreply at buildbot.pypy.org Sun Sep 4 13:16:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 13:16:03 +0200 (CEST) Subject: [pypy-commit] benchmarks default: If the web POST request fails, retry a few times before Message-ID: <20110904111603.393878203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
Changeset: r142:295b8e416c4a Date: 2011-09-04 13:15 +0200 http://bitbucket.org/pypy/benchmarks/changeset/295b8e416c4a/ Log: If the web POST request fails, retry a few times before giving up. diff --git a/saveresults.py b/saveresults.py --- a/saveresults.py +++ b/saveresults.py @@ -21,7 +21,7 @@ """ import sys -import urllib, urllib2 +import urllib, urllib2, time from datetime import datetime import optparse @@ -93,9 +93,19 @@ info += str(data['commitid']) + ", benchmark " + data['benchmark'] print(info) try: - f = urllib2.urlopen(SPEEDURL + 'result/add/', params) - response = f.read() - f.close() + retries = [10, 20, 30, 60, 150, 300] + while True: + try: + f = urllib2.urlopen(SPEEDURL + 'result/add/', params) + response = f.read() + f.close() + break + except urllib2.URLError: + if not retries: + raise + d = retries.pop(0) + print "retrying in %d seconds..." % d + time.sleep(d) except urllib2.URLError, e: if hasattr(e, 'reason'): response = '\n We failed to reach a server\n' From noreply at buildbot.pypy.org Sun Sep 4 13:48:28 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 13:48:28 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: more cases where invalidation is not necessary Message-ID: <20110904114828.F006982212@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47074:46e1033f28fd Date: 2011-09-04 13:48 +0200 http://bitbucket.org/pypy/pypy/changeset/46e1033f28fd/ Log: more cases where invalidation is not necessary diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -32,7 +32,7 @@ return if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: return - if opnum == rop.CALL: + if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: effectinfo = descr.get_extra_info() ef = effectinfo.extraeffect if ef == effectinfo.EF_LOOPINVARIANT or \ diff --git 
a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -226,6 +226,8 @@ assert h.getarrayitem(box1, descr1, index1) is box2 assert h.getarrayitem(box1, descr1, index2) is box4 + h.invalidate_caches( + rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT)) h.invalidate_caches( rop.CALL, FakeCallDescr(FakeEffektinfo.EF_RANDOM_EFFECTS)) From noreply at buildbot.pypy.org Sun Sep 4 13:48:27 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 4 Sep 2011 13:48:27 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: more cases to not invalidate the cache Message-ID: <20110904114827.BB5388203C@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47073:9b4b10daa887 Date: 2011-09-04 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/9b4b10daa887/ Log: more cases to not invalidate the cache diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -24,6 +24,12 @@ return if opnum == rop.SETARRAYITEM_GC: return + if opnum == rop.SETFIELD_RAW: + return + if opnum == rop.SETARRAYITEM_RAW: + return + if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: + return if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: return if opnum == rop.CALL: diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -213,6 +213,9 @@ h.setarrayitem(box1, descr1, index1, box2) h.setarrayitem(box1, descr1, index2, box4) h.invalidate_caches(rop.INT_ADD, None) + h.invalidate_caches(rop.INT_ADD_OVF, None) + h.invalidate_caches(rop.SETFIELD_RAW, None) + h.invalidate_caches(rop.SETARRAYITEM_RAW, None) assert h.getfield(box1, 
descr1) is box2 assert h.getarrayitem(box1, descr1, index1) is box2 assert h.getarrayitem(box1, descr1, index2) is box4 From noreply at buildbot.pypy.org Sun Sep 4 17:26:31 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Sep 2011 17:26:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Skip these tests. They seem to invoke too much stuff on _continuation Message-ID: <20110904152631.9B9548203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47075:6218cccb1880 Date: 2011-09-04 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/6218cccb1880/ Log: Skip these tests. They seem to invoke too much stuff on _continuation for successfully running in py.py. diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py @@ -1,3 +1,4 @@ +import py; py.test.skip("xxx remove") """ Controllers tests """ diff --git a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py --- a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py @@ -1,5 +1,4 @@ - -import py +import py; py.test.skip("xxx remove") from pypy.conftest import gettestobjspace, option def setup_module(mod): diff --git a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py --- a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py @@ -1,4 +1,4 @@ -import py +import py; py.test.skip("xxx remove") from pypy.conftest import gettestobjspace def setup_module(mod): From noreply at buildbot.pypy.org Sun Sep 4 19:55:49 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sun, 4 Sep 2011 
19:55:49 +0200 (CEST) Subject: [pypy-commit] lang-scheme default: Remove some code dublication Message-ID: <20110904175549.BC88D8203C@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r3:599eea81fab0 Date: 2011-09-04 19:08 +0200 http://bitbucket.org/pypy/lang-scheme/changeset/599eea81fab0/ Log: Remove some code dublication diff --git a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -226,15 +226,7 @@ self.cdr = cdr def to_string(self): - car = self.car.to_string() - cdr = self.cdr - if isinstance(cdr, W_Pair): #proper list - return "(" + car + " " + cdr.to_lstring() + ")" - elif cdr is w_nil: #one element proper list - return "(" + car + ")" - - #dotted list/pair - return "(" + car + " . " + cdr.to_string() + ")" + return "(" + self.to_lstring() + ")" def to_lstring(self): car = self.car.to_string() From noreply at buildbot.pypy.org Sun Sep 4 21:05:47 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Sun, 4 Sep 2011 21:05:47 +0200 (CEST) Subject: [pypy-commit] pypy jit-duplicated_short_boxes: speedups? Message-ID: <20110904190547.A3A6A8203C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-duplicated_short_boxes Changeset: r47076:311cc70d5b88 Date: 2011-09-04 21:05 +0200 http://bitbucket.org/pypy/pypy/changeset/311cc70d5b88/ Log: speedups? 
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.tool.pairtype import extendabletype from pypy.rlib.debug import debug_start, debug_stop, debug_print +from pypy.rlib.objectmodel import specialize LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' @@ -326,12 +327,10 @@ self.loop = loop self.bridge = bridge self.values = {} - self.importable_values = {} self.interned_refs = self.cpu.ts.new_ref_dict() self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd) self.bool_boxes = {} self.pure_operations = args_dict() - self.emitted_pure_operations = {} self.producer = {} self.pendingfields = [] self.posponedop = None @@ -339,12 +338,11 @@ self.quasi_immutable_deps = None self.opaque_pointers = {} self.newoperations = [] - self.emitting_dissabled = False - self.emitted_guards = 0 if loop is not None: self.call_pure_results = loop.call_pure_results self.set_optimizations(optimizations) + self.setup() def set_optimizations(self, optimizations): if optimizations: @@ -371,23 +369,18 @@ assert self.posponedop is None def new(self): + new = Optimizer(self.metainterp_sd, self.loop) + return self._new(new) + + def _new(self, new): assert self.posponedop is None - new = Optimizer(self.metainterp_sd, self.loop) optimizations = [o.new() for o in self.optimizations] new.set_optimizations(optimizations) new.quasi_immutable_deps = self.quasi_immutable_deps return new def produce_potential_short_preamble_ops(self, sb): - for op in self.emitted_pure_operations: - if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ - op.getopnum() == rop.STRGETITEM or \ - op.getopnum() == rop.UNICODEGETITEM: - if not self.getvalue(op.getarg(1)).is_constant(): - continue - sb.add_potential(op) - for opt in self.optimizations: - 
opt.produce_potential_short_preamble_ops(sb) + raise NotImplementedError('This is implemented in unroll.UnrollableOptimizer') def turned_constant(self, value): for o in self.optimizations: @@ -409,6 +402,7 @@ else: return box + @specialize.argtype(0) def getvalue(self, box): box = self.getinterned(box) try: @@ -419,11 +413,9 @@ return value def ensure_imported(self, value): - if not self.emitting_dissabled and value in self.importable_values: - imp = self.importable_values[value] - del self.importable_values[value] - imp.import_value(value) + pass + @specialize.argtype(0) def get_constant_box(self, box): if isinstance(box, Const): return box @@ -512,18 +504,22 @@ def emit_operation(self, op): if op.returns_bool_result(): self.bool_boxes[self.getvalue(op.result)] = None - if self.emitting_dissabled: - return + self._emit_operation(op) + @specialize.argtype(0) + def _emit_operation(self, op): for i in range(op.numargs()): arg = op.getarg(i) - if arg in self.values: - box = self.getvalue(arg).force_box() - op.setarg(i, box) + try: + value = self.values[arg] + except KeyError: + pass + else: + self.ensure_imported(value) + op.setarg(i, value.force_box()) self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) - self.emitted_guards += 1 # FIXME: can we reuse above counter? 
op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True @@ -575,6 +571,7 @@ args[n+1] = op.getdescr() return args + @specialize.argtype(0) def optimize_default(self, op): canfold = op.is_always_pure() if op.is_ovf(): @@ -610,13 +607,16 @@ return else: self.pure_operations[args] = op - self.emitted_pure_operations[op] = True + self.remember_emitting_pure(op) # otherwise, the operation remains self.emit_operation(op) if nextop: self.emit_operation(nextop) + def remember_emitting_pure(self, op): + pass + def constant_fold(self, op): argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -70,6 +70,47 @@ self.snapshot_map[snapshot] = new_snapshot return new_snapshot +class UnrollableOptimizer(Optimizer): + def setup(self): + self.importable_values = {} + self.emitting_dissabled = False + self.emitted_guards = 0 + self.emitted_pure_operations = {} + + def ensure_imported(self, value): + if not self.emitting_dissabled and value in self.importable_values: + imp = self.importable_values[value] + del self.importable_values[value] + imp.import_value(value) + + def emit_operation(self, op): + if op.returns_bool_result(): + self.bool_boxes[self.getvalue(op.result)] = None + if self.emitting_dissabled: + return + if op.is_guard(): + self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? 
+ self._emit_operation(op) + + def new(self): + new = UnrollableOptimizer(self.metainterp_sd, self.loop) + return self._new(new) + + def remember_emitting_pure(self, op): + self.emitted_pure_operations[op] = True + + def produce_potential_short_preamble_ops(self, sb): + for op in self.emitted_pure_operations: + if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ + op.getopnum() == rop.STRGETITEM or \ + op.getopnum() == rop.UNICODEGETITEM: + if not self.getvalue(op.getarg(1)).is_constant(): + continue + sb.add_potential(op) + for opt in self.optimizations: + opt.produce_potential_short_preamble_ops(sb) + + class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will @@ -77,7 +118,7 @@ distinction anymore)""" def __init__(self, metainterp_sd, loop, optimizations): - self.optimizer = Optimizer(metainterp_sd, loop, optimizations) + self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) self.cloned_operations = [] for op in self.optimizer.loop.operations: newop = op.clone() From noreply at buildbot.pypy.org Sun Sep 4 21:54:24 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sun, 4 Sep 2011 21:54:24 +0200 (CEST) Subject: [pypy-commit] lang-scheme default: Implement display & newline Message-ID: <20110904195424.E15C68203C@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r4:afd4189fb68b Date: 2011-09-04 21:49 +0200 http://bitbucket.org/pypy/lang-scheme/changeset/afd4189fb68b/ Log: Implement display & newline Mostly just uncommenting the implementation. 
Added tests for display and newline diff --git a/scheme/procedure.py b/scheme/procedure.py --- a/scheme/procedure.py +++ b/scheme/procedure.py @@ -357,18 +357,28 @@ ## # Input/Output procedures ## -#class Display(W_Procedure): -# _symbol_name = "display" -# -# def procedure(self, ctx, lst): -# if len(lst) == 1: -# obj = lst[0] -# elif len(lst) == 2: -# (obj, port) = lst -# raise NotImplementedError -# else: -# raise WrongArgsNumber -# -# print obj.to_string() -# return w_undefined +class Display(W_Procedure): + _symbol_name = "display" + def procedure(self, ctx, lst): + if len(lst) == 1: + obj = lst[0] + elif len(lst) == 2: + (obj, port) = lst + raise NotImplementedError + else: + raise WrongArgsNumber + + print obj.to_string(), + return w_undefined + +class Newline(W_Procedure): + _symbol_name = "newline" + + def procedure(self, ctx, lst): + if len(lst) != 0: + raise WrongArgsNumber + + print + return w_undefined + diff --git a/scheme/test/test_output.py b/scheme/test/test_output.py new file mode 100644 --- /dev/null +++ b/scheme/test/test_output.py @@ -0,0 +1,35 @@ + +import sys +from StringIO import StringIO +from scheme.ssparser import parse +from scheme.execution import ExecutionContext + + +def capture_output(func): + s = StringIO() + so = sys.stdout + sys.stdout = s + try: + func() + finally: + sys.stdout = so + res = s.getvalue() + s.close() + return res + +def eval_noctx(expr): + return parse(expr)[0].eval(ExecutionContext()) + +def test_display(): + tests = [("(display 'foobar)", "foobar"), + ("(display 42)", "42"), + ("(display \"Hello World!\")", "Hello World!"), + ("(display '(1 2 3))", "(1 2 3)"), + ] + for code, expected in tests: + out = capture_output(lambda: eval_noctx(code)) + assert out == expected + +def test_newline(): + out = capture_output(lambda: eval_noctx("(newline)")) + assert out == "\n" From noreply at buildbot.pypy.org Mon Sep 5 06:46:16 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Mon, 5 Sep 2011 06:46:16 +0200 (CEST) 
Subject: [pypy-commit] pypy gc-trace-faster: Attempting to remove a check for objects not in the nursery in order to speed up gc for large dicts. Message-ID: <20110905044616.1E45A11B2E04@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: gc-trace-faster Changeset: r47077:463f0bcef4ce Date: 2011-09-04 22:45 -0600 http://bitbucket.org/pypy/pypy/changeset/463f0bcef4ce/ Log: Attempting to remove a check for objects not in the nursery in order to speed up gc for large dicts. diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py --- a/pypy/rpython/memory/gc/minimark.py +++ b/pypy/rpython/memory/gc/minimark.py @@ -1300,6 +1300,10 @@ def collect_cardrefs_to_nursery(self): size_gc_header = self.gcheaderbuilder.size_gc_header oldlist = self.old_objects_with_cards_set + if bool(self.young_rawmalloced_objects): + callfunc = self.trace_and_drag_out_of_nursery_partial_young_raw + else: + callfunc = self.trace_and_drag_out_of_nursery_partial while oldlist.non_empty(): obj = oldlist.pop() # @@ -1346,7 +1350,8 @@ interval_stop = length ll_assert(cardbyte <= 1 and bytes == 0, "premature end of object") - self.trace_and_drag_out_of_nursery_partial( + #self.trace_and_drag_out_of_nursery_partial( + callfunc( obj, interval_start, interval_stop) # interval_start = interval_stop @@ -1358,6 +1363,10 @@ # Follow the old_objects_pointing_to_young list and move the # young objects they point to out of the nursery. oldlist = self.old_objects_pointing_to_young + if bool(self.young_rawmalloced_objects): + trace_and_drag_out_of_nursery_func = self.trace_and_drag_out_of_nursery_young_raw + else: + trace_and_drag_out_of_nursery_func = self.trace_and_drag_out_of_nursery while oldlist.non_empty(): obj = oldlist.pop() # @@ -1374,7 +1383,7 @@ # Trace the 'obj' to replace pointers to nursery with pointers # outside the nursery, possibly forcing nursery objects out # and adding them to 'old_objects_pointing_to_young' as well. 
- self.trace_and_drag_out_of_nursery(obj) + trace_and_drag_out_of_nursery_func(obj) def trace_and_drag_out_of_nursery(self, obj): """obj must not be in the nursery. This copies all the @@ -1382,6 +1391,9 @@ """ self.trace(obj, self._trace_drag_out, None) + def trace_and_drag_out_of_nursery_young_raw(self, obj): + self.trace(obj, self._trace_drag_out_young_raw, None) + def trace_and_drag_out_of_nursery_partial(self, obj, start, stop): """Like trace_and_drag_out_of_nursery(), but limited to the array indices in range(start, stop). @@ -1391,27 +1403,19 @@ #print 'trace_partial:', start, stop, '\t', obj self.trace_partial(obj, start, stop, self._trace_drag_out, None) + def trace_and_drag_out_of_nursery_partial_young_raw(self, obj, start, stop): + """Like trace_and_drag_out_of_nursery(), but limited to the array + indices in range(start, stop). + """ + ll_assert(start < stop, "empty or negative range " + "in trace_and_drag_out_of_nursery_partial()") + #print 'trace_partial:', start, stop, '\t', obj + self.trace_partial(obj, start, stop, self._trace_drag_out_young_raw, None) def _trace_drag_out1(self, root): - self._trace_drag_out(root, None) + self._trace_drag_out_young_raw(root, None) - def _trace_drag_out(self, root, ignored): - obj = root.address[0] - #print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj) - # - # If 'obj' is not in the nursery, nothing to change -- expect - # that we must set GCFLAG_VISITED on young raw-malloced objects. - if not self.is_in_nursery(obj): - # cache usage trade-off: I think that it is a better idea to - # check if 'obj' is in young_rawmalloced_objects with an access - # to this (small) dictionary, rather than risk a lot of cache - # misses by reading a flag in the header of all the 'objs' that - # arrive here. 
- if (bool(self.young_rawmalloced_objects) - and self.young_rawmalloced_objects.contains(obj)): - self._visit_young_rawmalloced_object(obj) - return - # + def _trace_drag_out_base(self, root, obj): # If 'obj' was already forwarded, change it to its forwarding address. if self.is_forwarded(obj): root.address[0] = self.get_forwarding_address(obj) @@ -1461,8 +1465,34 @@ # We will fix such references to point to the copy of the young # objects when we walk 'old_objects_pointing_to_young'. self.old_objects_pointing_to_young.append(newobj) + _trace_drag_out_base._always_inline_ = True + + def _trace_drag_out(self, root, ignored): + obj = root.address[0] + if not self.is_in_nursery(obj): + return + self._trace_drag_out_base(root, obj) _trace_drag_out._always_inline_ = True + def _trace_drag_out_young_raw(self, root, ignored): + obj = root.address[0] + #print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj) + # + # If 'obj' is not in the nursery, nothing to change -- expect + # that we must set GCFLAG_VISITED on young raw-malloced objects. + if not self.is_in_nursery(obj): + # cache usage trade-off: I think that it is a better idea to + # check if 'obj' is in young_rawmalloced_objects with an access + # to this (small) dictionary, rather than risk a lot of cache + # misses by reading a flag in the header of all the 'objs' that + # arrive here. + #assert bool(self.young_rawmalloced_objects) + if (bool(self.young_rawmalloced_objects) and self.young_rawmalloced_objects.contains(obj)): + self._visit_young_rawmalloced_object(obj) + return + self._trace_drag_out_base(root, obj) + _trace_drag_out_young_raw._always_inline_ = True + def _visit_young_rawmalloced_object(self, obj): # 'obj' points to a young, raw-malloced object. 
# Any young rawmalloced object never seen by the code here From noreply at buildbot.pypy.org Mon Sep 5 12:29:42 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Sep 2011 12:29:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Change FORCE_TOKEN in the x86 backend to return the register 'ebp' Message-ID: <20110905102942.2500B8203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47078:906f5d65a98b Date: 2011-09-05 10:38 +0200 http://bitbucket.org/pypy/pypy/changeset/906f5d65a98b/ Log: Change FORCE_TOKEN in the x86 backend to return the register 'ebp' directly, thus not producing any code at all. diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -57,11 +57,13 @@ all_regs = [] no_lower_byte_regs = [] save_around_call_regs = [] - + frame_reg = None + def __init__(self, longevity, frame_manager=None, assembler=None): self.free_regs = self.all_regs[:] self.longevity = longevity self.reg_bindings = {} + self.bindings_to_frame_reg = {} self.position = -1 self.frame_manager = frame_manager self.assembler = assembler @@ -218,6 +220,10 @@ self.reg_bindings[v] = loc return loc + def force_allocate_frame_reg(self, v): + """ Allocate the new variable v in the frame register.""" + self.bindings_to_frame_reg[v] = None + def force_spill_var(self, var): self._sync_var(var) try: @@ -236,6 +242,8 @@ try: return self.reg_bindings[box] except KeyError: + if box in self.bindings_to_frame_reg: + return self.frame_reg return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): @@ -264,8 +272,9 @@ self._check_type(v) if isinstance(v, Const): return self.return_constant(v, forbidden_vars, selected_reg) - prev_loc = self.loc(v) + if prev_loc is self.frame_reg and selected_reg is None: + return prev_loc loc = self.force_allocate_reg(v, forbidden_vars, selected_reg, 
need_lower_byte=need_lower_byte) if prev_loc is not loc: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2510,11 +2510,6 @@ genop_discard_cond_call_gc_wb_array = genop_discard_cond_call_gc_wb - def genop_force_token(self, op, arglocs, resloc): - # RegAlloc.consider_force_token ensures this: - assert isinstance(resloc, RegLoc) - self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS) - def not_implemented_op_discard(self, op, arglocs): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -29,6 +29,7 @@ all_regs = [eax, ecx, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] + frame_reg = ebp REGLOC_TO_GCROOTMAP_REG_INDEX = { ebx: 1, @@ -1358,8 +1359,8 @@ self.assembler.datablockwrapper) def consider_force_token(self, op): - loc = self.rm.force_allocate_reg(op.result) - self.Perform(op, [], loc) + # the FORCE_TOKEN operation returns directly 'ebp' + self.rm.force_allocate_frame_reg(op.result) def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -119,7 +119,8 @@ setitem(index, null) def get_latest_force_token(self): - return self.assembler.fail_ebp + FORCE_INDEX_OFS + # the FORCE_TOKEN operation and this helper both return 'ebp'. 
+ return self.assembler.fail_ebp def execute_token(self, executable_token): addr = executable_token._x86_bootstrap_code @@ -153,8 +154,9 @@ flavor='raw', zero=True, immortal=True) - def force(self, addr_of_force_index): + def force(self, addr_of_force_token): TP = rffi.CArrayPtr(lltype.Signed) + addr_of_force_index = addr_of_force_token + FORCE_INDEX_OFS fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" faildescr = self.get_fail_descr_from_number(fail_index) @@ -164,7 +166,7 @@ # start of "no gc operation!" block fail_index_2 = self.assembler.grab_frame_values( bytecode, - addr_of_force_index - FORCE_INDEX_OFS, + addr_of_force_token, self.all_null_registers) self.assembler.leave_jitted_hook() # end of "no gc operation!" block From noreply at buildbot.pypy.org Mon Sep 5 12:29:43 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Sep 2011 12:29:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix. Message-ID: <20110905102943.62E9682212@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47079:86b5fa5dac13 Date: 2011-09-05 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/86b5fa5dac13/ Log: Fix. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -957,6 +957,7 @@ if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm): self.mc.MOVSD(to_loc, from_loc) else: + assert to_loc is not ebp self.mc.MOV(to_loc, from_loc) regalloc_mov = mov # legacy interface diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -313,8 +313,11 @@ self.fm.frame_bindings[arg] = loc else: if isinstance(loc, RegLoc): - self.rm.reg_bindings[arg] = loc - used[loc] = None + if loc is ebp: + self.rm.bindings_to_frame_reg[arg] = None + else: + self.rm.reg_bindings[arg] = loc + used[loc] = None else: self.fm.frame_bindings[arg] = loc self.rm.free_regs = [] From noreply at buildbot.pypy.org Mon Sep 5 14:17:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Sep 2011 14:17:15 +0200 (CEST) Subject: [pypy-commit] pypy default: (fenrrir) Message-ID: <20110905121715.CC4618203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47080:ba3e8ce278bc Date: 2011-09-05 14:16 +0200 http://bitbucket.org/pypy/pypy/changeset/ba3e8ce278bc/ Log: (fenrrir) Rewrite using the _continuation module. diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -4,121 +4,124 @@ Please refer to their documentation. """ -DEBUG = True - -def dprint(*args): - for arg in args: - print arg, - print import traceback -import sys +import _continuation +from functools import partial + +class TaskletExit(Exception): + pass + +CoroutineExit = TaskletExit + +class GWrap(_continuation.continulet): + """This is just a wrapper around continulet to allow + to stick additional attributes to a continulet. 
+ To be more concrete, we need a backreference to + the coroutine object""" + + +class coroutine(object): + "we can't have continulet as a base, because continulets can't be rebound" + + def __init__(self): + self._frame = None + self.is_zombie = False + + def __getattr__(self, attr): + return getattr(self._frame, attr) + + def __del__(self): + self.is_zombie = True + del self._frame + self._frame = None + + def bind(self, func, *argl, **argd): + """coro.bind(f, *argl, **argd) -> None. + binds function f to coro. f will be called with + arguments *argl, **argd + """ + if self._frame is None or not self._frame.is_pending(): + + def _func(c, *args, **kwargs): + return func(*args, **kwargs) + + run = partial(_func, *argl, **argd) + self._frame = frame = GWrap(run) + else: + raise ValueError("cannot bind a bound coroutine") + + def switch(self): + """coro.switch() -> returnvalue + switches to coroutine coro. If the bound function + f finishes, the returnvalue is that of f, otherwise + None is returned + """ + current = _getcurrent() + current._jump_to(self) + + def _jump_to(self, coroutine): + _tls.current_coroutine = coroutine + self._frame.switch(to=coroutine._frame) + + def kill(self): + """coro.kill() : kill coroutine coro""" + _tls.current_coroutine = self + self._frame.throw(CoroutineExit) + + def _is_alive(self): + if self._frame is None: + return False + return not self._frame.is_pending() + is_alive = property(_is_alive) + del _is_alive + + def getcurrent(): + """coroutine.getcurrent() -> the currently running coroutine""" + try: + return _getcurrent() + except AttributeError: + return _maincoro + getcurrent = staticmethod(getcurrent) + + def __reduce__(self): + raise TypeError, 'pickling is not possible based upon continulets' + + +def _getcurrent(): + "Returns the current coroutine (i.e. the one which called this function)." 
+ try: + return _tls.current_coroutine + except AttributeError: + # first call in this thread: current == main + _coroutine_create_main() + return _tls.current_coroutine + try: - # If _stackless can be imported then TaskletExit and CoroutineExit are - # automatically added to the builtins. - from _stackless import coroutine, greenlet -except ImportError: # we are running from CPython - from greenlet import greenlet, GreenletExit - TaskletExit = CoroutineExit = GreenletExit - del GreenletExit - try: - from functools import partial - except ImportError: # we are not running python 2.5 - class partial(object): - # just enough of 'partial' to be usefull - def __init__(self, func, *argl, **argd): - self.func = func - self.argl = argl - self.argd = argd + from thread import _local +except ImportError: + class _local(object): # assume no threads + pass - def __call__(self): - return self.func(*self.argl, **self.argd) +_tls = _local() - class GWrap(greenlet): - """This is just a wrapper around greenlets to allow - to stick additional attributes to a greenlet. 
- To be more concrete, we need a backreference to - the coroutine object""" +def _coroutine_create_main(): + # create the main coroutine for this thread + _tls.current_coroutine = None + main_coroutine = coroutine() + main_coroutine.bind(lambda x:x) + _tls.main_coroutine = main_coroutine + _tls.current_coroutine = main_coroutine + return main_coroutine - class MWrap(object): - def __init__(self,something): - self.something = something - def __getattr__(self, attr): - return getattr(self.something, attr) +_maincoro = _coroutine_create_main() - class coroutine(object): - "we can't have greenlet as a base, because greenlets can't be rebound" - - def __init__(self): - self._frame = None - self.is_zombie = False - - def __getattr__(self, attr): - return getattr(self._frame, attr) - - def __del__(self): - self.is_zombie = True - del self._frame - self._frame = None - - def bind(self, func, *argl, **argd): - """coro.bind(f, *argl, **argd) -> None. - binds function f to coro. f will be called with - arguments *argl, **argd - """ - if self._frame is None or self._frame.dead: - self._frame = frame = GWrap() - frame.coro = self - if hasattr(self._frame, 'run') and self._frame.run: - raise ValueError("cannot bind a bound coroutine") - self._frame.run = partial(func, *argl, **argd) - - def switch(self): - """coro.switch() -> returnvalue - switches to coroutine coro. 
If the bound function - f finishes, the returnvalue is that of f, otherwise - None is returned - """ - try: - return greenlet.switch(self._frame) - except TypeError, exp: # self._frame is the main coroutine - return greenlet.switch(self._frame.something) - - def kill(self): - """coro.kill() : kill coroutine coro""" - self._frame.throw() - - def _is_alive(self): - if self._frame is None: - return False - return not self._frame.dead - is_alive = property(_is_alive) - del _is_alive - - def getcurrent(): - """coroutine.getcurrent() -> the currently running coroutine""" - try: - return greenlet.getcurrent().coro - except AttributeError: - return _maincoro - getcurrent = staticmethod(getcurrent) - - def __reduce__(self): - raise TypeError, 'pickling is not possible based upon greenlets' - - _maincoro = coroutine() - maingreenlet = greenlet.getcurrent() - _maincoro._frame = frame = MWrap(maingreenlet) - frame.coro = _maincoro - del frame - del maingreenlet from collections import deque import operator -__all__ = 'run getcurrent getmain schedule tasklet channel coroutine \ - greenlet'.split() +__all__ = 'run getcurrent getmain schedule tasklet channel coroutine'.split() _global_task_id = 0 _squeue = None @@ -131,7 +134,8 @@ def _scheduler_remove(value): try: del _squeue[operator.indexOf(_squeue, value)] - except ValueError:pass + except ValueError: + pass def _scheduler_append(value, normal=True): if normal: From noreply at buildbot.pypy.org Mon Sep 5 15:17:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Sep 2011 15:17:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Add this as a dummy module that imports everything from Message-ID: <20110905131750.6515A8203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47081:eb4d7421fbe6 Date: 2011-09-05 15:17 +0200 http://bitbucket.org/pypy/pypy/changeset/eb4d7421fbe6/ Log: Add this as a dummy module that imports everything from xml.etree.ElementTree. 
It is good enough to make all tests from lib-python/2.7/test/test_xml_etree_c.py happy. diff --git a/lib_pypy/_elementtree.py b/lib_pypy/_elementtree.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_elementtree.py @@ -0,0 +1,6 @@ +# Just use ElementTree. + +from xml.etree import ElementTree + +globals().update(ElementTree.__dict__) +del __all__ From noreply at buildbot.pypy.org Mon Sep 5 15:52:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Sep 2011 15:52:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Try 20x in this test too. Message-ID: <20110905135215.4EE058203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47082:54a2d8fd87be Date: 2011-09-05 15:51 +0200 http://bitbucket.org/pypy/pypy/changeset/54a2d8fd87be/ Log: Try 20x in this test too. diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -88,30 +88,37 @@ def test_many_names(self): import __pypy__ - class A(object): - foo = 5 - bar = 6 - baz = 7 - xyz = 8 - stuff = 9 - a = 10 - foobar = 11 + for j in range(20): + class A(object): + foo = 5 + bar = 6 + baz = 7 + xyz = 8 + stuff = 9 + a = 10 + foobar = 11 - a = A() - names = [name for name in A.__dict__.keys() - if not name.startswith('_')] - names.sort() - names_repeated = names * 10 - result = [] - __pypy__.reset_method_cache_counter() - for name in names_repeated: - result.append(getattr(a, name)) - append_counter = __pypy__.method_cache_counter("append") - names_counters = [__pypy__.method_cache_counter(name) - for name in names] - assert append_counter[0] >= 5 * len(names) - for name, count in zip(names, names_counters): - assert count[0] >= 5, str((name, count)) + a = A() + names = [name for name in A.__dict__.keys() + if not name.startswith('_')] + names.sort() + names_repeated = names * 10 + result = [] + __pypy__.reset_method_cache_counter() + for name in 
names_repeated: + result.append(getattr(a, name)) + append_counter = __pypy__.method_cache_counter("append") + names_counters = [__pypy__.method_cache_counter(name) + for name in names] + try: + assert append_counter[0] >= 10 * len(names) - 1 + for name, count in zip(names, names_counters): + assert count == (9, 1), str((name, count)) + break + except AssertionError: + pass + else: + raise def test_mutating_bases(self): class C(object): From noreply at buildbot.pypy.org Mon Sep 5 16:44:45 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Sep 2011 16:44:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't use "hasattr(os.stat_result)" on top of CPython. Instead Message-ID: <20110905144445.B05408203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47083:cf485730ec67 Date: 2011-09-05 16:31 +0200 http://bitbucket.org/pypy/pypy/changeset/cf485730ec67/ Log: Don't use "hasattr(os.stat_result)" on top of CPython. Instead try to compile with each of the extra fields, keeping the ones that work. diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py --- a/pypy/rpython/module/ll_os_stat.py +++ b/pypy/rpython/module/ll_os_stat.py @@ -49,19 +49,8 @@ ] N_INDEXABLE_FIELDS = 10 -# for now, check the host Python to know which st_xxx fields exist -STAT_FIELDS = [(_name, _TYPE) for (_name, _TYPE) in ALL_STAT_FIELDS - if hasattr(os.stat_result, _name)] - -STAT_FIELD_TYPES = dict(STAT_FIELDS) # {'st_xxx': TYPE} - -STAT_FIELD_NAMES = [_name for (_name, _TYPE) in ALL_STAT_FIELDS - if _name in STAT_FIELD_TYPES] - -del _name, _TYPE - # For OO backends, expose only the portable fields (the first 10). 
-PORTABLE_STAT_FIELDS = STAT_FIELDS[:N_INDEXABLE_FIELDS] +PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] # ____________________________________________________________ # @@ -142,17 +131,22 @@ includes = INCLUDES ) -if sys.platform != 'win32': +if TIMESPEC is not None: + class CConfig_for_timespec: + _compilation_info_ = compilation_info + TIMESPEC = TIMESPEC + TIMESPEC = lltype.Ptr( + platform.configure(CConfig_for_timespec)['TIMESPEC']) + + +def posix_declaration(try_to_add=None): + global STAT_STRUCT LL_STAT_FIELDS = STAT_FIELDS[:] + if try_to_add: + LL_STAT_FIELDS.append(try_to_add) if TIMESPEC is not None: - class CConfig_for_timespec: - _compilation_info_ = compilation_info - TIMESPEC = TIMESPEC - - TIMESPEC = lltype.Ptr( - platform.configure(CConfig_for_timespec)['TIMESPEC']) def _expand(lst, originalname, timespecname): for i, (_name, _TYPE) in enumerate(lst): @@ -178,9 +172,34 @@ class CConfig: _compilation_info_ = compilation_info STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) - config = platform.configure(CConfig) + try: + config = platform.configure(CConfig) + except platform.CompilationError: + if try_to_add: + return # failed to add this field, give up + raise STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) + if try_to_add: + STAT_FIELDS.append(try_to_add) + + +# This lists only the fields that have been found on the underlying platform. +# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the +# following loop. 
+STAT_FIELDS = PORTABLE_STAT_FIELDS[:] + +if sys.platform != 'win32': + posix_declaration() + for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)): + posix_declaration(ALL_STAT_FIELDS[_i]) + del _i + +# these two global vars only list the fields defined in the underlying platform +STAT_FIELD_TYPES = dict(STAT_FIELDS) # {'st_xxx': TYPE} +STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] +del _name, _TYPE + def build_stat_result(st): # only for LL backends diff --git a/pypy/rpython/module/test/test_ll_os_stat.py b/pypy/rpython/module/test/test_ll_os_stat.py --- a/pypy/rpython/module/test/test_ll_os_stat.py +++ b/pypy/rpython/module/test/test_ll_os_stat.py @@ -2,6 +2,16 @@ import sys, os import py + +class TestLinuxImplementation: + def setup_class(cls): + if not sys.platform.startswith('linux'): + py.test.skip("linux specific tests") + + def test_has_all_fields(self): + assert ll_os_stat.STAT_FIELDS == ll_os_stat.ALL_STAT_FIELDS[:13] + + class TestWin32Implementation: def setup_class(cls): if sys.platform != 'win32': From noreply at buildbot.pypy.org Mon Sep 5 18:37:52 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Sep 2011 18:37:52 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: * Update the RAM usage estimate on 32-bit. Message-ID: <20110905163752.71B218203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r253:7089e4114875 Date: 2011-09-05 18:37 +0200 http://bitbucket.org/pypy/pypy.org/changeset/7089e4114875/ Log: * Update the RAM usage estimate on 32-bit. * Bold for fijal :-) diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -119,7 +119,7 @@ hg clone https://bitbucket.org/pypy/pypy

  • -
  • Make sure you installed the dependencies. See the list here.

    +
  • Make sure you installed the dependencies. See the list here.

  • Enter the goal directory:

    @@ -154,7 +154,7 @@
     is 2 GB unless you hack a lot), then you can (for now) tweak some parameters
     via environment variables and command-line options.  The following command
     takes a bit more time, but finishes with only using 3.0 GB of RAM (on
    -Linux 64-bit; probably not much more than 1.5 GB on 32-bit).  It should be
    +Linux 64-bit; probably not much more than 1.6 GB on 32-bit).  It should be
     noted that it is less than with CPython.

     PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ./translate.py -Ojit
    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -111,7 +111,7 @@
     
          hg clone https://bitbucket.org/pypy/pypy
     
    -2. Make sure you installed the dependencies.  See the list here__.
    +2. Make sure you **installed the dependencies.**  See the list here__.
     
        .. __: http://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter
     
    
    From noreply at buildbot.pypy.org  Mon Sep  5 18:44:39 2011
    From: noreply at buildbot.pypy.org (justinpeel)
    Date: Mon,  5 Sep 2011 18:44:39 +0200 (CEST)
    Subject: [pypy-commit] pypy gc-trace-faster: some more refactoring. Not
     quite as good as I had wanted it to be,
     but it gets rid of 127/128 unnecessary checks for
     bool(self.young_rawmalloced_objects).
    Message-ID: <20110905164439.4085A8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Justin Peel 
    Branch: gc-trace-faster
    Changeset: r47084:f8ddb3402976
    Date: 2011-09-05 10:44 -0600
    http://bitbucket.org/pypy/pypy/changeset/f8ddb3402976/
    
    Log:	some more refactoring. Not quite as good as I had wanted it to be,
    	but it gets rid of 127/128 unnecessary checks for
    	bool(self.young_rawmalloced_objects).
    
    diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py
    --- a/pypy/rpython/memory/gc/minimark.py
    +++ b/pypy/rpython/memory/gc/minimark.py
    @@ -1300,10 +1300,6 @@
         def collect_cardrefs_to_nursery(self):
             size_gc_header = self.gcheaderbuilder.size_gc_header
             oldlist = self.old_objects_with_cards_set
    -        if bool(self.young_rawmalloced_objects):
    -            callfunc = self.trace_and_drag_out_of_nursery_partial_young_raw
    -        else:
    -            callfunc = self.trace_and_drag_out_of_nursery_partial
             while oldlist.non_empty():
                 obj = oldlist.pop()
                 #
    @@ -1351,8 +1347,12 @@
                                     ll_assert(cardbyte <= 1 and bytes == 0,
                                               "premature end of object")
                                 #self.trace_and_drag_out_of_nursery_partial(
    -                            callfunc(
    -                                obj, interval_start, interval_stop)
    +                            if bool(self.young_rawmalloced_objects):
    +                                self.trace_and_drag_out_of_nursery_partial_young_raw(
    +                                    obj, interval_start, interval_stop)
    +                            else:
    +                                self.trace_and_drag_out_of_nursery_partial(
    +                                    obj, interval_start, interval_stop)
                             #
                             interval_start = interval_stop
                             cardbyte >>= 1
    @@ -1363,10 +1363,6 @@
             # Follow the old_objects_pointing_to_young list and move the
             # young objects they point to out of the nursery.
             oldlist = self.old_objects_pointing_to_young
    -        if bool(self.young_rawmalloced_objects):
    -            trace_and_drag_out_of_nursery_func = self.trace_and_drag_out_of_nursery_young_raw
    -        else:
    -            trace_and_drag_out_of_nursery_func = self.trace_and_drag_out_of_nursery
             while oldlist.non_empty():
                 obj = oldlist.pop()
                 #
    @@ -1383,7 +1379,11 @@
                 # Trace the 'obj' to replace pointers to nursery with pointers
                 # outside the nursery, possibly forcing nursery objects out
                 # and adding them to 'old_objects_pointing_to_young' as well.
    -            trace_and_drag_out_of_nursery_func(obj)
    +            if bool(self.young_rawmalloced_objects):
    +                self.trace_and_drag_out_of_nursery_young_raw(obj)
    +            else:
    +                self.trace_and_drag_out_of_nursery(obj)
    +
     
         def trace_and_drag_out_of_nursery(self, obj):
             """obj must not be in the nursery.  This copies all the
    
    From noreply at buildbot.pypy.org  Mon Sep  5 19:03:10 2011
    From: noreply at buildbot.pypy.org (alex_gaynor)
    Date: Mon,  5 Sep 2011 19:03:10 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Bytes aren't free,
	don't waste them on an extra space.
    Message-ID: <20110905170310.883838203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Alex Gaynor 
    Branch: 
    Changeset: r47085:baa4ae3fbdee
    Date: 2011-09-04 16:54 -0700
    http://bitbucket.org/pypy/pypy/changeset/baa4ae3fbdee/
    
Log:	Bytes aren't free, don't waste them on an extra space.
    
    diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
    --- a/pypy/module/micronumpy/interp_dtype.py
    +++ b/pypy/module/micronumpy/interp_dtype.py
    @@ -224,7 +224,7 @@
             return math.tan(v)
         @unaryop
         def arcsin(self, v):
    -        if v < -1.0 or  v > 1.0:
    +        if v < -1.0 or v > 1.0:
                 return rfloat.NAN
             return math.asin(v)
         @unaryop
    
    From noreply at buildbot.pypy.org  Mon Sep  5 19:03:11 2011
    From: noreply at buildbot.pypy.org (alex_gaynor)
    Date: Mon,  5 Sep 2011 19:03:11 +0200 (CEST)
    Subject: [pypy-commit] pypy default: merged upstream.
    Message-ID: <20110905170311.CD5238203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Alex Gaynor 
    Branch: 
    Changeset: r47086:4d04034495ce
    Date: 2011-09-05 09:12 -0700
    http://bitbucket.org/pypy/pypy/changeset/4d04034495ce/
    
    Log:	merged upstream.
    
    diff --git a/lib_pypy/_elementtree.py b/lib_pypy/_elementtree.py
    new file mode 100644
    --- /dev/null
    +++ b/lib_pypy/_elementtree.py
    @@ -0,0 +1,6 @@
    +# Just use ElementTree.
    +
    +from xml.etree import ElementTree
    +
    +globals().update(ElementTree.__dict__)
    +del __all__
    diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py
    --- a/lib_pypy/stackless.py
    +++ b/lib_pypy/stackless.py
    @@ -4,121 +4,124 @@
     Please refer to their documentation.
     """
     
    -DEBUG = True
    -
    -def dprint(*args):
    -    for arg in args:
    -        print arg,
    -    print
     
     import traceback
    -import sys
    +import _continuation
    +from functools import partial
    +
    +class TaskletExit(Exception):
    +    pass
    +
    +CoroutineExit = TaskletExit
    +
    +class GWrap(_continuation.continulet):
    +    """This is just a wrapper around continulet to allow
    +       to stick additional attributes to a continulet.
    +       To be more concrete, we need a backreference to
    +       the coroutine object"""
    +
    +
    +class coroutine(object):
    +    "we can't have continulet as a base, because continulets can't be rebound"
    +
    +    def __init__(self):
    +        self._frame = None
    +        self.is_zombie = False
    +
    +    def __getattr__(self, attr):
    +        return getattr(self._frame, attr)
    +
    +    def __del__(self):
    +        self.is_zombie = True
    +        del self._frame
    +        self._frame = None
    +
    +    def bind(self, func, *argl, **argd):
    +        """coro.bind(f, *argl, **argd) -> None.
    +           binds function f to coro. f will be called with
    +           arguments *argl, **argd
    +        """
    +        if self._frame is None or not self._frame.is_pending():
    +
    +            def _func(c, *args, **kwargs):
    +                return func(*args, **kwargs)
    +            
    +            run = partial(_func, *argl, **argd)
    +            self._frame = frame = GWrap(run)
    +        else:
    +            raise ValueError("cannot bind a bound coroutine")
    +
    +    def switch(self):
    +        """coro.switch() -> returnvalue
    +           switches to coroutine coro. If the bound function
    +           f finishes, the returnvalue is that of f, otherwise
    +           None is returned
    +        """
    +        current = _getcurrent()
    +        current._jump_to(self)
    +
    +    def _jump_to(self, coroutine):
    +        _tls.current_coroutine = coroutine
    +        self._frame.switch(to=coroutine._frame)
    +
    +    def kill(self):
    +        """coro.kill() : kill coroutine coro"""
    +        _tls.current_coroutine = self
    +        self._frame.throw(CoroutineExit)
    +
    +    def _is_alive(self):
    +        if self._frame is None:
    +            return False
    +        return not self._frame.is_pending()
    +    is_alive = property(_is_alive)
    +    del _is_alive
    +
    +    def getcurrent():
    +        """coroutine.getcurrent() -> the currently running coroutine"""
    +        try:
    +            return _getcurrent()
    +        except AttributeError:
    +            return _maincoro
    +    getcurrent = staticmethod(getcurrent)
    +
    +    def __reduce__(self):
    +        raise TypeError, 'pickling is not possible based upon continulets'
    +
    +
    +def _getcurrent():
    +    "Returns the current coroutine (i.e. the one which called this function)."
    +    try:
    +        return _tls.current_coroutine
    +    except AttributeError:
    +        # first call in this thread: current == main
    +        _coroutine_create_main()
    +        return _tls.current_coroutine
    +
     try:
    -    # If _stackless can be imported then TaskletExit and CoroutineExit are 
    -    # automatically added to the builtins.
    -    from _stackless import coroutine, greenlet
    -except ImportError: # we are running from CPython
    -    from greenlet import greenlet, GreenletExit
    -    TaskletExit = CoroutineExit = GreenletExit
    -    del GreenletExit
    -    try:
    -        from functools import partial
    -    except ImportError: # we are not running python 2.5
    -        class partial(object):
    -            # just enough of 'partial' to be usefull
    -            def __init__(self, func, *argl, **argd):
    -                self.func = func
    -                self.argl = argl
    -                self.argd = argd
    +    from thread import _local
    +except ImportError:
    +    class _local(object):    # assume no threads
    +        pass
     
    -            def __call__(self):
    -                return self.func(*self.argl, **self.argd)
    +_tls = _local()
     
    -    class GWrap(greenlet):
    -        """This is just a wrapper around greenlets to allow
    -           to stick additional attributes to a greenlet.
    -           To be more concrete, we need a backreference to
    -           the coroutine object"""
    +def _coroutine_create_main():
    +    # create the main coroutine for this thread
    +    _tls.current_coroutine = None
    +    main_coroutine = coroutine()
    +    main_coroutine.bind(lambda x:x)
    +    _tls.main_coroutine = main_coroutine
    +    _tls.current_coroutine = main_coroutine
    +    return main_coroutine
     
    -    class MWrap(object):
    -        def __init__(self,something):
    -            self.something = something
     
    -        def __getattr__(self, attr):
    -            return getattr(self.something, attr)
    +_maincoro = _coroutine_create_main()
     
    -    class coroutine(object):
    -        "we can't have greenlet as a base, because greenlets can't be rebound"
    -
    -        def __init__(self):
    -            self._frame = None
    -            self.is_zombie = False
    -
    -        def __getattr__(self, attr):
    -            return getattr(self._frame, attr)
    -
    -        def __del__(self):
    -            self.is_zombie = True
    -            del self._frame
    -            self._frame = None
    -
    -        def bind(self, func, *argl, **argd):
    -            """coro.bind(f, *argl, **argd) -> None.
    -               binds function f to coro. f will be called with
    -               arguments *argl, **argd
    -            """
    -            if self._frame is None or self._frame.dead:
    -                self._frame = frame = GWrap()
    -                frame.coro = self
    -            if hasattr(self._frame, 'run') and self._frame.run:
    -                raise ValueError("cannot bind a bound coroutine")
    -            self._frame.run = partial(func, *argl, **argd)
    -
    -        def switch(self):
    -            """coro.switch() -> returnvalue
    -               switches to coroutine coro. If the bound function
    -               f finishes, the returnvalue is that of f, otherwise
    -               None is returned
    -            """
    -            try:
    -                return greenlet.switch(self._frame)
    -            except TypeError, exp: # self._frame is the main coroutine
    -                return greenlet.switch(self._frame.something)
    -
    -        def kill(self):
    -            """coro.kill() : kill coroutine coro"""
    -            self._frame.throw()
    -
    -        def _is_alive(self):
    -            if self._frame is None:
    -                return False
    -            return not self._frame.dead
    -        is_alive = property(_is_alive)
    -        del _is_alive
    -
    -        def getcurrent():
    -            """coroutine.getcurrent() -> the currently running coroutine"""
    -            try:
    -                return greenlet.getcurrent().coro
    -            except AttributeError:
    -                return _maincoro
    -        getcurrent = staticmethod(getcurrent)
    -
    -        def __reduce__(self):
    -            raise TypeError, 'pickling is not possible based upon greenlets'
    -
    -    _maincoro = coroutine()
    -    maingreenlet = greenlet.getcurrent()
    -    _maincoro._frame = frame = MWrap(maingreenlet)
    -    frame.coro = _maincoro
    -    del frame
    -    del maingreenlet
     
     from collections import deque
     
     import operator
    -__all__ = 'run getcurrent getmain schedule tasklet channel coroutine \
    -                greenlet'.split()
    +__all__ = 'run getcurrent getmain schedule tasklet channel coroutine'.split()
     
     _global_task_id = 0
     _squeue = None
    @@ -131,7 +134,8 @@
     def _scheduler_remove(value):
         try:
             del _squeue[operator.indexOf(_squeue, value)]
    -    except ValueError:pass
    +    except ValueError:
    +        pass
     
     def _scheduler_append(value, normal=True):
         if normal:
    diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
    --- a/pypy/jit/backend/llsupport/regalloc.py
    +++ b/pypy/jit/backend/llsupport/regalloc.py
    @@ -57,11 +57,13 @@
         all_regs              = []
         no_lower_byte_regs    = []
         save_around_call_regs = []
    -    
    +    frame_reg             = None
    +
         def __init__(self, longevity, frame_manager=None, assembler=None):
             self.free_regs = self.all_regs[:]
             self.longevity = longevity
             self.reg_bindings = {}
    +        self.bindings_to_frame_reg = {}
             self.position = -1
             self.frame_manager = frame_manager
             self.assembler = assembler
    @@ -218,6 +220,10 @@
             self.reg_bindings[v] = loc
             return loc
     
    +    def force_allocate_frame_reg(self, v):
    +        """ Allocate the new variable v in the frame register."""
    +        self.bindings_to_frame_reg[v] = None
    +
         def force_spill_var(self, var):
             self._sync_var(var)
             try:
    @@ -236,6 +242,8 @@
             try:
                 return self.reg_bindings[box]
             except KeyError:
    +            if box in self.bindings_to_frame_reg:
    +                return self.frame_reg
                 return self.frame_manager.loc(box)
     
         def return_constant(self, v, forbidden_vars=[], selected_reg=None):
    @@ -264,8 +272,9 @@
             self._check_type(v)
             if isinstance(v, Const):
                 return self.return_constant(v, forbidden_vars, selected_reg)
    -        
             prev_loc = self.loc(v)
    +        if prev_loc is self.frame_reg and selected_reg is None:
    +            return prev_loc
             loc = self.force_allocate_reg(v, forbidden_vars, selected_reg,
                                           need_lower_byte=need_lower_byte)
             if prev_loc is not loc:
    diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
    --- a/pypy/jit/backend/x86/assembler.py
    +++ b/pypy/jit/backend/x86/assembler.py
    @@ -957,6 +957,7 @@
             if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm):
                 self.mc.MOVSD(to_loc, from_loc)
             else:
    +            assert to_loc is not ebp
                 self.mc.MOV(to_loc, from_loc)
     
         regalloc_mov = mov # legacy interface
    @@ -2510,11 +2511,6 @@
     
         genop_discard_cond_call_gc_wb_array = genop_discard_cond_call_gc_wb
     
    -    def genop_force_token(self, op, arglocs, resloc):
    -        # RegAlloc.consider_force_token ensures this:
    -        assert isinstance(resloc, RegLoc)
    -        self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS)
    -
         def not_implemented_op_discard(self, op, arglocs):
             not_implemented("not implemented operation: %s" % op.getopname())
     
    diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
    --- a/pypy/jit/backend/x86/regalloc.py
    +++ b/pypy/jit/backend/x86/regalloc.py
    @@ -29,6 +29,7 @@
         all_regs = [eax, ecx, edx, ebx, esi, edi]
         no_lower_byte_regs = [esi, edi]
         save_around_call_regs = [eax, edx, ecx]
    +    frame_reg = ebp
     
         REGLOC_TO_GCROOTMAP_REG_INDEX = {
             ebx: 1,
    @@ -312,8 +313,11 @@
                         self.fm.frame_bindings[arg] = loc
                 else:
                     if isinstance(loc, RegLoc):
    -                    self.rm.reg_bindings[arg] = loc
    -                    used[loc] = None
    +                    if loc is ebp:
    +                        self.rm.bindings_to_frame_reg[arg] = None
    +                    else:
    +                        self.rm.reg_bindings[arg] = loc
    +                        used[loc] = None
                     else:
                         self.fm.frame_bindings[arg] = loc
             self.rm.free_regs = []
    @@ -1358,8 +1362,8 @@
                                                 self.assembler.datablockwrapper)
     
         def consider_force_token(self, op):
    -        loc = self.rm.force_allocate_reg(op.result)
    -        self.Perform(op, [], loc)
    +        # the FORCE_TOKEN operation returns directly 'ebp'
    +        self.rm.force_allocate_frame_reg(op.result)
     
         def not_implemented_op(self, op):
             not_implemented("not implemented operation: %s" % op.getopname())
    diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py
    --- a/pypy/jit/backend/x86/runner.py
    +++ b/pypy/jit/backend/x86/runner.py
    @@ -119,7 +119,8 @@
                 setitem(index, null)
     
         def get_latest_force_token(self):
    -        return self.assembler.fail_ebp + FORCE_INDEX_OFS
    +        # the FORCE_TOKEN operation and this helper both return 'ebp'.
    +        return self.assembler.fail_ebp
     
         def execute_token(self, executable_token):
             addr = executable_token._x86_bootstrap_code
    @@ -153,8 +154,9 @@
                                            flavor='raw', zero=True,
                                            immortal=True)
     
    -    def force(self, addr_of_force_index):
    +    def force(self, addr_of_force_token):
             TP = rffi.CArrayPtr(lltype.Signed)
    +        addr_of_force_index = addr_of_force_token + FORCE_INDEX_OFS
             fail_index = rffi.cast(TP, addr_of_force_index)[0]
             assert fail_index >= 0, "already forced!"
             faildescr = self.get_fail_descr_from_number(fail_index)
    @@ -164,7 +166,7 @@
             # start of "no gc operation!" block
             fail_index_2 = self.assembler.grab_frame_values(
                 bytecode,
    -            addr_of_force_index - FORCE_INDEX_OFS,
    +            addr_of_force_token,
                 self.all_null_registers)
             self.assembler.leave_jitted_hook()
             # end of "no gc operation!" block
    diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py
    --- a/pypy/objspace/std/test/test_methodcache.py
    +++ b/pypy/objspace/std/test/test_methodcache.py
    @@ -88,30 +88,37 @@
       
         def test_many_names(self):
             import __pypy__
    -        class A(object):
    -            foo = 5
    -            bar = 6
    -            baz = 7
    -            xyz = 8
    -            stuff = 9
    -            a = 10
    -            foobar = 11
    +        for j in range(20):
    +            class A(object):
    +                foo = 5
    +                bar = 6
    +                baz = 7
    +                xyz = 8
    +                stuff = 9
    +                a = 10
    +                foobar = 11
     
    -        a = A()
    -        names = [name for name in A.__dict__.keys()
    -                      if not name.startswith('_')]
    -        names.sort()
    -        names_repeated = names * 10
    -        result = []
    -        __pypy__.reset_method_cache_counter()
    -        for name in names_repeated:
    -            result.append(getattr(a, name))
    -        append_counter = __pypy__.method_cache_counter("append")
    -        names_counters = [__pypy__.method_cache_counter(name)
    -                          for name in names]
    -        assert append_counter[0] >= 5 * len(names)
    -        for name, count in zip(names, names_counters):
    -            assert count[0] >= 5, str((name, count))
    +            a = A()
    +            names = [name for name in A.__dict__.keys()
    +                          if not name.startswith('_')]
    +            names.sort()
    +            names_repeated = names * 10
    +            result = []
    +            __pypy__.reset_method_cache_counter()
    +            for name in names_repeated:
    +                result.append(getattr(a, name))
    +            append_counter = __pypy__.method_cache_counter("append")
    +            names_counters = [__pypy__.method_cache_counter(name)
    +                              for name in names]
    +            try:
    +                assert append_counter[0] >= 10 * len(names) - 1
    +                for name, count in zip(names, names_counters):
    +                    assert count == (9, 1), str((name, count))
    +                break
    +            except AssertionError:
    +                pass
    +        else:
    +            raise
     
         def test_mutating_bases(self):
             class C(object):
    diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py
    --- a/pypy/rpython/module/ll_os_stat.py
    +++ b/pypy/rpython/module/ll_os_stat.py
    @@ -49,19 +49,8 @@
         ]
     N_INDEXABLE_FIELDS = 10
     
    -# for now, check the host Python to know which st_xxx fields exist
    -STAT_FIELDS = [(_name, _TYPE) for (_name, _TYPE) in ALL_STAT_FIELDS
    -                              if hasattr(os.stat_result, _name)]
    -
    -STAT_FIELD_TYPES = dict(STAT_FIELDS)      # {'st_xxx': TYPE}
    -
    -STAT_FIELD_NAMES = [_name for (_name, _TYPE) in ALL_STAT_FIELDS
    -                          if _name in STAT_FIELD_TYPES]
    -
    -del _name, _TYPE
    -
     # For OO backends, expose only the portable fields (the first 10).
    -PORTABLE_STAT_FIELDS = STAT_FIELDS[:N_INDEXABLE_FIELDS]
    +PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS]
     
     # ____________________________________________________________
     #
    @@ -142,17 +131,22 @@
         includes = INCLUDES
     )
     
    -if sys.platform != 'win32':
    +if TIMESPEC is not None:
    +    class CConfig_for_timespec:
    +        _compilation_info_ = compilation_info
    +        TIMESPEC = TIMESPEC
    +    TIMESPEC = lltype.Ptr(
    +        platform.configure(CConfig_for_timespec)['TIMESPEC'])
    +
    +
    +def posix_declaration(try_to_add=None):
    +    global STAT_STRUCT
     
         LL_STAT_FIELDS = STAT_FIELDS[:]
    +    if try_to_add:
    +        LL_STAT_FIELDS.append(try_to_add)
     
         if TIMESPEC is not None:
    -        class CConfig_for_timespec:
    -            _compilation_info_ = compilation_info
    -            TIMESPEC = TIMESPEC
    -
    -        TIMESPEC = lltype.Ptr(
    -            platform.configure(CConfig_for_timespec)['TIMESPEC'])
     
             def _expand(lst, originalname, timespecname):
                 for i, (_name, _TYPE) in enumerate(lst):
    @@ -178,9 +172,34 @@
         class CConfig:
             _compilation_info_ = compilation_info
             STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS)
    -    config = platform.configure(CConfig)
    +    try:
    +        config = platform.configure(CConfig)
    +    except platform.CompilationError:
    +        if try_to_add:
    +            return    # failed to add this field, give up
    +        raise
     
         STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT'])
    +    if try_to_add:
    +        STAT_FIELDS.append(try_to_add)
    +
    +
    +# This lists only the fields that have been found on the underlying platform.
    +# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the
    +# following loop.
    +STAT_FIELDS = PORTABLE_STAT_FIELDS[:]
    +
    +if sys.platform != 'win32':
    +    posix_declaration()
    +    for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)):
    +        posix_declaration(ALL_STAT_FIELDS[_i])
    +    del _i
    +
    +# these two global vars only list the fields defined in the underlying platform
    +STAT_FIELD_TYPES = dict(STAT_FIELDS)      # {'st_xxx': TYPE}
    +STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS]
    +del _name, _TYPE
    +
     
     def build_stat_result(st):
         # only for LL backends
    diff --git a/pypy/rpython/module/test/test_ll_os_stat.py b/pypy/rpython/module/test/test_ll_os_stat.py
    --- a/pypy/rpython/module/test/test_ll_os_stat.py
    +++ b/pypy/rpython/module/test/test_ll_os_stat.py
    @@ -2,6 +2,16 @@
     import sys, os
     import py
     
    +
    +class TestLinuxImplementation:
    +    def setup_class(cls):
    +        if not sys.platform.startswith('linux'):
    +            py.test.skip("linux specific tests")
    +
    +    def test_has_all_fields(self):
    +        assert ll_os_stat.STAT_FIELDS == ll_os_stat.ALL_STAT_FIELDS[:13]
    +
    +
     class TestWin32Implementation:
         def setup_class(cls):
             if sys.platform != 'win32':
    
    From noreply at buildbot.pypy.org  Mon Sep  5 19:03:13 2011
    From: noreply at buildbot.pypy.org (alex_gaynor)
    Date: Mon,  5 Sep 2011 19:03:13 +0200 (CEST)
    Subject: [pypy-commit] pypy default: (snus,
     alex) Added the comparison functions to micronumpy. This is mostly
     the work from the numpy-comparisons branch, refactored by me.
    Message-ID: <20110905170313.140378203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Alex Gaynor 
    Branch: 
    Changeset: r47087:618b0bba96a2
    Date: 2011-09-05 10:02 -0700
    http://bitbucket.org/pypy/pypy/changeset/618b0bba96a2/
    
    Log:	(snus, alex) Added the comparison functions to micronumpy. This is
    	mostly the work from the numpy-comparisons branch, refactored by me.
    
    diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
    --- a/pypy/module/micronumpy/__init__.py
    +++ b/pypy/module/micronumpy/__init__.py
    @@ -26,13 +26,19 @@
             ("copysign", "copysign"),
             ("cos", "cos"),
             ("divide", "divide"),
    +        ("equal", "equal"),
             ("exp", "exp"),
             ("fabs", "fabs"),
             ("floor", "floor"),
    +        ("greater", "greater"),
    +        ("greater_equal", "greater_equal"),
    +        ("less", "less"),
    +        ("less_equal", "less_equal"),
             ("maximum", "maximum"),
             ("minimum", "minimum"),
             ("multiply", "multiply"),
             ("negative", "negative"),
    +        ("not_equal", "not_equal"),
             ("reciprocal", "reciprocal"),
             ("sign", "sign"),
             ("sin", "sin"),
    diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
    --- a/pypy/module/micronumpy/interp_dtype.py
    +++ b/pypy/module/micronumpy/interp_dtype.py
    @@ -129,6 +129,16 @@
             ))
         return impl
     
    +def raw_binop(func):
    +    # Returns the result unwrapped.
    +    @functools.wraps(func)
    +    def impl(self, v1, v2):
    +        return func(self,
    +            self.for_computation(self.unbox(v1)),
    +            self.for_computation(self.unbox(v2))
    +        )
    +    return impl
    +
     def unaryop(func):
         @functools.wraps(func)
         def impl(self, v):
    @@ -170,8 +180,24 @@
     
         def bool(self, v):
             return bool(self.for_computation(self.unbox(v)))
    +    @raw_binop
    +    def eq(self, v1, v2):
    +        return v1 == v2
    +    @raw_binop
         def ne(self, v1, v2):
    -        return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2))
    +        return v1 != v2
    +    @raw_binop
    +    def lt(self, v1, v2):
    +        return v1 < v2
    +    @raw_binop
    +    def le(self, v1, v2):
    +        return v1 <= v2
    +    @raw_binop
    +    def gt(self, v1, v2):
    +        return v1 > v2
    +    @raw_binop
    +    def ge(self, v1, v2):
    +        return v1 >= v2
     
     
     class FloatArithmeticDtype(ArithmaticTypeMixin):
    diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
    --- a/pypy/module/micronumpy/interp_numarray.py
    +++ b/pypy/module/micronumpy/interp_numarray.py
    @@ -74,6 +74,13 @@
         descr_pow = _binop_impl("power")
         descr_mod = _binop_impl("mod")
     
    +    descr_eq = _binop_impl("equal")
    +    descr_ne = _binop_impl("not_equal")
    +    descr_lt = _binop_impl("less")
    +    descr_le = _binop_impl("less_equal")
    +    descr_gt = _binop_impl("greater")
    +    descr_ge = _binop_impl("greater_equal")
    +
         def _binop_right_impl(ufunc_name):
             def impl(self, space, w_other):
                 w_other = scalar_w(space,
    @@ -404,10 +411,11 @@
         """
         Intermediate class for performing binary operations.
         """
    -    def __init__(self, signature, res_dtype, left, right):
    +    def __init__(self, signature, calc_dtype, res_dtype, left, right):
             VirtualArray.__init__(self, signature, res_dtype)
             self.left = left
             self.right = right
    +        self.calc_dtype = calc_dtype
     
         def _del_sources(self):
             self.left = None
    @@ -421,14 +429,14 @@
             return self.right.find_size()
     
         def _eval(self, i):
    -        lhs = self.left.eval(i).convert_to(self.res_dtype)
    -        rhs = self.right.eval(i).convert_to(self.res_dtype)
    +        lhs = self.left.eval(i).convert_to(self.calc_dtype)
    +        rhs = self.right.eval(i).convert_to(self.calc_dtype)
     
             sig = jit.promote(self.signature)
             assert isinstance(sig, signature.Signature)
             call_sig = sig.components[0]
             assert isinstance(call_sig, signature.Call2)
    -        return call_sig.func(self.res_dtype, lhs, rhs)
    +        return call_sig.func(self.calc_dtype, lhs, rhs)
     
     class ViewArray(BaseArray):
         """
    @@ -573,18 +581,28 @@
         __pos__ = interp2app(BaseArray.descr_pos),
         __neg__ = interp2app(BaseArray.descr_neg),
         __abs__ = interp2app(BaseArray.descr_abs),
    +
         __add__ = interp2app(BaseArray.descr_add),
         __sub__ = interp2app(BaseArray.descr_sub),
         __mul__ = interp2app(BaseArray.descr_mul),
         __div__ = interp2app(BaseArray.descr_div),
         __pow__ = interp2app(BaseArray.descr_pow),
         __mod__ = interp2app(BaseArray.descr_mod),
    +
         __radd__ = interp2app(BaseArray.descr_radd),
         __rsub__ = interp2app(BaseArray.descr_rsub),
         __rmul__ = interp2app(BaseArray.descr_rmul),
         __rdiv__ = interp2app(BaseArray.descr_rdiv),
         __rpow__ = interp2app(BaseArray.descr_rpow),
         __rmod__ = interp2app(BaseArray.descr_rmod),
    +
    +    __eq__ = interp2app(BaseArray.descr_eq),
    +    __ne__ = interp2app(BaseArray.descr_ne),
    +    __lt__ = interp2app(BaseArray.descr_lt),
    +    __le__ = interp2app(BaseArray.descr_le),
    +    __gt__ = interp2app(BaseArray.descr_gt),
    +    __ge__ = interp2app(BaseArray.descr_ge),
    +
         __repr__ = interp2app(BaseArray.descr_repr),
         __str__ = interp2app(BaseArray.descr_str),
     
    diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
    --- a/pypy/module/micronumpy/interp_ufuncs.py
    +++ b/pypy/module/micronumpy/interp_ufuncs.py
    @@ -113,10 +113,11 @@
         argcount = 2
     
         def __init__(self, func, name, promote_to_float=False, promote_bools=False,
    -        identity=None):
    +        identity=None, comparison_func=False):
     
             W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity)
             self.func = func
    +        self.comparison_func = comparison_func
             self.signature = signature.Call2(func)
             self.reduce_signature = signature.BaseSignature()
     
    @@ -127,18 +128,25 @@
             [w_lhs, w_rhs] = args_w
             w_lhs = convert_to_array(space, w_lhs)
             w_rhs = convert_to_array(space, w_rhs)
    -        res_dtype = find_binop_result_dtype(space,
    +        calc_dtype = find_binop_result_dtype(space,
                 w_lhs.find_dtype(), w_rhs.find_dtype(),
                 promote_to_float=self.promote_to_float,
                 promote_bools=self.promote_bools,
             )
    +        if self.comparison_func:
    +            res_dtype = space.fromcache(interp_dtype.W_BoolDtype)
    +        else:
    +            res_dtype = calc_dtype
             if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar):
    -            return self.func(res_dtype, w_lhs.value, w_rhs.value).wrap(space)
    +            return self.func(calc_dtype,
    +                w_lhs.value.convert_to(calc_dtype),
    +                w_rhs.value.convert_to(calc_dtype)
    +            ).wrap(space)
     
             new_sig = signature.Signature.find_sig([
                 self.signature, w_lhs.signature, w_rhs.signature
             ])
    -        w_res = Call2(new_sig, res_dtype, w_lhs, w_rhs)
    +        w_res = Call2(new_sig, calc_dtype, res_dtype, w_lhs, w_rhs)
             w_lhs.add_invalidates(w_res)
             w_rhs.add_invalidates(w_res)
             return w_res
    @@ -209,13 +217,16 @@
         return space.fromcache(interp_dtype.W_Float64Dtype)
     
     
    -def ufunc_dtype_caller(ufunc_name, op_name, argcount):
    +def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func):
         if argcount == 1:
             def impl(res_dtype, value):
                 return getattr(res_dtype, op_name)(value)
         elif argcount == 2:
             def impl(res_dtype, lvalue, rvalue):
    -            return getattr(res_dtype, op_name)(lvalue, rvalue)
    +            res = getattr(res_dtype, op_name)(lvalue, rvalue)
    +            if comparison_func:
    +                res = space.fromcache(interp_dtype.W_BoolDtype).box(res)
    +            return res
         return func_with_new_name(impl, ufunc_name)
     
     class UfuncState(object):
    @@ -229,6 +240,13 @@
                 ("mod", "mod", 2, {"promote_bools": True}),
                 ("power", "pow", 2, {"promote_bools": True}),
     
    +            ("equal", "eq", 2, {"comparison_func": True}),
    +            ("not_equal", "ne", 2, {"comparison_func": True}),
    +            ("less", "lt", 2, {"comparison_func": True}),
    +            ("less_equal", "le", 2, {"comparison_func": True}),
    +            ("greater", "gt", 2, {"comparison_func": True}),
    +            ("greater_equal", "ge", 2, {"comparison_func": True}),
    +
                 ("maximum", "max", 2),
                 ("minimum", "min", 2),
     
    @@ -262,7 +280,9 @@
                 identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity)
             extra_kwargs["identity"] = identity
     
    -        func = ufunc_dtype_caller(ufunc_name, op_name, argcount)
    +        func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount,
    +            comparison_func=extra_kwargs.get("comparison_func", False)
    +        )
             if argcount == 1:
                 ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs)
             elif argcount == 2:
    diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
    --- a/pypy/module/micronumpy/test/test_numarray.py
    +++ b/pypy/module/micronumpy/test/test_numarray.py
    @@ -557,6 +557,26 @@
             assert array([1.2, 5]).dtype is dtype(float)
             assert array([]).dtype is dtype(float)
     
    +    def test_comparison(self):
    +        import operator
    +        from numpy import array, dtype
    +
    +        a = array(range(5))
    +        b = array(range(5), float)
    +        for func in [
    +            operator.eq, operator.ne, operator.lt, operator.le, operator.gt,
    +            operator.ge
    +        ]:
    +            c = func(a, 3)
    +            assert c.dtype is dtype(bool)
    +            for i in xrange(5):
    +                assert c[i] == func(a[i], 3)
    +
    +            c = func(b, 3)
    +            assert c.dtype is dtype(bool)
    +            for i in xrange(5):
    +                assert c[i] == func(b[i], 3)
    +
     
     class AppTestSupport(object):
         def setup_class(cls):
    diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
    --- a/pypy/module/micronumpy/test/test_ufuncs.py
    +++ b/pypy/module/micronumpy/test/test_ufuncs.py
    @@ -310,4 +310,30 @@
             assert add.reduce([1, 2, 3]) == 6
             assert maximum.reduce([1]) == 1
             assert maximum.reduce([1, 2, 3]) == 3
    -        raises(ValueError, maximum.reduce, [])
    \ No newline at end of file
    +        raises(ValueError, maximum.reduce, [])
    +
    +    def test_comparisons(self):
    +        import operator
    +        from numpy import equal, not_equal, less, less_equal, greater, greater_equal
    +
    +        for ufunc, func in [
    +            (equal, operator.eq),
    +            (not_equal, operator.ne),
    +            (less, operator.lt),
    +            (less_equal, operator.le),
    +            (greater, operator.gt),
    +            (greater_equal, operator.ge),
    +        ]:
    +            for a, b in [
    +                (3, 3),
    +                (3, 4),
    +                (4, 3),
    +                (3.0, 3.0),
    +                (3.0, 3.5),
    +                (3.5, 3.0),
    +                (3.0, 3),
    +                (3, 3.0),
    +                (3.5, 3),
    +                (3, 3.5),
    +            ]:
    +                assert ufunc(a, b) is func(a, b)
    \ No newline at end of file
    
    From noreply at buildbot.pypy.org  Mon Sep  5 19:03:14 2011
    From: noreply at buildbot.pypy.org (alex_gaynor)
    Date: Mon,  5 Sep 2011 19:03:14 +0200 (CEST)
    Subject: [pypy-commit] pypy numpy-comparison: Close branch.
    Message-ID: <20110905170314.407768203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Alex Gaynor 
    Branch: numpy-comparison
    Changeset: r47088:5a08d2cfb491
    Date: 2011-09-05 10:02 -0700
    http://bitbucket.org/pypy/pypy/changeset/5a08d2cfb491/
    
    Log:	Close branch.
    
    
    From noreply at buildbot.pypy.org  Mon Sep  5 21:09:25 2011
    From: noreply at buildbot.pypy.org (alex_gaynor)
    Date: Mon,  5 Sep 2011 21:09:25 +0200 (CEST)
    Subject: [pypy-commit] pypy default: a skipped, failing test.
    Message-ID: <20110905190925.1073F8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Alex Gaynor 
    Branch: 
    Changeset: r47089:386189328fb9
    Date: 2011-09-05 12:09 -0700
    http://bitbucket.org/pypy/pypy/changeset/386189328fb9/
    
    Log:	a skipped, failing test.
    
    diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
    --- a/pypy/jit/metainterp/optimizeopt/heap.py
    +++ b/pypy/jit/metainterp/optimizeopt/heap.py
    @@ -25,7 +25,7 @@
             #      'cached_fields'.
             #
             self._cached_fields = {}
    -        self._cached_fields_getfield_op = {}        
    +        self._cached_fields_getfield_op = {}
             self._lazy_setfield = None
             self._lazy_setfield_registered = False
     
    @@ -75,7 +75,7 @@
         def remember_field_value(self, structvalue, fieldvalue, getfield_op=None):
             assert self._lazy_setfield is None
             self._cached_fields[structvalue] = fieldvalue
    -        self._cached_fields_getfield_op[structvalue] = getfield_op        
    +        self._cached_fields_getfield_op[structvalue] = getfield_op
     
         def force_lazy_setfield(self, optheap, can_cache=True):
             op = self._lazy_setfield
    @@ -163,7 +163,7 @@
     
         def new(self):
             return OptHeap()
    -        
    +
         def produce_potential_short_preamble_ops(self, sb):
             descrkeys = self.cached_fields.keys()
             if not we_are_translated():
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    @@ -4711,6 +4711,33 @@
             """
             self.optimize_loop(ops, expected)
     
    +    def test_forced_virtuals_aliasing(self):
    +        ops = """
    +        [i0, i1]
    +        p0 = new(descr=ssize)
    +        p1 = new(descr=ssize)
    +        escape(p0)
    +        escape(p1)
    +        setfield_gc(p0, i0, descr=adescr)
    +        setfield_gc(p1, i1, descr=adescr)
    +        i2 = getfield_gc(p0, descr=adescr)
    +        jump(i2, i2)
    +        """
    +        expected = """
    +        [i0, i1]
    +        p0 = new(descr=ssize)
    +        escape(p0)
    +        p1 = new(descr=ssize)
    +        escape(p1)
    +        setfield_gc(p0, i0, descr=adescr)
    +        setfield_gc(p1, i1, descr=adescr)
    +        jump(i0, i0)
    +        """
    +        py.test.skip("not implemented")
    +        # setfields on things that used to be virtual still can't alias each
    +        # other
    +        self.optimize_loop(ops, expected)
    +
     
     class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
         pass
    
    From noreply at buildbot.pypy.org  Mon Sep  5 23:20:08 2011
    From: noreply at buildbot.pypy.org (amauryfa)
    Date: Mon,  5 Sep 2011 23:20:08 +0200 (CEST)
    Subject: [pypy-commit] pypy compile-from-stream: Fix recently merged test
    Message-ID: <20110905212008.8C0BB8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Amaury Forgeot d'Arc 
    Branch: compile-from-stream
    Changeset: r47090:6b67dd77810b
    Date: 2011-08-31 01:14 +0200
    http://bitbucket.org/pypy/pypy/changeset/6b67dd77810b/
    
    Log:	Fix recently merged test
    
    diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
    --- a/pypy/module/imp/test/test_import.py
    +++ b/pypy/module/imp/test/test_import.py
    @@ -782,7 +782,7 @@
                                                      w_modulename,
                                                      w_mod,
                                                      pathname,
    -                                                 stream.readall())
    +                                                 stream)
             finally:
                 space.setattr(space.sys, space.wrap('dont_write_bytecode'),
                               space.w_False)
    
    From noreply at buildbot.pypy.org  Mon Sep  5 23:20:09 2011
    From: noreply at buildbot.pypy.org (amauryfa)
    Date: Mon,  5 Sep 2011 23:20:09 +0200 (CEST)
    Subject: [pypy-commit] pypy compile-from-stream: Simplify some tests
    Message-ID: <20110905212009.CF6628203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Amaury Forgeot d'Arc 
    Branch: compile-from-stream
    Changeset: r47091:47e802416bf2
    Date: 2011-08-31 01:34 +0200
    http://bitbucket.org/pypy/pypy/changeset/47e802416bf2/
    
    Log:	Simplify some tests
    
    diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
    --- a/pypy/module/imp/test/test_import.py
    +++ b/pypy/module/imp/test/test_import.py
    @@ -695,9 +695,8 @@
             pathname = _testfilesource()
             stream = streamio.open_file_as_stream(pathname, "r")
             try:
    -            w_ret = importing.parse_source_module(space,
    -                                                  pathname,
    -                                                  stream.readall())
    +            w_ret = importing.parse_source_file_module(
    +                space, pathname, stream)
             finally:
                 stream.close()
             pycode = space.interpclass_w(w_ret)
    @@ -839,9 +838,8 @@
             os.chmod(pathname, 0777)
             stream = streamio.open_file_as_stream(pathname, "r")
             try:
    -            w_ret = importing.parse_source_module(space,
    -                                                  pathname,
    -                                                  stream.readall())
    +            w_ret = importing.parse_source_file_module(
    +                space, pathname, stream)
             finally:
                 stream.close()
             pycode = space.interpclass_w(w_ret)
    
    From noreply at buildbot.pypy.org  Mon Sep  5 23:20:11 2011
    From: noreply at buildbot.pypy.org (amauryfa)
    Date: Mon,  5 Sep 2011 23:20:11 +0200 (CEST)
    Subject: [pypy-commit] pypy default: implement the pwd module at
    	interp-level, 
    Message-ID: <20110905212011.162368203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Amaury Forgeot d'Arc 
    Branch: 
    Changeset: r47092:21f8faf13c20
    Date: 2011-09-05 23:14 +0200
    http://bitbucket.org/pypy/pypy/changeset/21f8faf13c20/
    
    Log:	implement the pwd module at interp-level, should help the cyclic
    	imports on some platforms (pwd imported ctypes which needs
    	pwd.pw_dir to find libraries)
    
    diff --git a/lib-python/conftest.py b/lib-python/conftest.py
    --- a/lib-python/conftest.py
    +++ b/lib-python/conftest.py
    @@ -359,7 +359,7 @@
         RegrTest('test_property.py', core=True),
         RegrTest('test_pstats.py'),
         RegrTest('test_pty.py', skip="unsupported extension module"),
    -    RegrTest('test_pwd.py', skip=skip_win32),
    +    RegrTest('test_pwd.py', usemodules="pwd", skip=skip_win32),
         RegrTest('test_py3kwarn.py'),
         RegrTest('test_pyclbr.py'),
         RegrTest('test_pydoc.py'),
    diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
    --- a/pypy/config/pypyoption.py
    +++ b/pypy/config/pypyoption.py
    @@ -27,7 +27,7 @@
     # --allworkingmodules
     working_modules = default_modules.copy()
     working_modules.update(dict.fromkeys(
    -    ["_socket", "unicodedata", "mmap", "fcntl", "_locale",
    +    ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd",
          "rctime" , "select", "zipimport", "_lsprof",
          "crypt", "signal", "_rawffi", "termios", "zlib", "bz2",
          "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO",
    @@ -58,6 +58,7 @@
         # unix only modules
         del working_modules["crypt"]
         del working_modules["fcntl"]
    +    del working_modules["pwd"]
         del working_modules["termios"]
         del working_modules["_minimal_curses"]
     
    diff --git a/pypy/doc/config/objspace.usemodules.pwd.txt b/pypy/doc/config/objspace.usemodules.pwd.txt
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/config/objspace.usemodules.pwd.txt
    @@ -0,0 +1,2 @@
    +Use the 'pwd' module. 
    +This module is expected to be fully working.
    diff --git a/pypy/module/pwd/__init__.py b/pypy/module/pwd/__init__.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/__init__.py
    @@ -0,0 +1,25 @@
    +from pypy.interpreter.mixedmodule import MixedModule
    +
    +class Module(MixedModule):
    +    """
    +    This module provides access to the Unix password database.
    +    It is available on all Unix versions.
    +
    +    Password database entries are reported as 7-tuples containing the following
    +    items from the password database (see `'), in order:
    +    pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell.
    +    The uid and gid items are integers, all others are strings. An
    +    exception is raised if the entry asked for cannot be found.
    +    """
    +
    +    interpleveldefs = {
    +        'getpwuid': 'interp_pwd.getpwuid',
    +        'getpwnam': 'interp_pwd.getpwnam',
    +        'getpwall': 'interp_pwd.getpwall',
    +    }
    +
    +    appleveldefs = {
    +        'struct_passwd': 'app_pwd.struct_passwd',
    +        'struct_pwent': 'app_pwd.struct_passwd',
    +    }
    +
    diff --git a/pypy/module/pwd/app_pwd.py b/pypy/module/pwd/app_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/app_pwd.py
    @@ -0,0 +1,20 @@
    +from _structseq import structseqtype, structseqfield
    +
    +class struct_passwd:
    +    """
    +    pwd.struct_passwd: Results from getpw*() routines.
    +
    +    This object may be accessed either as a tuple of
    +      (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
    +    or via the object attributes as named in the above tuple.
    +    """
    +    __metaclass__ = structseqtype
    +    name = "pwd.struct_passwd"
    +
    +    pw_name   = structseqfield(0, "user name")
    +    pw_passwd = structseqfield(1, "password")
    +    pw_uid    = structseqfield(2, "user id")
    +    pw_gid    = structseqfield(3, "group id")
    +    pw_gecos  = structseqfield(4, "real name")
    +    pw_dir    = structseqfield(5, "home directory")
    +    pw_shell  = structseqfield(6, "shell program")
    diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/interp_pwd.py
    @@ -0,0 +1,89 @@
    +from pypy.translator.tool.cbuild import ExternalCompilationInfo
    +from pypy.rpython.tool import rffi_platform
    +from pypy.rpython.lltypesystem import rffi, lltype
    +from pypy.interpreter.gateway import interp2app, unwrap_spec
    +from pypy.interpreter.error import OperationError, operationerrfmt
    +
    +class CConfig:
    +    _compilation_info_ = ExternalCompilationInfo(
    +        includes=['pwd.h']
    +        )
    +
    +    uid_t = rffi_platform.SimpleType("uid_t")
    +
    +    passwd = rffi_platform.Struct(
    +        'struct passwd',
    +        [('pw_name', rffi.CCHARP),
    +         ('pw_passwd', rffi.CCHARP),
    +         ('pw_uid', rffi.INT),
    +         ('pw_gid', rffi.INT),
    +         ('pw_gecos', rffi.CCHARP),
    +         ('pw_dir', rffi.CCHARP),
    +         ('pw_shell', rffi.CCHARP),
    +         ])
    +
    +config = rffi_platform.configure(CConfig)
    +passwd_p = lltype.Ptr(config['passwd'])
    +uid_t = config['uid_t']
    +
    +c_getpwuid = rffi.llexternal("getpwuid", [uid_t], passwd_p)
    +c_getpwnam = rffi.llexternal("getpwnam", [rffi.CCHARP], passwd_p)
    +c_setpwent = rffi.llexternal("setpwent", [], lltype.Void)
    +c_getpwent = rffi.llexternal("getpwent", [], passwd_p)
    +c_endpwent = rffi.llexternal("endpwent", [], lltype.Void)
    +
    +def make_struct_passwd(space, pw):
    +    w_passwd_struct = space.getattr(space.getbuiltinmodule('pwd'),
    +                                    space.wrap('struct_passwd'))
    +    w_tuple = space.newtuple([
    +        space.wrap(rffi.charp2str(pw.c_pw_name)),
    +        space.wrap(rffi.charp2str(pw.c_pw_passwd)),
    +        space.wrap(pw.c_pw_uid),
    +        space.wrap(pw.c_pw_gid),
    +        space.wrap(rffi.charp2str(pw.c_pw_gecos)),
    +        space.wrap(rffi.charp2str(pw.c_pw_dir)),
    +        space.wrap(rffi.charp2str(pw.c_pw_shell)),
    +        ])
    +    return space.call_function(w_passwd_struct, w_tuple)
    +
    + at unwrap_spec(uid=int)
    +def getpwuid(space, uid):
    +    """
    +    getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
    +                      pw_gid,pw_gecos,pw_dir,pw_shell)
    +    Return the password database entry for the given numeric user ID.
    +    See pwd.__doc__ for more on password database entries.
    +    """
    +    pw = c_getpwuid(uid)
    +    if not pw:
    +        raise operationerrfmt(space.w_KeyError,
    +            "getpwuid(): uid not found: %d", uid)
    +    return make_struct_passwd(space, pw)
    +
    + at unwrap_spec(name=str)
    +def getpwnam(space, name):
    +    """
    +    getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
    +                        pw_gid,pw_gecos,pw_dir,pw_shell)
    +    Return the password database entry for the given user name.
    +    See pwd.__doc__ for more on password database entries.
    +    """
    +    pw = c_getpwnam(name)
    +    if not pw:
    +        raise operationerrfmt(space.w_KeyError,
    +            "getpwnam(): name not found: %s", name)
    +    return make_struct_passwd(space, pw)
    +
    +def getpwall(space):
    +    users_w = []
    +    c_setpwent()
    +    try:
    +        while True:
    +            pw = c_getpwent()
    +            if not pw:
    +                break
    +            users_w.append(make_struct_passwd(space, pw))
    +    finally:
    +        c_endpwent()
    +    return space.newlist(users_w)
    +    
    diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/test/test_pwd.py
    @@ -0,0 +1,25 @@
    +from pypy.conftest import gettestobjspace
    +
    +class AppTestPwd:
    +    def setup_class(cls):
    +        cls.space = gettestobjspace(usemodules=['pwd'])
    +
    +    def test_getpwuid(self):
    +        import pwd
    +        raises(KeyError, pwd.getpwuid, -1)
    +        pw = pwd.getpwuid(0)
    +        assert pw.pw_name == 'root'
    +        assert isinstance(pw.pw_passwd, str)
    +        assert pw.pw_uid == 0
    +        assert pw.pw_gid == 0
    +        assert pw.pw_dir == '/root'
    +        assert pw.pw_shell.startswith('/')
    +
    +    def test_getpwnam(self):
    +        import pwd
    +        raises(KeyError, pwd.getpwnam, '~invalid~')
    +        assert pwd.getpwnam('root').pw_name == 'root'
    +
    +    def test_getpwall(self):
    +        import pwd
    +        assert pwd.getpwnam('root') in pwd.getpwall()
    
    From noreply at buildbot.pypy.org  Mon Sep  5 23:20:12 2011
    From: noreply at buildbot.pypy.org (amauryfa)
    Date: Mon,  5 Sep 2011 23:20:12 +0200 (CEST)
Subject: [pypy-commit] pypy default: Small cleanup in marshal functions
    Message-ID: <20110905212012.4C2718203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Amaury Forgeot d'Arc 
    Branch: 
    Changeset: r47093:66a579857a84
    Date: 2011-08-31 01:25 +0200
    http://bitbucket.org/pypy/pypy/changeset/66a579857a84/
    
Log:	Small cleanup in marshal functions
    
    diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
    --- a/pypy/module/marshal/interp_marshal.py
    +++ b/pypy/module/marshal/interp_marshal.py
    @@ -40,7 +40,7 @@
             reader = FileReader(space, w_f)
         try:
             u = Unmarshaller(space, reader)
    -        return u.load_w_obj(False)
    +        return u.load_w_obj()
         finally:
             reader.finished()
     
    @@ -49,7 +49,7 @@
     ignored."""
         space.timer.start("marshal loads")
         u = StringUnmarshaller(space, w_str)
    -    obj = u.load_w_obj(False)
    +    obj = u.load_w_obj()
         space.timer.stop("marshal loads")
         return obj
     
    @@ -424,7 +424,7 @@
             lng = self.get_lng()
             return self.get(lng)
     
    -    def get_w_obj(self, allow_null):
    +    def get_w_obj(self, allow_null=False):
             space = self.space
             w_ret = space.w_None # something not None
             tc = self.get1()
    @@ -434,9 +434,9 @@
                     'NULL object in marshal data'))
             return w_ret
     
    -    def load_w_obj(self, allow_null):
    +    def load_w_obj(self):
             try:
    -            return self.get_w_obj(allow_null)
    +            return self.get_w_obj()
             except rstackovf.StackOverflow:
                 rstackovf.check_stack_overflow()
                 self._overflow()
    diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py
    --- a/pypy/objspace/std/marshal_impl.py
    +++ b/pypy/objspace/std/marshal_impl.py
    @@ -325,10 +325,10 @@
         # of building a list of tuples.
         w_dic = space.newdict()
         while 1:
    -        w_key = u.get_w_obj(True)
    +        w_key = u.get_w_obj(allow_null=True)
             if w_key is None:
                 break
    -        w_value = u.get_w_obj(False)
    +        w_value = u.get_w_obj()
             space.setitem(w_dic, w_key, w_value)
         return w_dic
     register(TYPE_DICT, unmarshal_DictMulti)
    @@ -364,7 +364,7 @@
     # so we no longer can handle it in interp_marshal.atom_strlist
     
     def unmarshal_str(u):
    -    w_obj = u.get_w_obj(False)
    +    w_obj = u.get_w_obj()
         try:
             return u.space.str_w(w_obj)
         except OperationError, e:
    
    From noreply at buildbot.pypy.org  Tue Sep  6 01:30:33 2011
    From: noreply at buildbot.pypy.org (amauryfa)
    Date: Tue,  6 Sep 2011 01:30:33 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Issue863 Implement sys._current_frames()
    Message-ID: <20110905233033.6AF298203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Amaury Forgeot d'Arc 
    Branch: 
    Changeset: r47094:411a4e22b5bc
    Date: 2011-09-06 01:29 +0200
    http://bitbucket.org/pypy/pypy/changeset/411a4e22b5bc/
    
    Log:	Issue863 Implement sys._current_frames() "for debugging purposes"
    
    diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
    --- a/pypy/interpreter/miscutils.py
    +++ b/pypy/interpreter/miscutils.py
    @@ -167,3 +167,7 @@
     
         def getmainthreadvalue(self):
             return self._value
    +
    +    def getallvalues(self):
    +        return {0: self._value}
    +
    diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
    --- a/pypy/module/sys/__init__.py
    +++ b/pypy/module/sys/__init__.py
    @@ -47,6 +47,7 @@
             'pypy_initial_path'     : 'state.pypy_initial_path',
     
             '_getframe'             : 'vm._getframe', 
    +        '_current_frames'       : 'vm._current_frames', 
             'setrecursionlimit'     : 'vm.setrecursionlimit', 
             'getrecursionlimit'     : 'vm.getrecursionlimit', 
             'setcheckinterval'      : 'vm.setcheckinterval', 
    diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
    --- a/pypy/module/sys/test/test_sysmodule.py
    +++ b/pypy/module/sys/test/test_sysmodule.py
    @@ -1,6 +1,6 @@
     # -*- coding: iso-8859-1 -*-
     import autopath
    -from pypy.conftest import option
    +from pypy.conftest import option, gettestobjspace
     from py.test import raises
     from pypy.interpreter.gateway import app2interp_temp
     import sys
    @@ -524,3 +524,51 @@
             # If this ever actually becomes a compilation option this test should
             # be changed.
             assert sys.float_repr_style == "short"
    +
    +class AppTestCurrentFrames:
    +
    +    def test_current_frames(self):
    +        try:
    +            import thread
    +        except ImportError:
    +            pass
    +        else:
    +            skip('This test requires an intepreter without threads')
    +        import sys
    +
    +        def f():
    +            return sys._current_frames()
    +        frames = f()
    +        assert frames.keys() == [0]
    +        assert frames[0].f_code.co_name == 'f'
    +
    +class AppTestCurrentFramesWithThread(AppTestCurrentFrames):
    +    def setup_class(cls):
    +        cls.space = gettestobjspace(usemodules=('thread',))
    +
    +    def test_current_frames(self):
    +        import sys
    +        import time
    +        import thread
    +
    +        thread_id = thread.get_ident()
    +        self.ready = False
    +        def other_thread():
    +            self.ready = True
    +            print "thread started"
    +            time.sleep(5)
    +        thread.start_new_thread(other_thread, ())
    +
    +        def f():
    +            for i in range(100):
    +                if self.ready: break
    +                time.sleep(0.1)
    +            return sys._current_frames()
    +        
    +        frames = f()
    +        thisframe = frames.pop(thread_id)
    +        assert thisframe.f_code.co_name == 'f'
    +
    +        assert len(frames) == 1
    +        _, other_frame = frames.popitem()
    +        assert other_frame.f_code.co_name == 'other_thread'
    diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
    --- a/pypy/module/sys/vm.py
    +++ b/pypy/module/sys/vm.py
    @@ -43,6 +43,23 @@
         f.mark_as_escaped()
         return space.wrap(f)
     
    +def _current_frames(space):
    +    """_current_frames() -> dictionary
    +
    +    Return a dictionary mapping each current thread T's thread id to T's
    +    current stack frame.
    +
    +    This function should be used for specialized purposes only."""
    +    w_result = space.newdict()
    +    ecs = space.threadlocals.getallvalues()
    +    for thread_ident, ec in ecs.items():
    +        f = ec.gettopframe_nohidden()
    +        f.mark_as_escaped()
    +        space.setitem(w_result,
    +                      space.wrap(thread_ident),
    +                      space.wrap(f))
    +    return w_result                      
    +
     def setrecursionlimit(space, w_new_limit):
         """setrecursionlimit() sets the maximum number of nested calls that
     can occur before a RuntimeError is raised.  On PyPy the limit is
    diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py
    --- a/pypy/module/thread/threadlocals.py
    +++ b/pypy/module/thread/threadlocals.py
    @@ -43,6 +43,9 @@
             ident = self._mainthreadident
             return self._valuedict.get(ident, None)
     
    +    def getallvalues(self):
    +        return self._valuedict
    +
         def enter_thread(self, space):
             "Notification that the current thread is just starting."
             ec = space.getexecutioncontext()
    
    From noreply at buildbot.pypy.org  Tue Sep  6 02:03:14 2011
    From: noreply at buildbot.pypy.org (amauryfa)
    Date: Tue,  6 Sep 2011 02:03:14 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Fix translation
    Message-ID: <20110906000314.7D5168203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Amaury Forgeot d'Arc 
    Branch: 
    Changeset: r47095:517b88c3b7d5
    Date: 2011-09-06 02:02 +0200
    http://bitbucket.org/pypy/pypy/changeset/517b88c3b7d5/
    
    Log:	Fix translation
    
    diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py
    --- a/pypy/module/pwd/interp_pwd.py
    +++ b/pypy/module/pwd/interp_pwd.py
    @@ -4,10 +4,12 @@
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.interpreter.error import OperationError, operationerrfmt
     
    +eci = ExternalCompilationInfo(
    +    includes=['pwd.h']
    +    )
    +
     class CConfig:
    -    _compilation_info_ = ExternalCompilationInfo(
    -        includes=['pwd.h']
    -        )
    +    _compilation_info_ = eci
     
         uid_t = rffi_platform.SimpleType("uid_t")
     
    @@ -26,11 +28,14 @@
     passwd_p = lltype.Ptr(config['passwd'])
     uid_t = config['uid_t']
     
    -c_getpwuid = rffi.llexternal("getpwuid", [uid_t], passwd_p)
    -c_getpwnam = rffi.llexternal("getpwnam", [rffi.CCHARP], passwd_p)
    -c_setpwent = rffi.llexternal("setpwent", [], lltype.Void)
    -c_getpwent = rffi.llexternal("getpwent", [], passwd_p)
    -c_endpwent = rffi.llexternal("endpwent", [], lltype.Void)
    +def external(name, args, result, **kwargs):
    +    return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs)
    +
    +c_getpwuid = external("getpwuid", [uid_t], passwd_p)
    +c_getpwnam = external("getpwnam", [rffi.CCHARP], passwd_p)
    +c_setpwent = external("setpwent", [], lltype.Void)
    +c_getpwent = external("getpwent", [], passwd_p)
    +c_endpwent = external("endpwent", [], lltype.Void)
     
     def make_struct_passwd(space, pw):
         w_passwd_struct = space.getattr(space.getbuiltinmodule('pwd'),
    
    From noreply at buildbot.pypy.org  Tue Sep  6 06:27:19 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 06:27:19 +0200 (CEST)
    Subject: [pypy-commit] pyrepl default: Port 659f7a0b3256 from pypy.
    Message-ID: <20110906042719.A2C1E8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r127:183fb78bf113
    Date: 2011-09-06 06:27 +0200
    http://bitbucket.org/pypy/pyrepl/changeset/183fb78bf113/
    
    Log:	Port 659f7a0b3256 from pypy.
    
    diff --git a/pyrepl/reader.py b/pyrepl/reader.py
    --- a/pyrepl/reader.py
    +++ b/pyrepl/reader.py
    @@ -576,7 +576,7 @@
             self.console.push_char(char)
             self.handle1(0)
         
    -    def readline(self):
    +    def readline(self, returns_unicode=False):
             """Read a line.  The implementation of this method also shows
             how to drive Reader if you want more control over the event
             loop."""
    @@ -585,6 +585,8 @@
                 self.refresh()
                 while not self.finished:
                     self.handle1()
    +            if returns_unicode:
    +                return self.get_unicode()
                 return self.get_buffer()
             finally:
                 self.restore()
    diff --git a/pyrepl/readline.py b/pyrepl/readline.py
    --- a/pyrepl/readline.py
    +++ b/pyrepl/readline.py
    @@ -198,7 +198,7 @@
             reader.ps1 = prompt
             return reader.readline()
     
    -    def multiline_input(self, more_lines, ps1, ps2):
    +    def multiline_input(self, more_lines, ps1, ps2, returns_unicode=False):
             """Read an input on possibly multiple lines, asking for more
             lines as long as 'more_lines(unicodetext)' returns an object whose
             boolean value is true.
    @@ -209,7 +209,7 @@
                 reader.more_lines = more_lines
                 reader.ps1 = reader.ps2 = ps1
                 reader.ps3 = reader.ps4 = ps2
    -            return reader.readline()
    +            return reader.readline(returns_unicode=returns_unicode)
             finally:
                 reader.more_lines = saved
     
    diff --git a/pyrepl/simple_interact.py b/pyrepl/simple_interact.py
    --- a/pyrepl/simple_interact.py
    +++ b/pyrepl/simple_interact.py
    @@ -54,7 +54,8 @@
                 ps1 = getattr(sys, 'ps1', '>>> ')
                 ps2 = getattr(sys, 'ps2', '... ')
                 try:
    -                statement = multiline_input(more_lines, ps1, ps2)
    +                statement = multiline_input(more_lines, ps1, ps2,
    +                                            returns_unicode=True)
                 except EOFError:
                     break
                 more = console.push(statement)
    
    From noreply at buildbot.pypy.org  Tue Sep  6 06:27:27 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 06:27:27 +0200 (CEST)
    Subject: [pypy-commit] pypy default: A failing test.
    Message-ID: <20110906042727.1049C8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47096:950b50221e93
    Date: 2011-09-06 06:16 +0200
    http://bitbucket.org/pypy/pypy/changeset/950b50221e93/
    
    Log:	A failing test.
    
    diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
    --- a/pypy/interpreter/test/test_exec.py
    +++ b/pypy/interpreter/test/test_exec.py
    @@ -219,3 +219,17 @@
                 raise e
     
             assert res == 1
    +
    +    def test_exec_unicode(self):
    +        # 's' is a string
    +        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
    +        # 'u' is a unicode
    +        u = s.decode('utf-8')
    +        exec u
    +        assert len(x) == 6
    +        assert ord(x[0]) == 0x0439
    +        assert ord(x[1]) == 0x0446
    +        assert ord(x[2]) == 0x0443
    +        assert ord(x[3]) == 0x043a
    +        assert ord(x[4]) == 0x0435
    +        assert ord(x[5]) == 0x043d
    
    From noreply at buildbot.pypy.org  Tue Sep  6 06:27:28 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 06:27:28 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Import into _pypy_interact.py the
     change from code.py to decode
    Message-ID: <20110906042728.4B21D8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47097:659f7a0b3256
    Date: 2011-09-06 06:25 +0200
    http://bitbucket.org/pypy/pypy/changeset/659f7a0b3256/
    
    Log:	Import into _pypy_interact.py the change from code.py to decode the
    	raw string into a unicode string. Fix simple_interact.py to just do
    	the right thing and not encode the unicode string in the first
    	place, by passing an extra keyword argument.
    
    diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py
    --- a/lib_pypy/_pypy_interact.py
    +++ b/lib_pypy/_pypy_interact.py
    @@ -56,6 +56,10 @@
                     prompt = getattr(sys, 'ps1', '>>> ')
                 try:
                     line = raw_input(prompt)
    +                # Can be None if sys.stdin was redefined
    +                encoding = getattr(sys.stdin, 'encoding', None)
    +                if encoding and not isinstance(line, unicode):
    +                    line = line.decode(encoding)
                 except EOFError:
                     console.write("\n")
                     break
    diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py
    --- a/lib_pypy/pyrepl/reader.py
    +++ b/lib_pypy/pyrepl/reader.py
    @@ -576,7 +576,7 @@
             self.console.push_char(char)
             self.handle1(0)
         
    -    def readline(self):
    +    def readline(self, returns_unicode=False):
             """Read a line.  The implementation of this method also shows
             how to drive Reader if you want more control over the event
             loop."""
    @@ -585,6 +585,8 @@
                 self.refresh()
                 while not self.finished:
                     self.handle1()
    +            if returns_unicode:
    +                return self.get_unicode()
                 return self.get_buffer()
             finally:
                 self.restore()
    diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py
    --- a/lib_pypy/pyrepl/readline.py
    +++ b/lib_pypy/pyrepl/readline.py
    @@ -198,7 +198,7 @@
             reader.ps1 = prompt
             return reader.readline()
     
    -    def multiline_input(self, more_lines, ps1, ps2):
    +    def multiline_input(self, more_lines, ps1, ps2, returns_unicode=False):
             """Read an input on possibly multiple lines, asking for more
             lines as long as 'more_lines(unicodetext)' returns an object whose
             boolean value is true.
    @@ -209,7 +209,7 @@
                 reader.more_lines = more_lines
                 reader.ps1 = reader.ps2 = ps1
                 reader.ps3 = reader.ps4 = ps2
    -            return reader.readline()
    +            return reader.readline(returns_unicode=returns_unicode)
             finally:
                 reader.more_lines = saved
     
    diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py
    --- a/lib_pypy/pyrepl/simple_interact.py
    +++ b/lib_pypy/pyrepl/simple_interact.py
    @@ -54,7 +54,8 @@
                 ps1 = getattr(sys, 'ps1', '>>> ')
                 ps2 = getattr(sys, 'ps2', '... ')
                 try:
    -                statement = multiline_input(more_lines, ps1, ps2)
    +                statement = multiline_input(more_lines, ps1, ps2,
    +                                            returns_unicode=True)
                 except EOFError:
                     break
                 more = console.push(statement)
    
    From noreply at buildbot.pypy.org  Tue Sep  6 06:37:13 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 06:37:13 +0200 (CEST)
    Subject: [pypy-commit] pypy default: More tests.
    Message-ID: <20110906043713.4BBE08203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47098:5d1b417d7c94
    Date: 2011-09-06 06:35 +0200
    http://bitbucket.org/pypy/pypy/changeset/5d1b417d7c94/
    
    Log:	More tests.
    
    diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
    --- a/pypy/interpreter/test/test_exec.py
    +++ b/pypy/interpreter/test/test_exec.py
    @@ -233,3 +233,16 @@
             assert ord(x[3]) == 0x043a
             assert ord(x[4]) == 0x0435
             assert ord(x[5]) == 0x043d
    +
    +    def test_eval_unicode(self):
    +        u = "u'%s'" % unichr(0x1234)
    +        v = eval(u)
    +        assert v == unichr(0x1234)
    +
    +    def test_compile_unicode(self):
    +        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
    +        u = s.decode('utf-8')
    +        c = compile(u, '', 'exec')
    +        exec c
    +        assert len(x) == 6
    +        assert ord(x[0]) == 0x0439
    
    From noreply at buildbot.pypy.org  Tue Sep  6 06:37:14 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 06:37:14 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Fix: only 'exec' was broken.
    Message-ID: <20110906043714.92B2B8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47099:2c2f3d3849d5
    Date: 2011-09-06 06:35 +0200
    http://bitbucket.org/pypy/pypy/changeset/2c2f3d3849d5/
    
    Log:	Fix: only 'exec' was broken.
    
    diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
    --- a/pypy/interpreter/pyopcode.py
    +++ b/pypy/interpreter/pyopcode.py
    @@ -1523,10 +1523,8 @@
     
             if not isinstance(prog, codetype):
                 filename = ''
    -            if not isinstance(prog, str):
    -                if isinstance(prog, basestring):
    -                    prog = str(prog)
    -                elif isinstance(prog, file):
    +            if not isinstance(prog, basestring):
    +                if isinstance(prog, file):
                         filename = prog.name
                         prog = prog.read()
                     else:
    
    From noreply at buildbot.pypy.org  Tue Sep  6 08:19:09 2011
    From: noreply at buildbot.pypy.org (alex_gaynor)
    Date: Tue,  6 Sep 2011 08:19:09 +0200 (CEST)
    Subject: [pypy-commit] pypy default: fix indentation error.
    Message-ID: <20110906061909.EE14C8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Alex Gaynor 
    Branch: 
    Changeset: r47100:21dd54a85247
    Date: 2011-09-06 06:18 +0000
    http://bitbucket.org/pypy/pypy/changeset/21dd54a85247/
    
    Log:	fix indentation error.
    
    diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
    --- a/pypy/module/micronumpy/test/test_ufuncs.py
    +++ b/pypy/module/micronumpy/test/test_ufuncs.py
    @@ -336,4 +336,4 @@
                     (3.5, 3),
                     (3, 3.5),
                 ]:
    -                assert ufunc(a, b) is func(a, b)
    \ No newline at end of file
    +                assert ufunc(a, b) is func(a, b)
    
    From noreply at buildbot.pypy.org  Tue Sep  6 13:02:14 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 13:02:14 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Cast these fields to a Python-level
     'int', i.e. a C 'long'.
    Message-ID: <20110906110214.3CFF18203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47101:9efba345739c
    Date: 2011-09-06 13:01 +0200
    http://bitbucket.org/pypy/pypy/changeset/9efba345739c/
    
    Log:	Cast these fields to a Python-level 'int', i.e. a C 'long'. This is
    	the same as CPython does.
    
    diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py
    --- a/pypy/module/pwd/interp_pwd.py
    +++ b/pypy/module/pwd/interp_pwd.py
    @@ -3,6 +3,7 @@
     from pypy.rpython.lltypesystem import rffi, lltype
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.interpreter.error import OperationError, operationerrfmt
    +from pypy.rlib.rarithmetic import intmask
     
     eci = ExternalCompilationInfo(
         includes=['pwd.h']
    @@ -43,8 +44,8 @@
         w_tuple = space.newtuple([
             space.wrap(rffi.charp2str(pw.c_pw_name)),
             space.wrap(rffi.charp2str(pw.c_pw_passwd)),
    -        space.wrap(pw.c_pw_uid),
    -        space.wrap(pw.c_pw_gid),
    +        space.wrap(intmask(pw.c_pw_uid)),
    +        space.wrap(intmask(pw.c_pw_gid)),
             space.wrap(rffi.charp2str(pw.c_pw_gecos)),
             space.wrap(rffi.charp2str(pw.c_pw_dir)),
             space.wrap(rffi.charp2str(pw.c_pw_shell)),
    diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py
    --- a/pypy/module/pwd/test/test_pwd.py
    +++ b/pypy/module/pwd/test/test_pwd.py
    @@ -14,6 +14,9 @@
             assert pw.pw_gid == 0
             assert pw.pw_dir == '/root'
             assert pw.pw_shell.startswith('/')
    +        #
    +        assert type(pw.pw_uid) is int
    +        assert type(pw.pw_gid) is int
     
         def test_getpwnam(self):
             import pwd
    
    From noreply at buildbot.pypy.org  Tue Sep  6 13:37:07 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 13:37:07 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Fix this test on 64-bit: avoids that
     random unrelated operations
    Message-ID: <20110906113707.5983E8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47102:a41ea5a18e1c
    Date: 2011-09-06 13:36 +0200
    http://bitbucket.org/pypy/pypy/changeset/a41ea5a18e1c/
    
    Log:	Fix this test on 64-bit: avoids that random unrelated operations
    	show up here, by adding a dummy getattr previously in the loop.
    
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    @@ -142,6 +142,7 @@
                 i = 0
                 b = B(1)
                 while i < 100:
    +                b.x
                     v = b.x # ID: loadattr
                     i += v
                 return i
    @@ -150,8 +151,6 @@
             loop, = log.loops_by_filename(self.filepath)
             assert loop.match_by_id('loadattr',
             '''
    -        guard_not_invalidated(descr=...)
    -        i16 = arraylen_gc(p10, descr=)
             i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...)
             guard_no_exception(descr=...)
             i21 = int_and(i19, _)
    
    From noreply at buildbot.pypy.org  Tue Sep  6 17:27:20 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 17:27:20 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Store the failargs too. Not used right
    	now.
    Message-ID: <20110906152720.B811F8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47103:862346e1db81
    Date: 2011-09-06 17:26 +0200
    http://bitbucket.org/pypy/pypy/changeset/862346e1db81/
    
    Log:	Store the failargs too. Not used right now.
    
    diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
    --- a/pypy/tool/jitlogparser/parser.py
    +++ b/pypy/tool/jitlogparser/parser.py
    @@ -8,6 +8,7 @@
         bridge = None
         offset = None
         asm = None
    +    failargs = ()
     
         def __init__(self, name, args, res, descr):
             self.name = name
    @@ -18,8 +19,8 @@
             if self._is_guard:
                 self.guard_no = int(self.descr[len('
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47104:572d0576c166
    Date: 2011-09-06 17:26 +0200
    http://bitbucket.org/pypy/pypy/changeset/572d0576c166/
    
    Log:	Python 2.5 compatibility.
    
    diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py
    --- a/pypy/module/pypyjit/test_pypy_c/model.py
    +++ b/pypy/module/pypyjit/test_pypy_c/model.py
    @@ -2,7 +2,10 @@
     import sys
     import re
     import os.path
    -from _pytest.assertion import newinterpret
    +try:
    +    from _pytest.assertion import newinterpret
    +except ImportError:   # e.g. Python 2.5
    +    newinterpret = None
     from pypy.tool.jitlogparser.parser import SimpleParser, Function, TraceForOpcode
     from pypy.tool.jitlogparser.storage import LoopStorage
     
    @@ -196,7 +199,7 @@
                         source = str(source.deindent()).strip()
             except py.error.ENOENT:
                 source = None
    -        if source and source.startswith('self._assert('):
    +        if source and source.startswith('self._assert(') and newinterpret:
                 # transform self._assert(x, 'foo') into assert x, 'foo'
                 source = source.replace('self._assert(', 'assert ')
                 source = source[:-1] # remove the trailing ')'
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    @@ -1,3 +1,4 @@
    +from __future__ import with_statement
     import sys
     import types
     import subprocess
    
    From noreply at buildbot.pypy.org  Tue Sep  6 17:39:40 2011
    From: noreply at buildbot.pypy.org (justinpeel)
    Date: Tue,  6 Sep 2011 17:39:40 +0200 (CEST)
    Subject: [pypy-commit] pypy gc-trace-faster: Attempt to reduce calls to
     trace_and_drag_out_of_nursery_partial. Not as effective as hoped,
     but still gives some improvement.
    Message-ID: <20110906153940.37A418203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Justin Peel 
    Branch: gc-trace-faster
    Changeset: r47105:2f505084ea21
    Date: 2011-09-06 09:38 -0600
    http://bitbucket.org/pypy/pypy/changeset/2f505084ea21/
    
    Log:	Attempt to reduce calls to trace_and_drag_out_of_nursery_partial.
    	Not as effective as hoped, but still gives some improvement.
    
    diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py
    --- a/pypy/rpython/memory/gc/minimark.py
    +++ b/pypy/rpython/memory/gc/minimark.py
    @@ -1334,8 +1334,47 @@
                     while bytes > 0:
                         p -= 1
                         cardbyte = ord(p.char[0])
    +                    bytes -= 1
    +                    if cardbyte == 0:
    +                        # keep moving along while there are unmarked bytes
    +                        if bytes == 0:
    +                            break
    +                        p -= 1
    +                        cardbyte = ord(p.char[0])
    +                        bytes -= 1
    +                        counter = 1
    +                        while bytes > 0 and cardbyte == 0:
    +                            p -= 1
    +                            cardbyte = ord(p.char[0])
    +                            bytes -= 1
    +                            counter += 1
    +                        interval_start = interval_start + counter*8*self.card_page_indices
    +                    if cardbyte == 255 and bytes > 0:
    +                        # keep moving until we find a byte that isn't fully marked
    +                        p.char[0] = '\x00'
    +                        counter = 1
    +                        p -= 1
    +                        cardbyte = ord(p.char[0])
    +                        bytes -= 1
    +                        while bytes > 0 and cardbyte == 255:
    +                            p.char[0] = '\x00'
    +                            p -= 1
    +                            cardbyte = ord(p.char[0])
    +                            bytes -= 1
    +                            counter += 1
    +                        interval_stop = interval_start + counter*8*self.card_page_indices
    +                        if interval_stop > length:
    +                            interval_stop = length
    +                            ll_assert(bytes == 0,
    +                                "premature end of object")
    +                        if bool(self.young_rawmalloced_objects):
    +                            self.trace_and_drag_out_of_nursery_partial_young_raw(
    +                                obj, interval_start, interval_stop)
    +                        else:
    +                            self.trace_and_drag_out_of_nursery_partial(
    +                                obj, interval_start, interval_stop)
    +                        interval_start = interval_stop
                         p.char[0] = '\x00'           # reset the bits
    -                    bytes -= 1
                         next_byte_start = interval_start + 8*self.card_page_indices
                         #
                         while cardbyte != 0:
    
    From noreply at buildbot.pypy.org  Tue Sep  6 18:04:34 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Tue,  6 Sep 2011 18:04:34 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: a branch to implement a JIT friendly
     struct type in _ffi
    Message-ID: <20110906160434.88F008203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47106:8b5b5a7627cf
    Date: 2011-09-05 15:43 +0200
    http://bitbucket.org/pypy/pypy/changeset/8b5b5a7627cf/
    
    Log:	a branch to implement a JIT friendly struct type in _ffi
    
    
    From noreply at buildbot.pypy.org  Tue Sep  6 18:04:36 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Tue,  6 Sep 2011 18:04:36 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: implement FFIType.sizeof();
     put some test logic into a base class
    Message-ID: <20110906160436.053D48203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47107:318937fd8e2e
    Date: 2011-09-05 16:24 +0200
    http://bitbucket.org/pypy/pypy/changeset/318937fd8e2e/
    
    Log:	implement FFIType.sizeof(); put some test logic into a base class
    
    diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_ffi/__init__.py
    --- a/pypy/module/_ffi/__init__.py
    +++ b/pypy/module/_ffi/__init__.py
    @@ -10,4 +10,7 @@
             'get_libc':'interp_ffi.get_libc',
         }
     
    -    appleveldefs = {}
    +    appleveldefs = {
    +        'Structure': 'app_struct.Structure',
    +        'Field':     'app_struct.Field',
    +        }
    diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_ffi.py
    --- a/pypy/module/_ffi/interp_ffi.py
    +++ b/pypy/module/_ffi/interp_ffi.py
    @@ -30,6 +30,12 @@
                 return space.w_None
             return self.w_pointer_to
     
    +    def descr_sizeof(self, space):
    +        return space.wrap(self.sizeof())
    +
    +    def sizeof(self):
    +        return intmask(self.ffitype.c_size)
    +
         def repr(self, space):
             return space.wrap(self.__repr__())
     
    @@ -86,6 +92,7 @@
         'FFIType',
         __repr__ = interp2app(W_FFIType.repr),
         deref_pointer = interp2app(W_FFIType.descr_deref_pointer),
    +    sizeof = interp2app(W_FFIType.descr_sizeof),
         )
     
     
    diff --git a/pypy/module/_ffi/test/test__ffi.py b/pypy/module/_ffi/test/test__ffi.py
    --- a/pypy/module/_ffi/test/test__ffi.py
    +++ b/pypy/module/_ffi/test/test__ffi.py
    @@ -7,7 +7,7 @@
     
     import os, sys, py
     
    -class AppTestFfi:
    +class BaseAppTestFFI(object):
     
         @classmethod
         def prepare_c_example(cls):
    @@ -36,7 +36,6 @@
             eci = ExternalCompilationInfo(export_symbols=[])
             return str(platform.compile([c_file], eci, 'x', standalone=False))
     
    -    
         def setup_class(cls):
             from pypy.rpython.lltypesystem import rffi
             from pypy.rlib.libffi import get_libc_name, CDLL, types
    @@ -52,7 +51,12 @@
             pow = libm.getpointer('pow', [], types.void)
             pow_addr = rffi.cast(rffi.LONG, pow.funcsym)
             cls.w_pow_addr = space.wrap(pow_addr)
    -        #
    +
    +class AppTestFFI(BaseAppTestFFI):
    +
    +    def setup_class(cls):
    +        BaseAppTestFFI.setup_class.im_func(cls)
    +        space = cls.space
             # these are needed for test_single_float_args
             from ctypes import c_float
             f_12_34 = c_float(12.34).value
    @@ -82,7 +86,12 @@
             from _ffi import types
     +        assert str(types.sint) == "<ffi type sint>"
     +        assert str(types.uint) == "<ffi type uint>"
    -        
    +
    +    def test_sizeof(self):
    +        from _ffi import types
    +        assert types.sbyte.sizeof() == 1
    +        assert types.sint.sizeof() == 4
    +    
         def test_callfunc(self):
             from _ffi import CDLL, types
             libm = CDLL(self.libm_name)
    
    From noreply at buildbot.pypy.org  Tue Sep  6 18:04:37 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Tue,  6 Sep 2011 18:04:37 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: start to implement _ffi.Structure
    Message-ID: <20110906160437.8D2EA8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47108:6fe3c422d545
    Date: 2011-09-05 16:37 +0200
    http://bitbucket.org/pypy/pypy/changeset/6fe3c422d545/
    
    Log:	start to implement _ffi.Structure
    
    diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_ffi/app_struct.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/_ffi/app_struct.py
    @@ -0,0 +1,29 @@
    +class Field(object):
    +
    +    def __init__(self, name, ffitype):
    +        self.name = name
    +        self.ffitype = ffitype
    +        self.offset = -1
    +
    +class MetaStructure(type):
    +
    +    def __new__(cls, name, bases, dic):
    +        cls._compute_shape(dic)
    +        return type.__new__(cls, name, bases, dic)
    +
    +    @classmethod
    +    def _compute_shape(cls, dic):
    +        fields = dic.get('_fields_')
    +        if fields is None:
    +            return
    +        size = 0
    +        for field in fields:
    +            field.offset = size # XXX: alignment!
    +            size += field.ffitype.sizeof()
    +            dic[field.name] = field
    +        dic['_size_'] = size
    +
    +
    +class Structure(object):
    +
    +    __metaclass__ = MetaStructure
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -0,0 +1,19 @@
    +from pypy.module._ffi.test.test__ffi import BaseAppTestFFI
    +
    +class AppTestStruct(BaseAppTestFFI):
    +
    +    def test_compute_shape(self):
    +        from _ffi import Structure, Field, types
    +        class Point(Structure):
    +            _fields_ = [
    +                Field('x', types.slong),
    +                Field('y', types.slong),
    +                ]
    +
    +        longsize = types.slong.sizeof()
    +        assert isinstance(Point.x, Field)
    +        assert isinstance(Point.y, Field)
    +        assert Point.x.offset == 0
    +        assert Point.y.offset == longsize
    +        assert Point._size_ == longsize*2
    +        
    
    From noreply at buildbot.pypy.org  Tue Sep  6 18:04:39 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Tue,  6 Sep 2011 18:04:39 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: split the implementation/tests of
     W_FFIType and W_FuncPtr into two separate files
    Message-ID: <20110906160439.2CAEC8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47109:edee74efbb9c
    Date: 2011-09-05 16:47 +0200
    http://bitbucket.org/pypy/pypy/changeset/edee74efbb9c/
    
    Log:	split the implementation/tests of W_FFIType and W_FuncPtr into two
    	separate files
    
    diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_ffi/__init__.py
    --- a/pypy/module/_ffi/__init__.py
    +++ b/pypy/module/_ffi/__init__.py
    @@ -1,13 +1,12 @@
     from pypy.interpreter.mixedmodule import MixedModule
    -from pypy.module._ffi import interp_ffi
     
     class Module(MixedModule):
     
         interpleveldefs = {
    -        'CDLL':    'interp_ffi.W_CDLL',
    -        'types':   'interp_ffi.W_types',
    -        'FuncPtr': 'interp_ffi.W_FuncPtr',
    -        'get_libc':'interp_ffi.get_libc',
    +        'types':   'interp_ffitype.W_types',
    +        'CDLL':    'interp_funcptr.W_CDLL',
    +        'FuncPtr': 'interp_funcptr.W_FuncPtr',
    +        'get_libc':'interp_funcptr.get_libc',
         }
     
         appleveldefs = {
    diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_ffi/app_struct.py
    --- a/pypy/module/_ffi/app_struct.py
    +++ b/pypy/module/_ffi/app_struct.py
    @@ -5,6 +5,15 @@
             self.ffitype = ffitype
             self.offset = -1
     
    +    ## def __get__(self, obj, cls=None):
    +    ##     if obj is None:
    +    ##         return self
    +    ##     return getfield(obj._buffer, self.ffitype, self.offset)
    +
    +    ## def __set__(self, obj, value):
    +    ##     setfield(obj._buffer, self.ffitype, self.offset, value)
    +
    +
     class MetaStructure(type):
     
         def __new__(cls, name, bases, dic):
    diff --git a/pypy/module/_ffi/interp_ffitype.py b/pypy/module/_ffi/interp_ffitype.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/_ffi/interp_ffitype.py
    @@ -0,0 +1,156 @@
    +from pypy.rlib import libffi
    +from pypy.rlib.rarithmetic import intmask
    +from pypy.interpreter.baseobjspace import Wrappable
    +from pypy.interpreter.typedef import TypeDef
    +from pypy.interpreter.gateway import interp2app
    +
    +class W_FFIType(Wrappable):
    +
    +    _immutable_fields_ = ['name', 'ffitype', 'w_datashape', 'w_pointer_to']
    +
    +    def __init__(self, name, ffitype, w_datashape=None, w_pointer_to=None):
    +        self.name = name
    +        self.ffitype = ffitype
    +        self.w_datashape = w_datashape
    +        self.w_pointer_to = w_pointer_to
    +        if self.is_struct():
    +            assert w_datashape is not None
    +
    +    def descr_deref_pointer(self, space):
    +        if self.w_pointer_to is None:
    +            return space.w_None
    +        return self.w_pointer_to
    +
    +    def descr_sizeof(self, space):
    +        return space.wrap(self.sizeof())
    +
    +    def sizeof(self):
    +        return intmask(self.ffitype.c_size)
    +
    +    def repr(self, space):
    +        return space.wrap(self.__repr__())
    +
    +    def __repr__(self):
     +        return "<ffi type %s>" % self.name
    +
    +    def is_signed(self):
    +        return (self is app_types.slong or
    +                self is app_types.sint or
    +                self is app_types.sshort or
    +                self is app_types.sbyte or
    +                self is app_types.slonglong)
    +
    +    def is_unsigned(self):
    +        return (self is app_types.ulong or
    +                self is app_types.uint or
    +                self is app_types.ushort or
    +                self is app_types.ubyte or
    +                self is app_types.ulonglong)
    +
    +    def is_pointer(self):
    +        return self.ffitype is libffi.types.pointer
    +
    +    def is_char(self):
    +        return self is app_types.char
    +
    +    def is_unichar(self):
    +        return self is app_types.unichar
    +
    +    def is_longlong(self):
    +        return libffi.IS_32_BIT and (self is app_types.slonglong or
    +                                     self is app_types.ulonglong)
    +
    +    def is_double(self):
    +        return self is app_types.double
    +
    +    def is_singlefloat(self):
    +        return self is app_types.float
    +
    +    def is_void(self):
    +        return self is app_types.void
    +
    +    def is_struct(self):
    +        return libffi.types.is_struct(self.ffitype)
    +
    +    def is_char_p(self):
    +        return self is app_types.char_p
    +
    +    def is_unichar_p(self):
    +        return self is app_types.unichar_p
    +
    +
    +W_FFIType.typedef = TypeDef(
    +    'FFIType',
    +    __repr__ = interp2app(W_FFIType.repr),
    +    deref_pointer = interp2app(W_FFIType.descr_deref_pointer),
    +    sizeof = interp2app(W_FFIType.descr_sizeof),
    +    )
    +
    +
    +def build_ffi_types():
    +    types = [
    +        # note: most of the type name directly come from the C equivalent,
    +        # with the exception of bytes: in C, ubyte and char are equivalent,
    +        # but for _ffi the first expects a number while the second a 1-length
    +        # string
    +        W_FFIType('slong',     libffi.types.slong),
    +        W_FFIType('sint',      libffi.types.sint),
    +        W_FFIType('sshort',    libffi.types.sshort),
    +        W_FFIType('sbyte',     libffi.types.schar),
    +        W_FFIType('slonglong', libffi.types.slonglong),
    +        #
    +        W_FFIType('ulong',     libffi.types.ulong),
    +        W_FFIType('uint',      libffi.types.uint),
    +        W_FFIType('ushort',    libffi.types.ushort),
    +        W_FFIType('ubyte',     libffi.types.uchar),
    +        W_FFIType('ulonglong', libffi.types.ulonglong),
    +        #
    +        W_FFIType('char',      libffi.types.uchar),
    +        W_FFIType('unichar',   libffi.types.wchar_t),
    +        #
    +        W_FFIType('double',    libffi.types.double),
    +        W_FFIType('float',     libffi.types.float),
    +        W_FFIType('void',      libffi.types.void),
    +        W_FFIType('void_p',    libffi.types.pointer),
    +        #
    +        # missing types:
    +
    +        ## 's' : ffi_type_pointer,
    +        ## 'z' : ffi_type_pointer,
    +        ## 'O' : ffi_type_pointer,
    +        ## 'Z' : ffi_type_pointer,
    +
    +        ]
    +    d = dict([(t.name, t) for t in types])
    +    w_char = d['char']
    +    w_unichar = d['unichar']
    +    d['char_p'] = W_FFIType('char_p', libffi.types.pointer, w_pointer_to = w_char)
    +    d['unichar_p'] = W_FFIType('unichar_p', libffi.types.pointer, w_pointer_to = w_unichar)
    +    return d
    +
    +class app_types:
    +    pass
    +app_types.__dict__ = build_ffi_types()
    +
    +def descr_new_pointer(space, w_cls, w_pointer_to):
    +    try:
    +        return descr_new_pointer.cache[w_pointer_to]
    +    except KeyError:
    +        if w_pointer_to is app_types.char:
    +            w_result = app_types.char_p
    +        elif w_pointer_to is app_types.unichar:
    +            w_result = app_types.unichar_p
    +        else:
    +            w_pointer_to = space.interp_w(W_FFIType, w_pointer_to)
    +            name = '(pointer to %s)' % w_pointer_to.name
    +            w_result = W_FFIType(name, libffi.types.pointer, w_pointer_to = w_pointer_to)
    +        descr_new_pointer.cache[w_pointer_to] = w_result
    +        return w_result
    +descr_new_pointer.cache = {}
    +
    +class W_types(Wrappable):
    +    pass
    +W_types.typedef = TypeDef(
    +    'types',
    +    Pointer = interp2app(descr_new_pointer, as_classmethod=True),
    +    **app_types.__dict__)
    diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_funcptr.py
    rename from pypy/module/_ffi/interp_ffi.py
    rename to pypy/module/_ffi/interp_funcptr.py
    --- a/pypy/module/_ffi/interp_ffi.py
    +++ b/pypy/module/_ffi/interp_funcptr.py
    @@ -4,6 +4,7 @@
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.interpreter.typedef import TypeDef
     from pypy.module._rawffi.structure import W_StructureInstance, W_Structure
    +from pypy.module._ffi.interp_ffitype import W_FFIType
     #
     from pypy.rpython.lltypesystem import lltype, rffi
     #
    @@ -13,157 +14,6 @@
     from pypy.rlib.rarithmetic import intmask, r_uint
     from pypy.rlib.objectmodel import we_are_translated
     
    -class W_FFIType(Wrappable):
    -
    -    _immutable_fields_ = ['name', 'ffitype', 'w_datashape', 'w_pointer_to']
    -
    -    def __init__(self, name, ffitype, w_datashape=None, w_pointer_to=None):
    -        self.name = name
    -        self.ffitype = ffitype
    -        self.w_datashape = w_datashape
    -        self.w_pointer_to = w_pointer_to
    -        if self.is_struct():
    -            assert w_datashape is not None
    -
    -    def descr_deref_pointer(self, space):
    -        if self.w_pointer_to is None:
    -            return space.w_None
    -        return self.w_pointer_to
    -
    -    def descr_sizeof(self, space):
    -        return space.wrap(self.sizeof())
    -
    -    def sizeof(self):
    -        return intmask(self.ffitype.c_size)
    -
    -    def repr(self, space):
    -        return space.wrap(self.__repr__())
    -
    -    def __repr__(self):
     -        return "<ffi type %s>" % self.name
    -
    -    def is_signed(self):
    -        return (self is app_types.slong or
    -                self is app_types.sint or
    -                self is app_types.sshort or
    -                self is app_types.sbyte or
    -                self is app_types.slonglong)
    -
    -    def is_unsigned(self):
    -        return (self is app_types.ulong or
    -                self is app_types.uint or
    -                self is app_types.ushort or
    -                self is app_types.ubyte or
    -                self is app_types.ulonglong)
    -
    -    def is_pointer(self):
    -        return self.ffitype is libffi.types.pointer
    -
    -    def is_char(self):
    -        return self is app_types.char
    -
    -    def is_unichar(self):
    -        return self is app_types.unichar
    -
    -    def is_longlong(self):
    -        return libffi.IS_32_BIT and (self is app_types.slonglong or
    -                                     self is app_types.ulonglong)
    -
    -    def is_double(self):
    -        return self is app_types.double
    -
    -    def is_singlefloat(self):
    -        return self is app_types.float
    -
    -    def is_void(self):
    -        return self is app_types.void
    -
    -    def is_struct(self):
    -        return libffi.types.is_struct(self.ffitype)
    -
    -    def is_char_p(self):
    -        return self is app_types.char_p
    -
    -    def is_unichar_p(self):
    -        return self is app_types.unichar_p
    -
    -
    -W_FFIType.typedef = TypeDef(
    -    'FFIType',
    -    __repr__ = interp2app(W_FFIType.repr),
    -    deref_pointer = interp2app(W_FFIType.descr_deref_pointer),
    -    sizeof = interp2app(W_FFIType.descr_sizeof),
    -    )
    -
    -
    -def build_ffi_types():
    -    types = [
    -        # note: most of the type name directly come from the C equivalent,
    -        # with the exception of bytes: in C, ubyte and char are equivalent,
    -        # but for _ffi the first expects a number while the second a 1-length
    -        # string
    -        W_FFIType('slong',     libffi.types.slong),
    -        W_FFIType('sint',      libffi.types.sint),
    -        W_FFIType('sshort',    libffi.types.sshort),
    -        W_FFIType('sbyte',     libffi.types.schar),
    -        W_FFIType('slonglong', libffi.types.slonglong),
    -        #
    -        W_FFIType('ulong',     libffi.types.ulong),
    -        W_FFIType('uint',      libffi.types.uint),
    -        W_FFIType('ushort',    libffi.types.ushort),
    -        W_FFIType('ubyte',     libffi.types.uchar),
    -        W_FFIType('ulonglong', libffi.types.ulonglong),
    -        #
    -        W_FFIType('char',      libffi.types.uchar),
    -        W_FFIType('unichar',   libffi.types.wchar_t),
    -        #
    -        W_FFIType('double',    libffi.types.double),
    -        W_FFIType('float',     libffi.types.float),
    -        W_FFIType('void',      libffi.types.void),
    -        W_FFIType('void_p',    libffi.types.pointer),
    -        #
    -        # missing types:
    -
    -        ## 's' : ffi_type_pointer,
    -        ## 'z' : ffi_type_pointer,
    -        ## 'O' : ffi_type_pointer,
    -        ## 'Z' : ffi_type_pointer,
    -
    -        ]
    -    d = dict([(t.name, t) for t in types])
    -    w_char = d['char']
    -    w_unichar = d['unichar']
    -    d['char_p'] = W_FFIType('char_p', libffi.types.pointer, w_pointer_to = w_char)
    -    d['unichar_p'] = W_FFIType('unichar_p', libffi.types.pointer, w_pointer_to = w_unichar)
    -    return d
    -
    -class app_types:
    -    pass
    -app_types.__dict__ = build_ffi_types()
    -
    -def descr_new_pointer(space, w_cls, w_pointer_to):
    -    try:
    -        return descr_new_pointer.cache[w_pointer_to]
    -    except KeyError:
    -        if w_pointer_to is app_types.char:
    -            w_result = app_types.char_p
    -        elif w_pointer_to is app_types.unichar:
    -            w_result = app_types.unichar_p
    -        else:
    -            w_pointer_to = space.interp_w(W_FFIType, w_pointer_to)
    -            name = '(pointer to %s)' % w_pointer_to.name
    -            w_result = W_FFIType(name, libffi.types.pointer, w_pointer_to = w_pointer_to)
    -        descr_new_pointer.cache[w_pointer_to] = w_result
    -        return w_result
    -descr_new_pointer.cache = {}
    -
    -class W_types(Wrappable):
    -    pass
    -W_types.typedef = TypeDef(
    -    'types',
    -    Pointer = interp2app(descr_new_pointer, as_classmethod=True),
    -    **app_types.__dict__)
    -
     
     def unwrap_ffitype(space, w_argtype, allow_void=False):
         res = w_argtype.ffitype
    diff --git a/pypy/module/_ffi/test/test_ffitype.py b/pypy/module/_ffi/test/test_ffitype.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/_ffi/test/test_ffitype.py
    @@ -0,0 +1,37 @@
    +from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI
    +
    +class AppTestFFIType(BaseAppTestFFI):
    +
    +    def test_simple_types(self):
    +        from _ffi import types
     +        assert str(types.sint) == "<ffi type sint>"
     +        assert str(types.uint) == "<ffi type uint>"
    +
    +    def test_sizeof(self):
    +        from _ffi import types
    +        assert types.sbyte.sizeof() == 1
    +        assert types.sint.sizeof() == 4
    +
    +    def test_typed_pointer(self):
    +        from _ffi import types
    +        intptr = types.Pointer(types.sint) # create a typed pointer to sint
    +        assert intptr.deref_pointer() is types.sint
    +        assert str(intptr) == '<ffi type (pointer to sint)>'
    +        assert types.sint.deref_pointer() is None
    +        raises(TypeError, "types.Pointer(42)")
    +
    +    def test_pointer_identity(self):
    +        from _ffi import types
    +        x = types.Pointer(types.slong)
    +        y = types.Pointer(types.slong)
    +        z = types.Pointer(types.char)
    +        assert x is y
    +        assert x is not z
    +
    +    def test_char_p_cached(self):
    +        from _ffi import types
    +        x = types.Pointer(types.char)
    +        assert x is types.char_p
    +        x = types.Pointer(types.unichar)
    +        assert x is types.unichar_p
    +
    diff --git a/pypy/module/_ffi/test/test__ffi.py b/pypy/module/_ffi/test/test_funcptr.py
    rename from pypy/module/_ffi/test/test__ffi.py
    rename to pypy/module/_ffi/test/test_funcptr.py
    --- a/pypy/module/_ffi/test/test__ffi.py
    +++ b/pypy/module/_ffi/test/test_funcptr.py
    @@ -82,16 +82,6 @@
             res = dll.getfunc('Py_IsInitialized', [], types.slong)()
             assert res == 1
     
    -    def test_simple_types(self):
    -        from _ffi import types
    -        assert str(types.sint) == "<ffi type sint>"
    -        assert str(types.uint) == "<ffi type uint>"
    -
    -    def test_sizeof(self):
    -        from _ffi import types
    -        assert types.sbyte.sizeof() == 1
    -        assert types.sint.sizeof() == 4
    -    
         def test_callfunc(self):
             from _ffi import CDLL, types
             libm = CDLL(self.libm_name)
    @@ -266,29 +256,6 @@
             assert list(array) == list('foobar\00')
             do_nothing.free_temp_buffers()
     
    -    def test_typed_pointer(self):
    -        from _ffi import types
    -        intptr = types.Pointer(types.sint) # create a typed pointer to sint
    -        assert intptr.deref_pointer() is types.sint
    -        assert str(intptr) == '<ffi type (pointer to sint)>'
    -        assert types.sint.deref_pointer() is None
    -        raises(TypeError, "types.Pointer(42)")
    -
    -    def test_pointer_identity(self):
    -        from _ffi import types
    -        x = types.Pointer(types.slong)
    -        y = types.Pointer(types.slong)
    -        z = types.Pointer(types.char)
    -        assert x is y
    -        assert x is not z
    -
    -    def test_char_p_cached(self):
    -        from _ffi import types
    -        x = types.Pointer(types.char)
    -        assert x is types.char_p
    -        x = types.Pointer(types.unichar)
    -        assert x is types.unichar_p
    -
         def test_typed_pointer_args(self):
             """
                 extern int dummy; // defined in test_void_result 
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    --- a/pypy/module/_ffi/test/test_struct.py
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -1,4 +1,4 @@
    -from pypy.module._ffi.test.test__ffi import BaseAppTestFFI
    +from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI
     
     class AppTestStruct(BaseAppTestFFI):
     
    
    From noreply at buildbot.pypy.org  Tue Sep  6 18:04:40 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Tue,  6 Sep 2011 18:04:40 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: introduce the concept of _StructDescr,
     which describes the layout and ffitype of a structure
    Message-ID: <20110906160440.C55018203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47110:b8cb7ac0d45a
    Date: 2011-09-06 16:33 +0200
    http://bitbucket.org/pypy/pypy/changeset/b8cb7ac0d45a/
    
    Log:	introduce the concept of _StructDescr, which describes the layout
    	and ffitype of a structure
    
    diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_ffi/__init__.py
    --- a/pypy/module/_ffi/__init__.py
    +++ b/pypy/module/_ffi/__init__.py
    @@ -7,6 +7,7 @@
             'CDLL':    'interp_funcptr.W_CDLL',
             'FuncPtr': 'interp_funcptr.W_FuncPtr',
             'get_libc':'interp_funcptr.get_libc',
    +        '_StructDescr': 'interp_struct.W__StructDescr',
         }
     
         appleveldefs = {
    diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_ffi/app_struct.py
    --- a/pypy/module/_ffi/app_struct.py
    +++ b/pypy/module/_ffi/app_struct.py
    @@ -1,3 +1,5 @@
    +import _ffi
    +
     class Field(object):
     
         def __init__(self, name, ffitype):
    @@ -13,7 +15,6 @@
         ## def __set__(self, obj, value):
         ##     setfield(obj._buffer, self.ffitype, self.offset, value)
     
    -
     class MetaStructure(type):
     
         def __new__(cls, name, bases, dic):
    @@ -26,13 +27,16 @@
             if fields is None:
                 return
             size = 0
    +        ffitypes = []
             for field in fields:
                 field.offset = size # XXX: alignment!
                 size += field.ffitype.sizeof()
    +            ffitypes.append(field.ffitype)
                 dic[field.name] = field
    -        dic['_size_'] = size
    +        alignment = 0 # XXX
    +        struct_descr = _ffi._StructDescr(size, alignment, ffitypes)
    +        dic['_struct_'] = struct_descr
     
     
     class Structure(object):
    -
         __metaclass__ = MetaStructure
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    --- a/pypy/module/_ffi/test/test_struct.py
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -2,6 +2,13 @@
     
     class AppTestStruct(BaseAppTestFFI):
     
    +    def test__StructDescr(self):
    +        from _ffi import _StructDescr, types
    +        longsize = types.slong.sizeof()
    +        descr = _StructDescr(longsize*2, 0, [types.slong, types.slong])
    +        assert descr.ffitype.sizeof() == longsize*2
    +        assert repr(descr.ffitype) == '>'
    +
         def test_compute_shape(self):
             from _ffi import Structure, Field, types
             class Point(Structure):
    @@ -15,5 +22,4 @@
             assert isinstance(Point.y, Field)
             assert Point.x.offset == 0
             assert Point.y.offset == longsize
    -        assert Point._size_ == longsize*2
    -        
    +        assert Point._struct_.ffitype.sizeof() == longsize*2
    
    From noreply at buildbot.pypy.org  Tue Sep  6 18:04:42 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Tue,  6 Sep 2011 18:04:42 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: store also the struct name in the
    	descr
    Message-ID: <20110906160442.883BB8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47111:69a6b100e900
    Date: 2011-09-06 16:36 +0200
    http://bitbucket.org/pypy/pypy/changeset/69a6b100e900/
    
    Log:	store also the struct name in the descr
    
    diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_ffi/app_struct.py
    --- a/pypy/module/_ffi/app_struct.py
    +++ b/pypy/module/_ffi/app_struct.py
    @@ -18,11 +18,11 @@
     class MetaStructure(type):
     
         def __new__(cls, name, bases, dic):
    -        cls._compute_shape(dic)
    +        cls._compute_shape(name, dic)
             return type.__new__(cls, name, bases, dic)
     
         @classmethod
    -    def _compute_shape(cls, dic):
    +    def _compute_shape(cls, name, dic):
             fields = dic.get('_fields_')
             if fields is None:
                 return
    @@ -34,7 +34,7 @@
                 ffitypes.append(field.ffitype)
                 dic[field.name] = field
             alignment = 0 # XXX
    -        struct_descr = _ffi._StructDescr(size, alignment, ffitypes)
    +        struct_descr = _ffi._StructDescr(name, size, alignment, ffitypes)
             dic['_struct_'] = struct_descr
     
     
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    --- a/pypy/module/_ffi/test/test_struct.py
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -5,9 +5,10 @@
         def test__StructDescr(self):
             from _ffi import _StructDescr, types
             longsize = types.slong.sizeof()
    -        descr = _StructDescr(longsize*2, 0, [types.slong, types.slong])
    +        descr = _StructDescr('foo', longsize*2, 0, [types.slong, types.slong])
    +        assert descr.name == 'foo'
             assert descr.ffitype.sizeof() == longsize*2
    -        assert repr(descr.ffitype) == '>'
    +        assert repr(descr.ffitype) == '<ffi type struct foo>'
     
         def test_compute_shape(self):
             from _ffi import Structure, Field, types
    @@ -22,4 +23,5 @@
             assert isinstance(Point.y, Field)
             assert Point.x.offset == 0
             assert Point.y.offset == longsize
    +        assert Point._struct_.name == 'Point'
             assert Point._struct_.ffitype.sizeof() == longsize*2
    
    From noreply at buildbot.pypy.org  Tue Sep  6 18:04:44 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Tue,  6 Sep 2011 18:04:44 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: don't store the name on the struct
     descr, but make it accessible from the ffi type
    Message-ID: <20110906160444.122DB8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47112:ced67009fa52
    Date: 2011-09-06 16:39 +0200
    http://bitbucket.org/pypy/pypy/changeset/ced67009fa52/
    
    Log:	don't store the name on the struct descr, but make it accessible
    	from the ffi type
    
    diff --git a/pypy/module/_ffi/interp_ffitype.py b/pypy/module/_ffi/interp_ffitype.py
    --- a/pypy/module/_ffi/interp_ffitype.py
    +++ b/pypy/module/_ffi/interp_ffitype.py
    @@ -1,7 +1,7 @@
     from pypy.rlib import libffi
     from pypy.rlib.rarithmetic import intmask
     from pypy.interpreter.baseobjspace import Wrappable
    -from pypy.interpreter.typedef import TypeDef
    +from pypy.interpreter.typedef import TypeDef, interp_attrproperty
     from pypy.interpreter.gateway import interp2app
     
     class W_FFIType(Wrappable):
    @@ -81,6 +81,7 @@
     
     W_FFIType.typedef = TypeDef(
         'FFIType',
    +    name = interp_attrproperty('name', W_FFIType),
         __repr__ = interp2app(W_FFIType.repr),
         deref_pointer = interp2app(W_FFIType.descr_deref_pointer),
         sizeof = interp2app(W_FFIType.descr_sizeof),
    diff --git a/pypy/module/_ffi/test/test_ffitype.py b/pypy/module/_ffi/test/test_ffitype.py
    --- a/pypy/module/_ffi/test/test_ffitype.py
    +++ b/pypy/module/_ffi/test/test_ffitype.py
    @@ -6,7 +6,9 @@
             from _ffi import types
         assert str(types.sint) == "<ffi type sint>"
         assert str(types.uint) == "<ffi type uint>"
    -
    +        assert types.sint.name == 'sint'
    +        assert types.uint.name == 'uint'
    +        
         def test_sizeof(self):
             from _ffi import types
             assert types.sbyte.sizeof() == 1
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    --- a/pypy/module/_ffi/test/test_struct.py
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -6,9 +6,8 @@
             from _ffi import _StructDescr, types
             longsize = types.slong.sizeof()
             descr = _StructDescr('foo', longsize*2, 0, [types.slong, types.slong])
    -        assert descr.name == 'foo'
             assert descr.ffitype.sizeof() == longsize*2
    -        assert repr(descr.ffitype) == '<ffi type struct foo>'
    +        assert descr.ffitype.name == 'struct foo'
     
         def test_compute_shape(self):
             from _ffi import Structure, Field, types
    @@ -23,5 +22,6 @@
             assert isinstance(Point.y, Field)
             assert Point.x.offset == 0
             assert Point.y.offset == longsize
    -        assert Point._struct_.name == 'Point'
             assert Point._struct_.ffitype.sizeof() == longsize*2
    +        assert Point._struct_.ffitype.name == 'struct Point'
    +        
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:06:32 2011
    From: noreply at buildbot.pypy.org (snus_mumrik)
    Date: Tue,  6 Sep 2011 19:06:32 +0200 (CEST)
    Subject: [pypy-commit] pypy numpy-indexing-by-arrays: Branch for adding
     "index arrays" to numpy
    Message-ID: <20110906170632.981E38203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Ilya Osadchiy 
    Branch: numpy-indexing-by-arrays
    Changeset: r47113:41bb9c2e7f3a
    Date: 2011-09-03 14:25 +0300
    http://bitbucket.org/pypy/pypy/changeset/41bb9c2e7f3a/
    
    Log:	Branch for adding "index arrays" to numpy
    
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:06:33 2011
    From: noreply at buildbot.pypy.org (snus_mumrik)
    Date: Tue,  6 Sep 2011 19:06:33 +0200 (CEST)
    Subject: [pypy-commit] pypy numpy-indexing-by-arrays: Initial implementation
    Message-ID: <20110906170633.D19AF8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Ilya Osadchiy 
    Branch: numpy-indexing-by-arrays
    Changeset: r47114:7e7b4f1c2c5c
    Date: 2011-09-05 22:24 +0300
    http://bitbucket.org/pypy/pypy/changeset/7e7b4f1c2c5c/
    
    Log:	Initial implementation
    
    diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
    --- a/pypy/module/micronumpy/interp_numarray.py
    +++ b/pypy/module/micronumpy/interp_numarray.py
    @@ -217,7 +217,6 @@
             return space.wrap("[" + " ".join(concrete._getnums(True)) + "]")
     
         def descr_getitem(self, space, w_idx):
    -        # TODO: indexing by arrays and lists
             if space.isinstance_w(w_idx, space.w_tuple):
                 length = space.len_w(w_idx)
                 if length == 0:
    @@ -226,6 +225,24 @@
                     raise OperationError(space.w_IndexError,
                                          space.wrap("invalid index"))
                 w_idx = space.getitem(w_idx, space.wrap(0))
    +        elif space.issequence_w(w_idx):
    +            w_idx = convert_to_array(space, w_idx)
    +            bool_dtype = space.fromcache(interp_dtype.W_BoolDtype)
    +            int_dtype = space.fromcache(interp_dtype.W_Int64Dtype)
    +            if w_idx.find_dtype() is bool_dtype:
    +                # TODO: indexing by bool array
    +                raise NotImplementedError("sorry, not yet implemented")
    +            else:
    +                # Indexing by array
    +
    +                # FIXME: should raise exception if any index in
    +                # array is out of bound, but this kills lazy execution
    +                new_sig = signature.Signature.find_sig([
    +                    IndexedByArray.signature, self.signature
    +                ])                
    +                res = IndexedByArray(new_sig, int_dtype, self, w_idx)
    +                return space.wrap(res)
    +
             start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size())
             if step == 0:
                 # Single index
    @@ -430,6 +447,29 @@
             assert isinstance(call_sig, signature.Call2)
             return call_sig.func(self.res_dtype, lhs, rhs)
     
    +class IndexedByArray(VirtualArray):
    +    """
    +    Intermediate class for performing indexing of array by another array
    +    """
    +    signature = signature.BaseSignature()
    +    def __init__(self, signature, int_dtype, source, index):
    +        VirtualArray.__init__(self, signature, source.find_dtype())
    +        self.source = source
    +        self.index = index
    +        self.int_dtype = int_dtype
    +
    +    def _del_sources(self):
    +        self.source = None
    +        self.index = None
    +
    +    def _find_size(self):
    +        return self.index.find_size()
    +
    +    def _eval(self, i):
    +        idx = self.int_dtype.unbox(self.index.eval(i).convert_to(self.int_dtype))
    +        val = self.source.eval(idx).convert_to(self.res_dtype)
    +        return val
    +
     class ViewArray(BaseArray):
         """
         Class for representing views of arrays, they will reflect changes of parent
    diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
    --- a/pypy/module/micronumpy/interp_ufuncs.py
    +++ b/pypy/module/micronumpy/interp_ufuncs.py
    @@ -200,7 +200,7 @@
         int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype)
     
         if space.is_w(w_type, space.w_bool):
    -        if current_guess is None:
    +        if current_guess is None or current_guess is bool_dtype:
                 return bool_dtype
         elif space.is_w(w_type, space.w_int):
             if (current_guess is None or current_guess is bool_dtype or
    @@ -270,4 +270,4 @@
             setattr(self, ufunc_name, ufunc)
     
     def get(space):
    -    return space.fromcache(UfuncState)
    \ No newline at end of file
    +    return space.fromcache(UfuncState)
    diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
    --- a/pypy/module/micronumpy/test/test_numarray.py
    +++ b/pypy/module/micronumpy/test/test_numarray.py
    @@ -119,6 +119,20 @@
             for i in xrange(5):
                 assert a[i] == b[i]
     
    +    def test_index_by_array(self):
    +        from numpy import array
    +        a = array(range(5))
    +        idx_list = [3, 1, 3, 2, 0, 4]
    +        idx_arr = array(idx_list)
    +        a_by_arr = a[idx_arr]
    +        assert len(a_by_arr) == 6
    +        for i in xrange(6):
    +            assert a_by_arr[i] == range(5)[idx_list[i]]
    +        a_by_list = a[idx_list]
    +        assert len(a_by_list) == 6
    +        for i in xrange(6):
    +            assert a_by_list[i] == range(5)[idx_list[i]]
    +
         def test_setitem(self):
             from numpy import array
             a = array(range(5))
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:14:18 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Tue,  6 Sep 2011 19:14:18 +0200 (CEST)
    Subject: [pypy-commit] pypy inline-dict-ops: merge default
    Message-ID: <20110906171418.A48F38203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: inline-dict-ops
    Changeset: r47115:36e972ae4251
    Date: 2011-09-05 09:00 +0200
    http://bitbucket.org/pypy/pypy/changeset/36e972ae4251/
    
    Log:	merge default
    
    diff too long, truncating to 10000 out of 41373 lines
    
    diff --git a/LICENSE b/LICENSE
    --- a/LICENSE
    +++ b/LICENSE
    @@ -37,22 +37,22 @@
         Armin Rigo
         Maciej Fijalkowski
         Carl Friedrich Bolz
    +    Antonio Cuni
         Amaury Forgeot d'Arc
    -    Antonio Cuni
         Samuele Pedroni
         Michael Hudson
         Holger Krekel
    +    Benjamin Peterson
         Christian Tismer
    -    Benjamin Peterson
    +    Hakan Ardo
    +    Alex Gaynor
         Eric van Riet Paap
    -    Anders Chrigström
    -    Håkan Ardö
    +    Anders Chrigstrom
    +    David Schneider
         Richard Emslie
         Dan Villiom Podlaski Christiansen
         Alexander Schremmer
    -    Alex Gaynor
    -    David Schneider
    -    Aurelién Campeas
    +    Aurelien Campeas
         Anders Lehmann
         Camillo Bruni
         Niklaus Haldimann
    @@ -63,16 +63,17 @@
         Bartosz Skowron
         Jakub Gustak
         Guido Wesdorp
    +    Daniel Roberts
         Adrien Di Mascio
         Laura Creighton
         Ludovic Aubry
         Niko Matsakis
    -    Daniel Roberts
         Jason Creighton
    -    Jacob Hallén
    +    Jacob Hallen
         Alex Martelli
         Anders Hammarquist
         Jan de Mooij
    +    Wim Lavrijsen
         Stephan Diehl
         Michael Foord
         Stefan Schwarzer
    @@ -83,9 +84,13 @@
         Alexandre Fayolle
         Marius Gedminas
         Simon Burton
    +    Justin Peel
         Jean-Paul Calderone
         John Witulski
    +    Lukas Diekmann
    +    holger krekel
         Wim Lavrijsen
    +    Dario Bertini
         Andreas Stührk
         Jean-Philippe St. Pierre
         Guido van Rossum
    @@ -97,15 +102,16 @@
         Georg Brandl
         Gerald Klix
         Wanja Saatkamp
    +    Ronny Pfannschmidt
         Boris Feigin
         Oscar Nierstrasz
    -    Dario Bertini
         David Malcolm
         Eugene Oden
         Henry Mason
    +    Sven Hager
         Lukas Renggli
    +    Ilya Osadchiy
         Guenter Jantzen
    -    Ronny Pfannschmidt
         Bert Freudenberg
         Amit Regmi
         Ben Young
    @@ -122,8 +128,8 @@
         Jared Grubb
         Karl Bartel
         Gabriel Lavoie
    +    Victor Stinner
         Brian Dorsey
    -    Victor Stinner
         Stuart Williams
         Toby Watson
         Antoine Pitrou
    @@ -134,19 +140,23 @@
         Jonathan David Riehl
         Elmo Mäntynen
         Anders Qvist
    -    Beatrice Düring
    +    Beatrice During
         Alexander Sedov
    +    Timo Paulssen
    +    Corbin Simpson
         Vincent Legoll
    +    Romain Guillebert
         Alan McIntyre
    -    Romain Guillebert
         Alex Perry
         Jens-Uwe Mager
    +    Simon Cross
         Dan Stromberg
    -    Lukas Diekmann
    +    Guillebert Romain
         Carl Meyer
         Pieter Zieschang
         Alejandro J. Cura
         Sylvain Thenault
    +    Christoph Gerum
         Travis Francis Athougies
         Henrik Vendelbo
         Lutz Paelike
    @@ -157,6 +167,7 @@
         Miguel de Val Borro
         Ignas Mikalajunas
         Artur Lisiecki
    +    Philip Jenvey
         Joshua Gilbert
         Godefroid Chappelle
         Yusei Tahara
    @@ -165,27 +176,31 @@
         Gustavo Niemeyer
         William Leslie
         Akira Li
    -    Kristján Valur Jónsson
    +    Kristjan Valur Jonsson
         Bobby Impollonia
    +    Michael Hudson-Doyle
         Andrew Thompson
         Anders Sigfridsson
    +    Floris Bruynooghe
         Jacek Generowicz
         Dan Colish
    -    Sven Hager
         Zooko Wilcox-O Hearn
    +    Dan Villiom Podlaski Christiansen
         Anders Hammarquist
    +    Chris Lambacher
         Dinu Gherman
         Dan Colish
    +    Brett Cannon
         Daniel Neuhäuser
         Michael Chermside
         Konrad Delong
         Anna Ravencroft
         Greg Price
         Armin Ronacher
    +    Christian Muirhead
         Jim Baker
    -    Philip Jenvey
         Rodrigo Araújo
    -    Brett Cannon
    +    Romain Guillebert
     
         Heinrich-Heine University, Germany 
         Open End AB (formerly AB Strakt), Sweden
    diff --git a/ctypes_configure/configure.py b/ctypes_configure/configure.py
    --- a/ctypes_configure/configure.py
    +++ b/ctypes_configure/configure.py
    @@ -559,7 +559,9 @@
     C_HEADER = """
     #include <stdio.h>
     #include <stddef.h>   /* for offsetof() */
    -#include <stdint.h>   /* FreeBSD: for uint64_t */
    +#ifndef _WIN32
    +#  include <stdint.h>   /* FreeBSD: for uint64_t */
    +#endif
     
     void dump(char* key, int value) {
         printf("%s: %d\\n", key, value);
    diff --git a/ctypes_configure/stdoutcapture.py b/ctypes_configure/stdoutcapture.py
    --- a/ctypes_configure/stdoutcapture.py
    +++ b/ctypes_configure/stdoutcapture.py
    @@ -15,6 +15,15 @@
                 not hasattr(os, 'fdopen')):
                 self.dummy = 1
             else:
    +            try:
    +                self.tmpout = os.tmpfile()
    +                if mixed_out_err:
    +                    self.tmperr = self.tmpout
    +                else:
    +                    self.tmperr = os.tmpfile()
    +            except OSError:     # bah?  on at least one Windows box
    +                self.dummy = 1
    +                return
                 self.dummy = 0
                 # make new stdout/stderr files if needed
                 self.localoutfd = os.dup(1)
    @@ -29,11 +38,6 @@
                     sys.stderr = os.fdopen(self.localerrfd, 'w', 0)
                 else:
                     self.saved_stderr = None
    -            self.tmpout = os.tmpfile()
    -            if mixed_out_err:
    -                self.tmperr = self.tmpout
    -            else:
    -                self.tmperr = os.tmpfile()
                 os.dup2(self.tmpout.fileno(), 1)
                 os.dup2(self.tmperr.fileno(), 2)
     
    diff --git a/lib-python/conftest.py b/lib-python/conftest.py
    --- a/lib-python/conftest.py
    +++ b/lib-python/conftest.py
    @@ -154,18 +154,18 @@
         RegrTest('test_cmd.py'),
         RegrTest('test_cmd_line_script.py'),
         RegrTest('test_codeccallbacks.py', core=True),
    -    RegrTest('test_codecencodings_cn.py'),
    -    RegrTest('test_codecencodings_hk.py'),
    -    RegrTest('test_codecencodings_jp.py'),
    -    RegrTest('test_codecencodings_kr.py'),
    -    RegrTest('test_codecencodings_tw.py'),
    +    RegrTest('test_codecencodings_cn.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecencodings_hk.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecencodings_jp.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecencodings_kr.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecencodings_tw.py', usemodules='_multibytecodec'),
     
    -    RegrTest('test_codecmaps_cn.py'),
    -    RegrTest('test_codecmaps_hk.py'),
    -    RegrTest('test_codecmaps_jp.py'),
    -    RegrTest('test_codecmaps_kr.py'),
    -    RegrTest('test_codecmaps_tw.py'),
    -    RegrTest('test_codecs.py', core=True),
    +    RegrTest('test_codecmaps_cn.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecmaps_hk.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecmaps_jp.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecmaps_kr.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecmaps_tw.py', usemodules='_multibytecodec'),
    +    RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'),
         RegrTest('test_codeop.py', core=True),
         RegrTest('test_coercion.py', core=True),
         RegrTest('test_collections.py'),
    @@ -314,7 +314,7 @@
         RegrTest('test_mmap.py'),
         RegrTest('test_module.py', core=True),
         RegrTest('test_modulefinder.py'),
    -    RegrTest('test_multibytecodec.py'),
    +    RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'),
         RegrTest('test_multibytecodec_support.py', skip="not a test"),
         RegrTest('test_multifile.py'),
         RegrTest('test_multiprocessing.py', skip='FIXME leaves subprocesses'),
    diff --git a/lib-python/modified-2.7/ctypes/util.py b/lib-python/modified-2.7/ctypes/util.py
    --- a/lib-python/modified-2.7/ctypes/util.py
    +++ b/lib-python/modified-2.7/ctypes/util.py
    @@ -72,8 +72,8 @@
             return name
     
     if os.name == "posix" and sys.platform == "darwin":
    -    from ctypes.macholib.dyld import dyld_find as _dyld_find
         def find_library(name):
    +        from ctypes.macholib.dyld import dyld_find as _dyld_find
             possible = ['lib%s.dylib' % name,
                         '%s.dylib' % name,
                         '%s.framework/%s' % (name, name)]
    diff --git a/lib-python/modified-2.7/distutils/unixccompiler.py b/lib-python/modified-2.7/distutils/unixccompiler.py
    --- a/lib-python/modified-2.7/distutils/unixccompiler.py
    +++ b/lib-python/modified-2.7/distutils/unixccompiler.py
    @@ -324,7 +324,7 @@
                 # On OSX users can specify an alternate SDK using
                 # '-isysroot', calculate the SDK root if it is specified
                 # (and use it further on)
    -            cflags = sysconfig.get_config_var('CFLAGS')
    +            cflags = sysconfig.get_config_var('CFLAGS') or ''
                 m = re.search(r'-isysroot\s+(\S+)', cflags)
                 if m is None:
                     sysroot = '/'
    diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py
    new file mode 100644
    --- /dev/null
    +++ b/lib-python/modified-2.7/gzip.py
    @@ -0,0 +1,514 @@
    +"""Functions that read and write gzipped files.
    +
    +The user of the file doesn't have to worry about the compression,
    +but random access is not allowed."""
    +
    +# based on Andrew Kuchling's minigzip.py distributed with the zlib module
    +
    +import struct, sys, time, os
    +import zlib
    +import io
    +import __builtin__
    +
    +__all__ = ["GzipFile","open"]
    +
    +FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
    +
    +READ, WRITE = 1, 2
    +
    +def write32u(output, value):
    +    # The L format writes the bit pattern correctly whether signed
    +    # or unsigned.
    +    output.write(struct.pack("'
    +
    +    def _check_closed(self):
    +        """Raises a ValueError if the underlying file object has been closed.
    +
    +        """
    +        if self.closed:
    +            raise ValueError('I/O operation on closed file.')
    +
    +    def _init_write(self, filename):
    +        self.name = filename
    +        self.crc = zlib.crc32("") & 0xffffffffL
    +        self.size = 0
    +        self.writebuf = []
    +        self.bufsize = 0
    +
    +    def _write_gzip_header(self):
    +        self.fileobj.write('\037\213')             # magic header
    +        self.fileobj.write('\010')                 # compression method
    +        fname = os.path.basename(self.name)
    +        if fname.endswith(".gz"):
    +            fname = fname[:-3]
    +        flags = 0
    +        if fname:
    +            flags = FNAME
    +        self.fileobj.write(chr(flags))
    +        mtime = self.mtime
    +        if mtime is None:
    +            mtime = time.time()
    +        write32u(self.fileobj, long(mtime))
    +        self.fileobj.write('\002')
    +        self.fileobj.write('\377')
    +        if fname:
    +            self.fileobj.write(fname + '\000')
    +
    +    def _init_read(self):
    +        self.crc = zlib.crc32("") & 0xffffffffL
    +        self.size = 0
    +
    +    def _read_gzip_header(self):
    +        magic = self.fileobj.read(2)
    +        if magic != '\037\213':
    +            raise IOError, 'Not a gzipped file'
    +        method = ord( self.fileobj.read(1) )
    +        if method != 8:
    +            raise IOError, 'Unknown compression method'
    +        flag = ord( self.fileobj.read(1) )
    +        self.mtime = read32(self.fileobj)
    +        # extraflag = self.fileobj.read(1)
    +        # os = self.fileobj.read(1)
    +        self.fileobj.read(2)
    +
    +        if flag & FEXTRA:
    +            # Read & discard the extra field, if present
    +            xlen = ord(self.fileobj.read(1))
    +            xlen = xlen + 256*ord(self.fileobj.read(1))
    +            self.fileobj.read(xlen)
    +        if flag & FNAME:
    +            # Read and discard a null-terminated string containing the filename
    +            while True:
    +                s = self.fileobj.read(1)
    +                if not s or s=='\000':
    +                    break
    +        if flag & FCOMMENT:
    +            # Read and discard a null-terminated string containing a comment
    +            while True:
    +                s = self.fileobj.read(1)
    +                if not s or s=='\000':
    +                    break
    +        if flag & FHCRC:
    +            self.fileobj.read(2)     # Read & discard the 16-bit header CRC
    +
    +    def write(self,data):
    +        self._check_closed()
    +        if self.mode != WRITE:
    +            import errno
    +            raise IOError(errno.EBADF, "write() on read-only GzipFile object")
    +
    +        if self.fileobj is None:
    +            raise ValueError, "write() on closed GzipFile object"
    +
    +        # Convert data type if called by io.BufferedWriter.
    +        if isinstance(data, memoryview):
    +            data = data.tobytes()
    +
    +        if len(data) > 0:
    +            self.size = self.size + len(data)
    +            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
    +            self.fileobj.write( self.compress.compress(data) )
    +            self.offset += len(data)
    +
    +        return len(data)
    +
    +    def read(self, size=-1):
    +        self._check_closed()
    +        if self.mode != READ:
    +            import errno
    +            raise IOError(errno.EBADF, "read() on write-only GzipFile object")
    +
    +        if self.extrasize <= 0 and self.fileobj is None:
    +            return ''
    +
    +        readsize = 1024
    +        if size < 0:        # get the whole thing
    +            try:
    +                while True:
    +                    self._read(readsize)
    +                    readsize = min(self.max_read_chunk, readsize * 2)
    +            except EOFError:
    +                size = self.extrasize
    +        elif size == 0:
    +            return ""
    +        else:               # just get some more of it
    +            try:
    +                while size > self.extrasize:
    +                    self._read(readsize)
    +                    readsize = min(self.max_read_chunk, readsize * 2)
    +            except EOFError:
    +                if size > self.extrasize:
    +                    size = self.extrasize
    +
    +        offset = self.offset - self.extrastart
    +        chunk = self.extrabuf[offset: offset + size]
    +        self.extrasize = self.extrasize - size
    +
    +        self.offset += size
    +        return chunk
    +
    +    def _unread(self, buf):
    +        self.extrasize = len(buf) + self.extrasize
    +        self.offset -= len(buf)
    +
    +    def _read(self, size=1024):
    +        if self.fileobj is None:
    +            raise EOFError, "Reached EOF"
    +
    +        if self._new_member:
    +            # If the _new_member flag is set, we have to
    +            # jump to the next member, if there is one.
    +            #
    +            # First, check if we're at the end of the file;
    +            # if so, it's time to stop; no more members to read.
    +            pos = self.fileobj.tell()   # Save current position
    +            self.fileobj.seek(0, 2)     # Seek to end of file
    +            if pos == self.fileobj.tell():
    +                raise EOFError, "Reached EOF"
    +            else:
    +                self.fileobj.seek( pos ) # Return to original position
    +
    +            self._init_read()
    +            self._read_gzip_header()
    +            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
    +            self._new_member = False
    +
    +        # Read a chunk of data from the file
    +        buf = self.fileobj.read(size)
    +
    +        # If the EOF has been reached, flush the decompression object
    +        # and mark this object as finished.
    +
    +        if buf == "":
    +            uncompress = self.decompress.flush()
    +            self._read_eof()
    +            self._add_read_data( uncompress )
    +            raise EOFError, 'Reached EOF'
    +
    +        uncompress = self.decompress.decompress(buf)
    +        self._add_read_data( uncompress )
    +
    +        if self.decompress.unused_data != "":
    +            # Ending case: we've come to the end of a member in the file,
    +            # so seek back to the start of the unused data, finish up
    +            # this member, and read a new gzip header.
    +            # (The number of bytes to seek back is the length of the unused
    +            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
    +            self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
    +
    +            # Check the CRC and file size, and set the flag so we read
    +            # a new member on the next call
    +            self._read_eof()
    +            self._new_member = True
    +
    +    def _add_read_data(self, data):
    +        self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
    +        offset = self.offset - self.extrastart
    +        self.extrabuf = self.extrabuf[offset:] + data
    +        self.extrasize = self.extrasize + len(data)
    +        self.extrastart = self.offset
    +        self.size = self.size + len(data)
    +
    +    def _read_eof(self):
    +        # We've read to the end of the file, so we have to rewind in order
    +        # to reread the 8 bytes containing the CRC and the file size.
    +        # We check the that the computed CRC and size of the
    +        # uncompressed data matches the stored values.  Note that the size
    +        # stored is the true file size mod 2**32.
    +        self.fileobj.seek(-8, 1)
    +        crc32 = read32(self.fileobj)
    +        isize = read32(self.fileobj)  # may exceed 2GB
    +        if crc32 != self.crc:
    +            raise IOError("CRC check failed %s != %s" % (hex(crc32),
    +                                                         hex(self.crc)))
    +        elif isize != (self.size & 0xffffffffL):
    +            raise IOError, "Incorrect length of data produced"
    +
    +        # Gzip files can be padded with zeroes and still have archives.
    +        # Consume all zero bytes and set the file position to the first
    +        # non-zero byte. See http://www.gzip.org/#faq8
    +        c = "\x00"
    +        while c == "\x00":
    +            c = self.fileobj.read(1)
    +        if c:
    +            self.fileobj.seek(-1, 1)
    +
    +    @property
    +    def closed(self):
    +        return self.fileobj is None
    +
    +    def close(self):
    +        if self.fileobj is None:
    +            return
    +        if self.mode == WRITE:
    +            self.fileobj.write(self.compress.flush())
    +            write32u(self.fileobj, self.crc)
    +            # self.size may exceed 2GB, or even 4GB
    +            write32u(self.fileobj, self.size & 0xffffffffL)
    +            self.fileobj = None
    +        elif self.mode == READ:
    +            self.fileobj = None
    +        if self.myfileobj:
    +            self.myfileobj.close()
    +            self.myfileobj = None
    +
    +    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
    +        self._check_closed()
    +        if self.mode == WRITE:
    +            # Ensure the compressor's buffer is flushed
    +            self.fileobj.write(self.compress.flush(zlib_mode))
    +            self.fileobj.flush()
    +
    +    def fileno(self):
    +        """Invoke the underlying file object's fileno() method.
    +
    +        This will raise AttributeError if the underlying file object
    +        doesn't support fileno().
    +        """
    +        return self.fileobj.fileno()
    +
    +    def rewind(self):
    +        '''Return the uncompressed stream file position indicator to the
    +        beginning of the file'''
    +        if self.mode != READ:
    +            raise IOError("Can't rewind in write mode")
    +        self.fileobj.seek(0)
    +        self._new_member = True
    +        self.extrabuf = ""
    +        self.extrasize = 0
    +        self.extrastart = 0
    +        self.offset = 0
    +
    +    def readable(self):
    +        return self.mode == READ
    +
    +    def writable(self):
    +        return self.mode == WRITE
    +
    +    def seekable(self):
    +        return True
    +
    +    def seek(self, offset, whence=0):
    +        if whence:
    +            if whence == 1:
    +                offset = self.offset + offset
    +            else:
    +                raise ValueError('Seek from end not supported')
    +        if self.mode == WRITE:
    +            if offset < self.offset:
    +                raise IOError('Negative seek in write mode')
    +            count = offset - self.offset
    +            for i in range(count // 1024):
    +                self.write(1024 * '\0')
    +            self.write((count % 1024) * '\0')
    +        elif self.mode == READ:
    +            if offset == self.offset:
    +                self.read(0) # to make sure that this file is open
    +                return self.offset
    +            if offset < self.offset:
    +                # for negative seek, rewind and do positive seek
    +                self.rewind()
    +            count = offset - self.offset
    +            for i in range(count // 1024):
    +                self.read(1024)
    +            self.read(count % 1024)
    +
    +        return self.offset
    +
    +    def readline(self, size=-1):
    +        if size < 0:
    +            # Shortcut common case - newline found in buffer.
    +            offset = self.offset - self.extrastart
    +            i = self.extrabuf.find('\n', offset) + 1
    +            if i > 0:
    +                self.extrasize -= i - offset
    +                self.offset += i - offset
    +                return self.extrabuf[offset: i]
    +
    +            size = sys.maxint
    +            readsize = self.min_readsize
    +        else:
    +            readsize = size
    +        bufs = []
    +        while size != 0:
    +            c = self.read(readsize)
    +            i = c.find('\n')
    +
    +            # We set i=size to break out of the loop under two
    +            # conditions: 1) there's no newline, and the chunk is
    +            # larger than size, or 2) there is a newline, but the
    +            # resulting line would be longer than 'size'.
    +            if (size <= i) or (i == -1 and len(c) > size):
    +                i = size - 1
    +
    +            if i >= 0 or c == '':
    +                bufs.append(c[:i + 1])    # Add portion of last chunk
    +                self._unread(c[i + 1:])   # Push back rest of chunk
    +                break
    +
    +            # Append chunk to list, decrease 'size',
    +            bufs.append(c)
    +            size = size - len(c)
    +            readsize = min(size, readsize * 2)
    +        if readsize > self.min_readsize:
    +            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
    +        return ''.join(bufs) # Return resulting line
    +
    +
    +def _test():
    +    # Act like gzip; with -d, act like gunzip.
    +    # The input file is not deleted, however, nor are any other gzip
    +    # options or features supported.
    +    args = sys.argv[1:]
    +    decompress = args and args[0] == "-d"
    +    if decompress:
    +        args = args[1:]
    +    if not args:
    +        args = ["-"]
    +    for arg in args:
    +        if decompress:
    +            if arg == "-":
    +                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
    +                g = sys.stdout
    +            else:
    +                if arg[-3:] != ".gz":
    +                    print "filename doesn't end in .gz:", repr(arg)
    +                    continue
    +                f = open(arg, "rb")
    +                g = __builtin__.open(arg[:-3], "wb")
    +        else:
    +            if arg == "-":
    +                f = sys.stdin
    +                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
    +            else:
    +                f = __builtin__.open(arg, "rb")
    +                g = open(arg + ".gz", "wb")
    +        while True:
    +            chunk = f.read(1024)
    +            if not chunk:
    +                break
    +            g.write(chunk)
    +        if g is not sys.stdout:
    +            g.close()
    +        if f is not sys.stdin:
    +            f.close()
    +
    +if __name__ == '__main__':
    +    _test()
    diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py
    --- a/lib-python/modified-2.7/sqlite3/test/regression.py
    +++ b/lib-python/modified-2.7/sqlite3/test/regression.py
    @@ -274,6 +274,18 @@
             cur.execute("UPDATE foo SET id = 3 WHERE id = 1")
             self.assertEqual(cur.description, None)
     
    +    def CheckStatementCache(self):
    +        cur = self.con.cursor()
    +        cur.execute("CREATE TABLE foo (id INTEGER)")
    +        values = [(i,) for i in xrange(5)]
    +        cur.executemany("INSERT INTO foo (id) VALUES (?)", values)
    +
    +        cur.execute("SELECT id FROM foo")
    +        self.assertEqual(list(cur), values)
    +        self.con.commit()
    +        cur.execute("SELECT id FROM foo")
    +        self.assertEqual(list(cur), values)
    +
     def suite():
         regression_suite = unittest.makeSuite(RegressionTests, "Check")
         return unittest.TestSuite((regression_suite,))
    diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py
    --- a/lib-python/modified-2.7/tarfile.py
    +++ b/lib-python/modified-2.7/tarfile.py
    @@ -252,8 +252,8 @@
            the high bit set. So we calculate two checksums, unsigned and
            signed.
         """
    -    unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
    -    signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
    +    unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512]))
    +    signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512]))
         return unsigned_chksum, signed_chksum
     
     def copyfileobj(src, dst, length=None):
    @@ -265,7 +265,6 @@
         if length is None:
             shutil.copyfileobj(src, dst)
             return
    -
         BUFSIZE = 16 * 1024
         blocks, remainder = divmod(length, BUFSIZE)
         for b in xrange(blocks):
    @@ -802,19 +801,19 @@
             if self.closed:
                 raise ValueError("I/O operation on closed file")
     
    -        buf = ""
             if self.buffer:
                 if size is None:
    -                buf = self.buffer
    +                buf = self.buffer + self.fileobj.read()
                     self.buffer = ""
                 else:
                     buf = self.buffer[:size]
                     self.buffer = self.buffer[size:]
    -
    -        if size is None:
    -            buf += self.fileobj.read()
    +                buf += self.fileobj.read(size - len(buf))
             else:
    -            buf += self.fileobj.read(size - len(buf))
    +            if size is None:
    +                buf = self.fileobj.read()
    +            else:
    +                buf = self.fileobj.read(size)
     
             self.position += len(buf)
             return buf
    diff --git a/lib-python/modified-2.7/test/regrtest.py b/lib-python/modified-2.7/test/regrtest.py
    --- a/lib-python/modified-2.7/test/regrtest.py
    +++ b/lib-python/modified-2.7/test/regrtest.py
    @@ -1403,7 +1403,26 @@
             test_zipimport
             test_zlib
             """,
    -    'openbsd3':
    +    'openbsd4':
    +        """
    +        test_ascii_formatd
    +        test_bsddb
    +        test_bsddb3
    +        test_ctypes
    +        test_dl
    +        test_epoll
    +        test_gdbm
    +        test_locale
    +        test_normalization
    +        test_ossaudiodev
    +        test_pep277
    +        test_tcl
    +        test_tk
    +        test_ttk_guionly
    +        test_ttk_textonly
    +        test_multiprocessing
    +        """,
    +    'openbsd5':
             """
             test_ascii_formatd
             test_bsddb
    diff --git a/lib-python/modified-2.7/test/test_bz2.py b/lib-python/modified-2.7/test/test_bz2.py
    --- a/lib-python/modified-2.7/test/test_bz2.py
    +++ b/lib-python/modified-2.7/test/test_bz2.py
    @@ -50,6 +50,7 @@
             self.filename = TESTFN
     
         def tearDown(self):
    +        test_support.gc_collect()
             if os.path.isfile(self.filename):
                 os.unlink(self.filename)
     
    diff --git a/lib-python/modified-2.7/test/test_fcntl.py b/lib-python/modified-2.7/test/test_fcntl.py
    new file mode 100644
    --- /dev/null
    +++ b/lib-python/modified-2.7/test/test_fcntl.py
    @@ -0,0 +1,108 @@
    +"""Test program for the fcntl C module.
    +
    +OS/2+EMX doesn't support the file locking operations.
    +
    +"""
    +import os
    +import struct
    +import sys
    +import unittest
    +from test.test_support import (verbose, TESTFN, unlink, run_unittest,
    +    import_module)
    +
    +# Skip test if no fnctl module.
    +fcntl = import_module('fcntl')
    +
    +
    +# TODO - Write tests for flock() and lockf().
    +
    +def get_lockdata():
    +    if sys.platform.startswith('atheos'):
    +        start_len = "qq"
    +    else:
    +        try:
    +            os.O_LARGEFILE
    +        except AttributeError:
    +            start_len = "ll"
    +        else:
    +            start_len = "qq"
    +
    +    if sys.platform in ('netbsd1', 'netbsd2', 'netbsd3',
    +                        'Darwin1.2', 'darwin',
    +                        'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
    +                        'freebsd6', 'freebsd7', 'freebsd8',
    +                        'bsdos2', 'bsdos3', 'bsdos4',
    +                        'openbsd', 'openbsd2', 'openbsd3', 'openbsd4', 'openbsd5'):
    +        if struct.calcsize('l') == 8:
    +            off_t = 'l'
    +            pid_t = 'i'
    +        else:
    +            off_t = 'lxxxx'
    +            pid_t = 'l'
    +        lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
    +                               fcntl.F_WRLCK, 0)
    +    elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
    +        lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    +    elif sys.platform in ['os2emx']:
    +        lockdata = None
    +    else:
    +        lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
    +    if lockdata:
    +        if verbose:
    +            print 'struct.pack: ', repr(lockdata)
    +    return lockdata
    +
    +lockdata = get_lockdata()
    +
    +
    +class TestFcntl(unittest.TestCase):
    +
    +    def setUp(self):
    +        self.f = None
    +
    +    def tearDown(self):
    +        if self.f and not self.f.closed:
    +            self.f.close()
    +        unlink(TESTFN)
    +
    +    def test_fcntl_fileno(self):
    +        # the example from the library docs
    +        self.f = open(TESTFN, 'w')
    +        rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
    +        if verbose:
    +            print 'Status from fcntl with O_NONBLOCK: ', rv
    +        if sys.platform not in ['os2emx']:
    +            rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
    +            if verbose:
    +                print 'String from fcntl with F_SETLKW: ', repr(rv)
    +        self.f.close()
    +
    +    def test_fcntl_file_descriptor(self):
    +        # again, but pass the file rather than numeric descriptor
    +        self.f = open(TESTFN, 'w')
    +        rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
    +        if sys.platform not in ['os2emx']:
    +            rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
    +        self.f.close()
    +
    +    def test_fcntl_64_bit(self):
    +        # Issue #1309352: fcntl shouldn't fail when the third arg fits in a
    +        # C 'long' but not in a C 'int'.
    +        try:
    +            cmd = fcntl.F_NOTIFY
    +            # This flag is larger than 2**31 in 64-bit builds
    +            flags = fcntl.DN_MULTISHOT
    +        except AttributeError:
    +            self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
    +        fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
    +        try:
    +            fcntl.fcntl(fd, cmd, flags)
    +        finally:
    +            os.close(fd)
    +
    +
    +def test_main():
    +    run_unittest(TestFcntl)
    +
    +if __name__ == '__main__':
    +    test_main()
    diff --git a/lib-python/modified-2.7/test/test_multibytecodec.py b/lib-python/modified-2.7/test/test_multibytecodec.py
    --- a/lib-python/modified-2.7/test/test_multibytecodec.py
    +++ b/lib-python/modified-2.7/test/test_multibytecodec.py
    @@ -148,7 +148,8 @@
     class Test_StreamReader(unittest.TestCase):
         def test_bug1728403(self):
             try:
    -            open(TESTFN, 'w').write('\xa1')
    +            with open(TESTFN, 'w') as f:
    +                f.write('\xa1')
                 f = codecs.open(TESTFN, encoding='cp949')
                 self.assertRaises(UnicodeDecodeError, f.read, 2)
             finally:
    diff --git a/lib-python/modified-2.7/test/test_tempfile.py b/lib-python/modified-2.7/test/test_tempfile.py
    --- a/lib-python/modified-2.7/test/test_tempfile.py
    +++ b/lib-python/modified-2.7/test/test_tempfile.py
    @@ -23,8 +23,8 @@
     
     # TEST_FILES may need to be tweaked for systems depending on the maximum
     # number of files that can be opened at one time (see ulimit -n)
    -if sys.platform in ('openbsd3', 'openbsd4'):
    -    TEST_FILES = 48
    +if sys.platform.startswith("openbsd"):
    +    TEST_FILES = 64 # ulimit -n defaults to 128 for normal users
     else:
         TEST_FILES = 100
     
    diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
    --- a/lib_pypy/_ctypes/basics.py
    +++ b/lib_pypy/_ctypes/basics.py
    @@ -54,7 +54,8 @@
         def get_ffi_argtype(self):
             if self._ffiargtype:
                 return self._ffiargtype
    -        return _shape_to_ffi_type(self._ffiargshape)
    +        self._ffiargtype = _shape_to_ffi_type(self._ffiargshape)
    +        return self._ffiargtype
     
         def _CData_output(self, resbuffer, base=None, index=-1):
             #assert isinstance(resbuffer, _rawffi.ArrayInstance)
    @@ -166,7 +167,8 @@
         return tp._alignmentofinstances()
     
     def byref(cdata):
    -    from ctypes import pointer
    +    # "pointer" is imported at the end of this module to avoid circular
    +    # imports
         return pointer(cdata)
     
     def cdata_from_address(self, address):
    @@ -224,5 +226,9 @@
         'Z' : _ffi.types.void_p,
         'X' : _ffi.types.void_p,
         'v' : _ffi.types.sshort,
    +    '?' : _ffi.types.ubyte,
         }
     
    +
    +# used by "byref"
    +from _ctypes.pointer import pointer
    diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
    --- a/lib_pypy/_ctypes/function.py
    +++ b/lib_pypy/_ctypes/function.py
    @@ -91,13 +91,15 @@
                         raise TypeError(
                             "item %d in _argtypes_ has no from_param method" % (
                                 i + 1,))
    -            #
    -            if all([hasattr(argtype, '_ffiargshape') for argtype in argtypes]):
    -                fastpath_cls = make_fastpath_subclass(self.__class__)
    -                fastpath_cls.enable_fastpath_maybe(self)
                 self._argtypes_ = list(argtypes)
    +            self._check_argtypes_for_fastpath()
         argtypes = property(_getargtypes, _setargtypes)
     
    +    def _check_argtypes_for_fastpath(self):
    +        if all([hasattr(argtype, '_ffiargshape') for argtype in self._argtypes_]):
    +            fastpath_cls = make_fastpath_subclass(self.__class__)
    +            fastpath_cls.enable_fastpath_maybe(self)
    +
         def _getparamflags(self):
             return self._paramflags
     
    @@ -216,6 +218,7 @@
                     import ctypes
                     restype = ctypes.c_int
                 self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype)
    +            self._check_argtypes_for_fastpath()
                 return
     
             
    @@ -466,7 +469,8 @@
             newargs = []
             for argtype, arg in zip(argtypes, args):
                 param = argtype.from_param(arg)
    -            if argtype._type_ == 'P': # special-case for c_void_p
    +            _type_ = getattr(argtype, '_type_', None)
    +            if _type_ == 'P': # special-case for c_void_p
                     param = param._get_buffer_value()
                 elif self._is_primitive(argtype):
                     param = param.value
    diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
    --- a/lib_pypy/_ctypes/structure.py
    +++ b/lib_pypy/_ctypes/structure.py
    @@ -14,6 +14,15 @@
                 raise TypeError("Expected CData subclass, got %s" % (tp,))
             if isinstance(tp, StructOrUnionMeta):
                 tp._make_final()
    +        if len(f) == 3:
    +            if (not hasattr(tp, '_type_')
    +                or not isinstance(tp._type_, str)
    +                or tp._type_ not in "iIhHbBlL"):
    +                #XXX: are those all types?
    +                #     we just dont get the type name
    +                #     in the interp levle thrown TypeError
    +                #     from rawffi if there are more
    +                raise TypeError('bit fields not allowed for type ' + tp.__name__)
     
         all_fields = []
         for cls in reversed(inspect.getmro(superclass)):
    @@ -34,34 +43,37 @@
         for i, field in enumerate(all_fields):
             name = field[0]
             value = field[1]
    +        is_bitfield = (len(field) == 3)
             fields[name] = Field(name,
                                  self._ffistruct.fieldoffset(name),
                                  self._ffistruct.fieldsize(name),
    -                             value, i)
    +                             value, i, is_bitfield)
     
         if anonymous_fields:
             resnames = []
             for i, field in enumerate(all_fields):
                 name = field[0]
                 value = field[1]
    +            is_bitfield = (len(field) == 3)
                 startpos = self._ffistruct.fieldoffset(name)
                 if name in anonymous_fields:
                     for subname in value._names:
                         resnames.append(subname)
    -                    relpos = startpos + value._fieldtypes[subname].offset
    -                    subvalue = value._fieldtypes[subname].ctype
    +                    subfield = getattr(value, subname)
    +                    relpos = startpos + subfield.offset
    +                    subvalue = subfield.ctype
                         fields[subname] = Field(subname,
                                                 relpos, subvalue._sizeofinstances(),
    -                                            subvalue, i)
    +                                            subvalue, i, is_bitfield)
                 else:
                     resnames.append(name)
             names = resnames
         self._names = names
    -    self._fieldtypes = fields
    +    self.__dict__.update(fields)
     
     class Field(object):
    -    def __init__(self, name, offset, size, ctype, num):
    -        for k in ('name', 'offset', 'size', 'ctype', 'num'):
    +    def __init__(self, name, offset, size, ctype, num, is_bitfield):
    +        for k in ('name', 'offset', 'size', 'ctype', 'num', 'is_bitfield'):
                 self.__dict__[k] = locals()[k]
     
         def __setattr__(self, name, value):
    @@ -71,6 +83,35 @@
             return "" % (self.name, self.offset,
                                                        self.size)
     
    +    def __get__(self, obj, cls=None):
    +        if obj is None:
    +            return self
    +        if self.is_bitfield:
    +            # bitfield member, use direct access
    +            return obj._buffer.__getattr__(self.name)
    +        else:
    +            fieldtype = self.ctype
    +            offset = self.num
    +            suba = obj._subarray(fieldtype, self.name)
    +            return fieldtype._CData_output(suba, obj, offset)
    +
    +
    +    def __set__(self, obj, value):
    +        fieldtype = self.ctype
    +        cobj = fieldtype.from_param(value)
    +        if ensure_objects(cobj) is not None:
    +            key = keepalive_key(self.num)
    +            store_reference(obj, key, cobj._objects)
    +        arg = cobj._get_buffer_value()
    +        if fieldtype._fficompositesize is not None:
    +            from ctypes import memmove
    +            dest = obj._buffer.fieldaddress(self.name)
    +            memmove(dest, arg, fieldtype._fficompositesize)
    +        else:
    +            obj._buffer.__setattr__(self.name, arg)
    +
    +
    +
     # ________________________________________________________________
     
     def _set_shape(tp, rawfields, is_union=False):
    @@ -79,17 +120,12 @@
         tp._ffiargshape = tp._ffishape = (tp._ffistruct, 1)
         tp._fficompositesize = tp._ffistruct.size
     
    -def struct_getattr(self, name):
    -    if name not in ('_fields_', '_fieldtypes'):
    -        if hasattr(self, '_fieldtypes') and name in self._fieldtypes:
    -            return self._fieldtypes[name]
    -    return _CDataMeta.__getattribute__(self, name)
     
     def struct_setattr(self, name, value):
         if name == '_fields_':
             if self.__dict__.get('_fields_', None) is not None:
                 raise AttributeError("_fields_ is final")
    -        if self in [v for k, v in value]:
    +        if self in [f[1] for f in value]:
                 raise AttributeError("Structure or union cannot contain itself")
             names_and_fields(
                 self,
    @@ -127,14 +163,14 @@
             if '_fields_' not in self.__dict__:
                 self._fields_ = []
                 self._names = []
    -            self._fieldtypes = {}
                 _set_shape(self, [], self._is_union)
     
    -    __getattr__ = struct_getattr
         __setattr__ = struct_setattr
     
         def from_address(self, address):
             instance = StructOrUnion.__new__(self)
    +        if isinstance(address, _rawffi.StructureInstance):
    +            address = address.buffer
             instance.__dict__['_buffer'] = self._ffistruct.fromaddress(address)
             return instance
     
    @@ -200,40 +236,6 @@
             A = _rawffi.Array(fieldtype._ffishape)
             return A.fromaddress(address, 1)
     
    -    def __setattr__(self, name, value):
    -        try:
    -            field = self._fieldtypes[name]
    -        except KeyError:
    -            return _CData.__setattr__(self, name, value)
    -        fieldtype = field.ctype
    -        cobj = fieldtype.from_param(value)
    -        if ensure_objects(cobj) is not None:
    -            key = keepalive_key(field.num)
    -            store_reference(self, key, cobj._objects)
    -        arg = cobj._get_buffer_value()
    -        if fieldtype._fficompositesize is not None:
    -            from ctypes import memmove
    -            dest = self._buffer.fieldaddress(name)
    -            memmove(dest, arg, fieldtype._fficompositesize)
    -        else:
    -            self._buffer.__setattr__(name, arg)
    -
    -    def __getattribute__(self, name):
    -        if name == '_fieldtypes':
    -            return _CData.__getattribute__(self, '_fieldtypes')
    -        try:
    -            field = self._fieldtypes[name]
    -        except KeyError:
    -            return _CData.__getattribute__(self, name)
    -        if field.size >> 16:
    -            # bitfield member, use direct access
    -            return self._buffer.__getattr__(name)
    -        else:
    -            fieldtype = field.ctype
    -            offset = field.num
    -            suba = self._subarray(fieldtype, name)
    -            return fieldtype._CData_output(suba, self, offset)
    -
         def _get_buffer_for_param(self):
             return self
     
    diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
    --- a/lib_pypy/_sqlite3.py
    +++ b/lib_pypy/_sqlite3.py
    @@ -24,6 +24,7 @@
     from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll
     from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast
     from ctypes import sizeof, c_ssize_t
    +from collections import OrderedDict
     import datetime
     import sys
     import time
    @@ -274,6 +275,28 @@
     def unicode_text_factory(x):
         return unicode(x, 'utf-8')
     
    +
    +class StatementCache(object):
    +    def __init__(self, connection, maxcount):
    +        self.connection = connection
    +        self.maxcount = maxcount
    +        self.cache = OrderedDict()
    +
    +    def get(self, sql, cursor, row_factory):
    +        try:
    +            stat = self.cache[sql]
    +        except KeyError:
    +            stat = Statement(self.connection, sql)
    +            self.cache[sql] = stat
    +            if len(self.cache) > self.maxcount:
    +                self.cache.popitem(0)
    +        #
    +        if stat.in_use:
    +            stat = Statement(self.connection, sql)
    +        stat.set_row_factory(row_factory)
    +        return stat
    +
    +
     class Connection(object):
         def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="",
                      check_same_thread=True, factory=None, cached_statements=100):
    @@ -291,6 +314,7 @@
             self.row_factory = None
             self._isolation_level = isolation_level
             self.detect_types = detect_types
    +        self.statement_cache = StatementCache(self, cached_statements)
     
             self.cursors = []
     
    @@ -399,7 +423,7 @@
             cur = Cursor(self)
             if not isinstance(sql, (str, unicode)):
                 raise Warning("SQL is of wrong type. Must be string or unicode.")
    -        statement = Statement(cur, sql, self.row_factory)
    +        statement = self.statement_cache.get(sql, cur, self.row_factory)
             return statement
     
         def _get_isolation_level(self):
    @@ -681,6 +705,8 @@
             from sqlite3.dump import _iterdump
             return _iterdump(self)
     
    +DML, DQL, DDL = range(3)
    +
     class Cursor(object):
         def __init__(self, con):
             if not isinstance(con, Connection):
    @@ -708,12 +734,12 @@
             if type(sql) is unicode:
                 sql = sql.encode("utf-8")
             self._check_closed()
    -        self.statement = Statement(self, sql, self.row_factory)
    +        self.statement = self.connection.statement_cache.get(sql, self, self.row_factory)
     
             if self.connection._isolation_level is not None:
    -            if self.statement.kind == "DDL":
    +            if self.statement.kind == DDL:
                     self.connection.commit()
    -            elif self.statement.kind == "DML":
    +            elif self.statement.kind == DML:
                     self.connection._begin()
     
             self.statement.set_params(params)
    @@ -724,19 +750,18 @@
                 self.statement.reset()
                 raise self.connection._get_exception(ret)
     
    -        if self.statement.kind == "DQL":
    -            if ret == SQLITE_ROW:
    -                self.statement._build_row_cast_map()
    -                self.statement._readahead()
    -            else:
    -                self.statement.item = None
    -                self.statement.exhausted = True
    +        if self.statement.kind == DQL and ret == SQLITE_ROW:
    +            self.statement._build_row_cast_map()
    +            self.statement._readahead(self)
    +        else:
    +            self.statement.item = None
    +            self.statement.exhausted = True
     
    -        if self.statement.kind in ("DML", "DDL"):
    +        if self.statement.kind == DML or self.statement.kind == DDL:
                 self.statement.reset()
     
             self.rowcount = -1
    -        if self.statement.kind == "DML":
    +        if self.statement.kind == DML:
                 self.rowcount = sqlite.sqlite3_changes(self.connection.db)
     
             return self
    @@ -747,8 +772,9 @@
             if type(sql) is unicode:
                 sql = sql.encode("utf-8")
             self._check_closed()
    -        self.statement = Statement(self, sql, self.row_factory)
    -        if self.statement.kind == "DML":
    +        self.statement = self.connection.statement_cache.get(sql, self, self.row_factory)
    +
    +        if self.statement.kind == DML:
                 self.connection._begin()
             else:
                 raise ProgrammingError, "executemany is only for DML statements"
    @@ -800,7 +826,7 @@
             return self
     
         def __iter__(self):
    -        return self.statement
    +        return iter(self.fetchone, None)
     
         def _check_reset(self):
             if self.reset:
    @@ -817,7 +843,7 @@
                 return None
     
             try:
    -            return self.statement.next()
    +            return self.statement.next(self)
             except StopIteration:
                 return None
     
    @@ -831,7 +857,7 @@
             if size is None:
                 size = self.arraysize
             lst = []
    -        for row in self.statement:
    +        for row in self:
                 lst.append(row)
                 if len(lst) == size:
                     break
    @@ -842,7 +868,7 @@
             self._check_reset()
             if self.statement is None:
                 return []
    -        return list(self.statement)
    +        return list(self)
     
         def _getdescription(self):
             if self._description is None:
    @@ -872,39 +898,47 @@
         lastrowid = property(_getlastrowid)
     
     class Statement(object):
    -    def __init__(self, cur, sql, row_factory):
    +    def __init__(self, connection, sql):
             self.statement = None
             if not isinstance(sql, str):
                 raise ValueError, "sql must be a string"
    -        self.con = cur.connection
    -        self.cur = weakref.ref(cur)
    +        self.con = connection
             self.sql = sql # DEBUG ONLY
    -        self.row_factory = row_factory
             first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper()
             if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"):
    -            self.kind = "DML"
    +            self.kind = DML
             elif first_word in ("SELECT", "PRAGMA"):
    -            self.kind = "DQL"
    +            self.kind = DQL
             else:
    -            self.kind = "DDL"
    +            self.kind = DDL
             self.exhausted = False
    +        self.in_use = False
    +        #
    +        # set by set_row_factory
    +        self.row_factory = None
     
             self.statement = c_void_p()
             next_char = c_char_p()
    -        ret = sqlite.sqlite3_prepare_v2(self.con.db, sql, -1, byref(self.statement), byref(next_char))
    +        sql_char = c_char_p(sql)
    +        ret = sqlite.sqlite3_prepare_v2(self.con.db, sql_char, -1, byref(self.statement), byref(next_char))
             if ret == SQLITE_OK and self.statement.value is None:
                 # an empty statement, we work around that, as it's the least trouble
                 ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char))
    -            self.kind = "DQL"
    +            self.kind = DQL
     
             if ret != SQLITE_OK:
                 raise self.con._get_exception(ret)
             self.con._remember_statement(self)
             if _check_remaining_sql(next_char.value):
    -            raise Warning, "One and only one statement required"
    +            raise Warning, "One and only one statement required: %r" % (
    +                next_char.value,)
    +        # sql_char should remain alive until here
     
             self._build_row_cast_map()
     
    +    def set_row_factory(self, row_factory):
    +        self.row_factory = row_factory
    +
         def _build_row_cast_map(self):
             self.row_cast_map = []
             for i in xrange(sqlite.sqlite3_column_count(self.statement)):
    @@ -974,6 +1008,7 @@
             ret = sqlite.sqlite3_reset(self.statement)
             if ret != SQLITE_OK:
                 raise self.con._get_exception(ret)
    +        self.mark_dirty()
     
             if params is None:
                 if sqlite.sqlite3_bind_parameter_count(self.statement) != 0:
    @@ -1004,10 +1039,7 @@
                         raise ProgrammingError("missing parameter '%s'" %param)
                     self.set_param(idx, param)
     
    -    def __iter__(self):
    -        return self
    -
    -    def next(self):
    +    def next(self, cursor):
             self.con._check_closed()
             self.con._check_thread()
             if self.exhausted:
    @@ -1023,10 +1055,10 @@
                 sqlite.sqlite3_reset(self.statement)
                 raise exc
     
    -        self._readahead()
    +        self._readahead(cursor)
             return item
     
    -    def _readahead(self):
    +    def _readahead(self, cursor):
             self.column_count = sqlite.sqlite3_column_count(self.statement)
             row = []
             for i in xrange(self.column_count):
    @@ -1061,23 +1093,30 @@
     
             row = tuple(row)
             if self.row_factory is not None:
    -            row = self.row_factory(self.cur(), row)
    +            row = self.row_factory(cursor, row)
             self.item = row
     
         def reset(self):
             self.row_cast_map = None
    -        return sqlite.sqlite3_reset(self.statement)
    +        ret = sqlite.sqlite3_reset(self.statement)
    +        self.in_use = False
    +        self.exhausted = False
    +        return ret
     
         def finalize(self):
             sqlite.sqlite3_finalize(self.statement)
             self.statement = None
    +        self.in_use = False
    +
    +    def mark_dirty(self):
    +        self.in_use = True
     
         def __del__(self):
             sqlite.sqlite3_finalize(self.statement)
             self.statement = None
     
         def _get_description(self):
    -        if self.kind == "DML":
    +        if self.kind == DML:
                 return None
             desc = []
             for i in xrange(sqlite.sqlite3_column_count(self.statement)):
    diff --git a/lib_pypy/_subprocess.py b/lib_pypy/_subprocess.py
    --- a/lib_pypy/_subprocess.py
    +++ b/lib_pypy/_subprocess.py
    @@ -35,7 +35,7 @@
     _DuplicateHandle.restype = ctypes.c_int
         
     _WaitForSingleObject = _kernel32.WaitForSingleObject
    -_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_int]
    +_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_uint]
     _WaitForSingleObject.restype = ctypes.c_int
     
     _GetExitCodeProcess = _kernel32.GetExitCodeProcess
    diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py
    --- a/lib_pypy/distributed/test/test_distributed.py
    +++ b/lib_pypy/distributed/test/test_distributed.py
    @@ -9,7 +9,7 @@
     class AppTestDistributed(object):
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -            "usemodules":("_stackless",)})
    +            "usemodules":("_continuation",)})
     
         def test_init(self):
             import distributed
    @@ -91,10 +91,8 @@
     
     class AppTestDistributedTasklets(object):
         spaceconfig = {"objspace.std.withtproxy": True,
    -                   "objspace.usemodules._stackless": True}
    +                   "objspace.usemodules._continuation": True}
         def setup_class(cls):
    -        #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -        #    "usemodules":("_stackless",)})
             cls.w_test_env = cls.space.appexec([], """():
             from distributed import test_env
             return test_env
    diff --git a/lib_pypy/distributed/test/test_greensock.py b/lib_pypy/distributed/test/test_greensock.py
    --- a/lib_pypy/distributed/test/test_greensock.py
    +++ b/lib_pypy/distributed/test/test_greensock.py
    @@ -10,7 +10,7 @@
             if not option.runappdirect:
                 py.test.skip("Cannot run this on top of py.py because of PopenGateway")
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless",)})
    +                                       "usemodules":("_continuation",)})
             cls.w_remote_side_code = cls.space.appexec([], """():
             import sys
             sys.path.insert(0, '%s')
    diff --git a/lib_pypy/distributed/test/test_socklayer.py b/lib_pypy/distributed/test/test_socklayer.py
    --- a/lib_pypy/distributed/test/test_socklayer.py
    +++ b/lib_pypy/distributed/test/test_socklayer.py
    @@ -9,7 +9,8 @@
     class AppTestSocklayer:
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless","_socket", "select")})
    +                                       "usemodules":("_continuation",
    +                                                     "_socket", "select")})
         
         def test_socklayer(self):
             class X(object):
    diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
    --- a/lib_pypy/greenlet.py
    +++ b/lib_pypy/greenlet.py
    @@ -1,1 +1,151 @@
    -from _stackless import greenlet
    +import _continuation, sys
    +
    +
    +# ____________________________________________________________
    +# Exceptions
    +
    +class GreenletExit(Exception):
    +    """This special exception does not propagate to the parent greenlet; it
    +can be used to kill a single greenlet."""
    +
    +error = _continuation.error
    +
    +# ____________________________________________________________
    +# Helper function
    +
    +def getcurrent():
    +    "Returns the current greenlet (i.e. the one which called this function)."
    +    try:
    +        return _tls.current
    +    except AttributeError:
    +        # first call in this thread: current == main
    +        _green_create_main()
    +        return _tls.current
    +
    +# ____________________________________________________________
    +# The 'greenlet' class
    +
    +_continulet = _continuation.continulet
    +
    +class greenlet(_continulet):
    +    getcurrent = staticmethod(getcurrent)
    +    error = error
    +    GreenletExit = GreenletExit
    +    __main = False
    +    __started = False
    +
    +    def __new__(cls, *args, **kwds):
    +        self = _continulet.__new__(cls)
    +        self.parent = getcurrent()
    +        return self
    +
    +    def __init__(self, run=None, parent=None):
    +        if run is not None:
    +            self.run = run
    +        if parent is not None:
    +            self.parent = parent
    +
    +    def switch(self, *args):
    +        "Switch execution to this greenlet, optionally passing the values "
    +        "given as argument(s).  Returns the value passed when switching back."
    +        return self.__switch(_continulet.switch, args)
    +
    +    def throw(self, typ=GreenletExit, val=None, tb=None):
    +        "raise exception in greenlet, return value passed when switching back"
    +        return self.__switch(_continulet.throw, typ, val, tb)
    +
    +    def __switch(target, unbound_method, *args):
    +        current = getcurrent()
    +        #
    +        while not target:
    +            if not target.__started:
    +                if unbound_method != _continulet.throw:
    +                    greenlet_func = _greenlet_start
    +                else:
    +                    greenlet_func = _greenlet_throw
    +                _continulet.__init__(target, greenlet_func, *args)
    +                unbound_method = _continulet.switch
    +                args = ()
    +                target.__started = True
    +                break
    +            # already done, go to the parent instead
    +            # (NB. infinite loop possible, but unlikely, unless you mess
    +            # up the 'parent' explicitly.  Good enough, because a Ctrl-C
    +            # will show that the program is caught in this loop here.)
    +            target = target.parent
    +        #
    +        try:
    +            if current.__main:
    +                if target.__main:
    +                    # switch from main to main
    +                    if unbound_method == _continulet.throw:
    +                        raise args[0], args[1], args[2]
    +                    (args,) = args
    +                else:
    +                    # enter from main to target
    +                    args = unbound_method(target, *args)
    +            else:
    +                if target.__main:
    +                    # leave to go to target=main
    +                    args = unbound_method(current, *args)
    +                else:
    +                    # switch from non-main to non-main
    +                    args = unbound_method(current, *args, to=target)
    +        except GreenletExit, e:
    +            args = (e,)
    +        finally:
    +            _tls.current = current
    +        #
    +        if len(args) == 1:
    +            return args[0]
    +        else:
    +            return args
    +
    +    def __nonzero__(self):
    +        return self.__main or _continulet.is_pending(self)
    +
    +    @property
    +    def dead(self):
    +        return self.__started and not self
    +
    +    @property
    +    def gr_frame(self):
    +        raise NotImplementedError("attribute 'gr_frame' of greenlet objects")
    +
    +# ____________________________________________________________
    +# Internal stuff
    +
    +try:
    +    from thread import _local
    +except ImportError:
    +    class _local(object):    # assume no threads
    +        pass
    +
    +_tls = _local()
    +
    +def _green_create_main():
    +    # create the main greenlet for this thread
    +    _tls.current = None
    +    gmain = greenlet.__new__(greenlet)
    +    gmain._greenlet__main = True
    +    gmain._greenlet__started = True
    +    assert gmain.parent is None
    +    _tls.main = gmain
    +    _tls.current = gmain
    +
    +def _greenlet_start(greenlet, args):
    +    _tls.current = greenlet
    +    try:
    +        res = greenlet.run(*args)
    +    finally:
    +        if greenlet.parent is not _tls.main:
    +            _continuation.permute(greenlet, greenlet.parent)
    +    return (res,)
    +
    +def _greenlet_throw(greenlet, exc, value, tb):
    +    _tls.current = greenlet
    +    try:
    +        raise exc, value, tb
    +    finally:
    +        if greenlet.parent is not _tls.main:
    +            _continuation.permute(greenlet, greenlet.parent)
    diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py
    --- a/lib_pypy/pypy_test/test_coroutine.py
    +++ b/lib_pypy/pypy_test/test_coroutine.py
    @@ -2,7 +2,7 @@
     from py.test import skip, raises
     
     try:
    -    from lib_pypy.stackless import coroutine, CoroutineExit
    +    from stackless import coroutine, CoroutineExit
     except ImportError, e:
         skip('cannot import stackless: %s' % (e,))
     
    @@ -20,10 +20,6 @@
             assert not co.is_zombie
     
         def test_is_zombie_del_without_frame(self):
    -        try:
    -            import _stackless # are we on pypy with a stackless build?
    -        except ImportError:
    -            skip("only works on pypy-c-stackless")
             import gc
             res = []
             class MyCoroutine(coroutine):
    @@ -45,10 +41,6 @@
             assert res[0], "is_zombie was False in __del__"
     
         def test_is_zombie_del_with_frame(self):
    -        try:
    -            import _stackless # are we on pypy with a stackless build?
    -        except ImportError:
    -            skip("only works on pypy-c-stackless")
             import gc
             res = []
             class MyCoroutine(coroutine):
    diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py
    --- a/lib_pypy/pyrepl/reader.py
    +++ b/lib_pypy/pyrepl/reader.py
    @@ -401,13 +401,19 @@
                 return "(arg: %s) "%self.arg
             if "\n" in self.buffer:
                 if lineno == 0:
    -                return self._ps2
    +                res = self.ps2
                 elif lineno == self.buffer.count("\n"):
    -                return self._ps4
    +                res = self.ps4
                 else:
    -                return self._ps3
    +                res = self.ps3
             else:
    -            return self._ps1
    +            res = self.ps1
    +        # Lazily call str() on self.psN, and cache the results using as key
    +        # the object on which str() was called.  This ensures that even if the
    +        # same object is used e.g. for ps1 and ps2, str() is called only once.
    +        if res not in self._pscache:
    +            self._pscache[res] = str(res)
    +        return self._pscache[res]
     
         def push_input_trans(self, itrans):
             self.input_trans_stack.append(self.input_trans)
    @@ -473,8 +479,7 @@
                 self.pos = 0
                 self.dirty = 1
                 self.last_command = None
    -            self._ps1, self._ps2, self._ps3, self._ps4 = \
    -                           map(str, [self.ps1, self.ps2, self.ps3, self.ps4])
    +            self._pscache = {}
             except:
                 self.restore()
                 raise
    diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py
    --- a/lib_pypy/pyrepl/readline.py
    +++ b/lib_pypy/pyrepl/readline.py
    @@ -33,7 +33,7 @@
     from pyrepl.unix_console import UnixConsole, _error
     
     
    -ENCODING = 'latin1'     # XXX hard-coded
    +ENCODING = sys.getfilesystemencoding() or 'latin1'     # XXX review
     
     __all__ = ['add_history',
                'clear_history',
    diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py
    --- a/pypy/annotation/builtin.py
    +++ b/pypy/annotation/builtin.py
    @@ -308,9 +308,6 @@
                 clsdef = clsdef.commonbase(cdef)
         return SomeInstance(clsdef)
     
    -def robjmodel_we_are_translated():
    -    return immutablevalue(True)
    -
     def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None):
         if s_force_non_null is None:
             force_non_null = False
    @@ -376,8 +373,6 @@
     
     BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.intmask] = rarith_intmask
     BUILTIN_ANALYZERS[pypy.rlib.objectmodel.instantiate] = robjmodel_instantiate
    -BUILTIN_ANALYZERS[pypy.rlib.objectmodel.we_are_translated] = (
    -    robjmodel_we_are_translated)
     BUILTIN_ANALYZERS[pypy.rlib.objectmodel.r_dict] = robjmodel_r_dict
     BUILTIN_ANALYZERS[pypy.rlib.objectmodel.hlinvoke] = robjmodel_hlinvoke
     BUILTIN_ANALYZERS[pypy.rlib.objectmodel.keepalive_until_here] = robjmodel_keepalive_until_here
    @@ -416,7 +411,8 @@
     from pypy.annotation.model import SomePtr
     from pypy.rpython.lltypesystem import lltype
     
    -def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None):
    +def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None,
    +           s_add_memory_pressure=None):
         assert (s_n is None or s_n.knowntype == int
                 or issubclass(s_n.knowntype, pypy.rlib.rarithmetic.base_int))
         assert s_T.is_constant()
    @@ -432,6 +428,8 @@
         else:
             assert s_flavor.is_constant()
             assert s_track_allocation is None or s_track_allocation.is_constant()
    +        assert (s_add_memory_pressure is None or
    +                s_add_memory_pressure.is_constant())
             # not sure how to call malloc() for the example 'p' in the
             # presence of s_extraargs
             r = SomePtr(lltype.Ptr(s_T.const))
    diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py
    --- a/pypy/annotation/description.py
    +++ b/pypy/annotation/description.py
    @@ -399,9 +399,7 @@
                     if b1 is object:
                         continue
                     if b1.__dict__.get('_mixin_', False):
    -                    assert b1.__bases__ == () or b1.__bases__ == (object,), (
    -                        "mixin class %r should have no base" % (b1,))
    -                    self.add_sources_for_class(b1, mixin=True)
    +                    self.add_mixin(b1)
                     else:
                         assert base is object, ("multiple inheritance only supported "
                                                 "with _mixin_: %r" % (cls,))
    @@ -469,6 +467,15 @@
                     return
             self.classdict[name] = Constant(value)
     
    +    def add_mixin(self, base):
    +        for subbase in base.__bases__:
    +            if subbase is object:
    +                continue
    +            assert subbase.__dict__.get("_mixin_", False), ("Mixin class %r has non"
    +                "mixin base class %r" % (base, subbase))
    +            self.add_mixin(subbase)
    +        self.add_sources_for_class(base, mixin=True)
    +
         def add_sources_for_class(self, cls, mixin=False):
             for name, value in cls.__dict__.items():
                 self.add_source_attribute(name, value, mixin)
    diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py
    --- a/pypy/config/makerestdoc.py
    +++ b/pypy/config/makerestdoc.py
    @@ -134,7 +134,7 @@
             for child in self._children:
                 subpath = fullpath + "." + child._name
                 toctree.append(subpath)
    -        content.add(Directive("toctree", *toctree, maxdepth=4))
    +        content.add(Directive("toctree", *toctree, **{'maxdepth': 4}))
             content.join(
                 ListItem(Strong("name:"), self._name),
                 ListItem(Strong("description:"), self.doc))
    diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
    --- a/pypy/config/pypyoption.py
    +++ b/pypy/config/pypyoption.py
    @@ -33,7 +33,8 @@
          "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO",
          "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array",
          "_bisect", "binascii", "_multiprocessing", '_warnings',
    -     "_collections", "_multibytecodec", "micronumpy", "_ffi"]
    +     "_collections", "_multibytecodec", "micronumpy", "_ffi",
    +     "_continuation"]
     ))
     
     translation_modules = default_modules.copy()
    @@ -99,6 +100,7 @@
         "_ssl"      : ["pypy.module._ssl.interp_ssl"],
         "_hashlib"  : ["pypy.module._ssl.interp_ssl"],
         "_minimal_curses": ["pypy.module._minimal_curses.fficurses"],
    +    "_continuation": ["pypy.rlib.rstacklet"],
         }
     
     def get_module_validator(modname):
    diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py
    --- a/pypy/config/test/test_config.py
    +++ b/pypy/config/test/test_config.py
    @@ -1,5 +1,5 @@
     from pypy.config.config import *
    -import py
    +import py, sys
     
     def make_description():
         gcoption = ChoiceOption('name', 'GC name', ['ref', 'framework'], 'ref')
    @@ -69,13 +69,15 @@
         attrs = dir(config)
         assert '__repr__' in attrs        # from the type
         assert '_cfgimpl_values' in attrs # from self
    -    assert 'gc' in attrs              # custom attribute
    -    assert 'objspace' in attrs        # custom attribute
    +    if sys.version_info >= (2, 6):
    +        assert 'gc' in attrs              # custom attribute
    +        assert 'objspace' in attrs        # custom attribute
         #
         attrs = dir(config.gc)
    -    assert 'name' in attrs
    -    assert 'dummy' in attrs
    -    assert 'float' in attrs
    +    if sys.version_info >= (2, 6):
    +        assert 'name' in attrs
    +        assert 'dummy' in attrs
    +        assert 'float' in attrs
     
     def test_arbitrary_option():
         descr = OptionDescription("top", "", [
    @@ -279,11 +281,11 @@
     
     def test_underscore_in_option_name():
         descr = OptionDescription("opt", "", [
    -        BoolOption("_stackless", "", default=False),
    +        BoolOption("_foobar", "", default=False),
         ])
         config = Config(descr)
         parser = to_optparse(config)
    -    assert parser.has_option("--_stackless")
    +    assert parser.has_option("--_foobar")
     
     def test_none():
         dummy1 = BoolOption('dummy1', 'doc dummy', default=False, cmdline=None)
    diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py
    --- a/pypy/config/translationoption.py
    +++ b/pypy/config/translationoption.py
    @@ -13,6 +13,10 @@
     DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0
     
     DEFL_GC = "minimark"
    +if sys.platform.startswith("linux"):
    +    DEFL_ROOTFINDER_WITHJIT = "asmgcc"
    +else:
    +    DEFL_ROOTFINDER_WITHJIT = "shadowstack"
     
     IS_64_BITS = sys.maxint > 2147483647
     
    @@ -24,10 +28,9 @@
     
     translation_optiondescription = OptionDescription(
             "translation", "Translation Options", [
    -    BoolOption("stackless", "enable stackless features during compilation",
    -               default=False, cmdline="--stackless",
    -               requires=[("translation.type_system", "lltype"),
    -                         ("translation.gcremovetypeptr", False)]),  # XXX?
    +    BoolOption("continuation", "enable single-shot continuations",
    +               default=False, cmdline="--continuation",
    +               requires=[("translation.type_system", "lltype")]),
         ChoiceOption("type_system", "Type system to use when RTyping",
                      ["lltype", "ootype"], cmdline=None, default="lltype",
                      requires={
    @@ -66,7 +69,8 @@
                          "statistics": [("translation.gctransformer", "framework")],
                          "generation": [("translation.gctransformer", "framework")],
                          "hybrid": [("translation.gctransformer", "framework")],
    -                     "boehm": [("translation.gctransformer", "boehm")],
    +                     "boehm": [("translation.gctransformer", "boehm"),
    +                               ("translation.continuation", False)],  # breaks
                          "markcompact": [("translation.gctransformer", "framework")],
                          "minimark": [("translation.gctransformer", "framework")],
                          },
    @@ -109,7 +113,7 @@
         BoolOption("jit", "generate a JIT",
                    default=False,
                    suggests=[("translation.gc", DEFL_GC),
    -                         ("translation.gcrootfinder", "asmgcc"),
    +                         ("translation.gcrootfinder", DEFL_ROOTFINDER_WITHJIT),
                              ("translation.list_comprehension_operations", True)]),
         ChoiceOption("jit_backend", "choose the backend for the JIT",
                      ["auto", "x86", "x86-without-sse2", "llvm"],
    @@ -385,8 +389,6 @@
                 config.translation.suggest(withsmallfuncsets=5)
             elif word == 'jit':
                 config.translation.suggest(jit=True)
    -            if config.translation.stackless:
    -                raise NotImplementedError("JIT conflicts with stackless for now")
             elif word == 'removetypeptr':
                 config.translation.suggest(gcremovetypeptr=True)
             else:
    diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt
    --- a/pypy/doc/_ref.txt
    +++ b/pypy/doc/_ref.txt
    @@ -1,11 +1,10 @@
     .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py
     .. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/
    -.. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py
     .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/
     .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py
     .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/
    +.. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py
     .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/
    -.. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py
     .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py
     .. _`pypy/annotation`:
     .. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/
    @@ -55,7 +54,6 @@
     .. _`pypy/module`:
     .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/
     .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py
    -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py
     .. _`pypy/objspace`:
     .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/
     .. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py
    @@ -117,6 +115,7 @@
     .. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/
     .. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/
     .. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/
    +.. _`pypy/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/src/stacklet/
     .. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/
     .. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/
     .. _`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/
    diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst
    --- a/pypy/doc/architecture.rst
    +++ b/pypy/doc/architecture.rst
    @@ -153,7 +153,7 @@
     
     * Optionally, `various transformations`_ can then be applied which, for
       example, perform optimizations such as inlining, add capabilities
    -  such as stackless_-style concurrency, or insert code for the
    +  such as stackless-style concurrency (deprecated), or insert code for the
       `garbage collector`_.
     
     * Then, the graphs are converted to source code for the target platform
    @@ -255,7 +255,6 @@
     
     .. _Python: http://docs.python.org/reference/
     .. _Psyco: http://psyco.sourceforge.net
    -.. _stackless: stackless.html
     .. _`generate Just-In-Time Compilers`: jit/index.html
     .. _`JIT Generation in PyPy`: jit/index.html
     .. _`implement your own interpreter`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html
    diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
    --- a/pypy/doc/conf.py
    +++ b/pypy/doc/conf.py
    @@ -45,9 +45,9 @@
     # built documents.
     #
     # The short X.Y version.
    -version = '1.5'
    +version = '1.6'
     # The full version, including alpha/beta/rc tags.
    -release = '1.5'
    +release = '1.6'
     
     # The language for content autogenerated by Sphinx. Refer to documentation
     # for a list of supported languages.
    diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._continuation.txt
    rename from pypy/doc/config/objspace.usemodules._stackless.txt
    rename to pypy/doc/config/objspace.usemodules._continuation.txt
    --- a/pypy/doc/config/objspace.usemodules._stackless.txt
    +++ b/pypy/doc/config/objspace.usemodules._continuation.txt
    @@ -1,6 +1,4 @@
    -Use the '_stackless' module. 
    +Use the '_continuation' module. 
     
    -Exposes the `stackless` primitives, and also implies a stackless build. 
    -See also :config:`translation.stackless`.
    -
    -.. _`stackless`: ../stackless.html
    +Exposes the `continulet` app-level primitives.
    +See also :config:`translation.continuation`.
    diff --git a/pypy/doc/config/translation.stackless.txt b/pypy/doc/config/translation.continuation.txt
    rename from pypy/doc/config/translation.stackless.txt
    rename to pypy/doc/config/translation.continuation.txt
    --- a/pypy/doc/config/translation.stackless.txt
    +++ b/pypy/doc/config/translation.continuation.txt
    @@ -1,5 +1,2 @@
    -Run the `stackless transform`_ on each generated graph, which enables the use
    -of coroutines at RPython level and the "stackless" module when translating
    -PyPy.
    -
    -.. _`stackless transform`: ../stackless.html
    +Enable the use of a stackless-like primitive called "stacklet".
    +In PyPy, this is exposed at app-level by the "_continuation" module.
    diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
    --- a/pypy/doc/contributor.rst
    +++ b/pypy/doc/contributor.rst
    @@ -9,22 +9,22 @@
         Armin Rigo
         Maciej Fijalkowski
         Carl Friedrich Bolz
    +    Antonio Cuni
         Amaury Forgeot d'Arc
    -    Antonio Cuni
         Samuele Pedroni
         Michael Hudson
         Holger Krekel
    +    Benjamin Peterson
         Christian Tismer
    -    Benjamin Peterson
    +    Hakan Ardo
    +    Alex Gaynor
         Eric van Riet Paap
    -    Anders Chrigström
    -    Håkan Ardö
    +    Anders Chrigstrom
    +    David Schneider
         Richard Emslie
         Dan Villiom Podlaski Christiansen
         Alexander Schremmer
    -    Alex Gaynor
    -    David Schneider
    -    Aurelién Campeas
    +    Aurelien Campeas
         Anders Lehmann
         Camillo Bruni
         Niklaus Haldimann
    @@ -35,16 +35,17 @@
         Bartosz Skowron
         Jakub Gustak
         Guido Wesdorp
    +    Daniel Roberts
         Adrien Di Mascio
         Laura Creighton
         Ludovic Aubry
         Niko Matsakis
    -    Daniel Roberts
         Jason Creighton
    -    Jacob Hallén
    +    Jacob Hallen
         Alex Martelli
         Anders Hammarquist
         Jan de Mooij
    +    Wim Lavrijsen
         Stephan Diehl
         Michael Foord
         Stefan Schwarzer
    @@ -55,9 +56,13 @@
         Alexandre Fayolle
         Marius Gedminas
         Simon Burton
    +    Justin Peel
         Jean-Paul Calderone
         John Witulski
    +    Lukas Diekmann
    +    holger krekel
         Wim Lavrijsen
    +    Dario Bertini
         Andreas Stührk
         Jean-Philippe St. Pierre
         Guido van Rossum
    @@ -69,15 +74,16 @@
         Georg Brandl
         Gerald Klix
         Wanja Saatkamp
    +    Ronny Pfannschmidt
         Boris Feigin
         Oscar Nierstrasz
    -    Dario Bertini
         David Malcolm
         Eugene Oden
         Henry Mason
    +    Sven Hager
         Lukas Renggli
    +    Ilya Osadchiy
         Guenter Jantzen
    -    Ronny Pfannschmidt
         Bert Freudenberg
         Amit Regmi
         Ben Young
    @@ -94,8 +100,8 @@
         Jared Grubb
         Karl Bartel
         Gabriel Lavoie
    +    Victor Stinner
         Brian Dorsey
    -    Victor Stinner
         Stuart Williams
         Toby Watson
         Antoine Pitrou
    @@ -106,19 +112,23 @@
         Jonathan David Riehl
         Elmo Mäntynen
         Anders Qvist
    -    Beatrice Düring
    +    Beatrice During
         Alexander Sedov
    +    Timo Paulssen
    +    Corbin Simpson
         Vincent Legoll
    +    Romain Guillebert
         Alan McIntyre
    -    Romain Guillebert
         Alex Perry
         Jens-Uwe Mager
    +    Simon Cross
         Dan Stromberg
    -    Lukas Diekmann
    +    Guillebert Romain
         Carl Meyer
         Pieter Zieschang
         Alejandro J. Cura
         Sylvain Thenault
    +    Christoph Gerum
         Travis Francis Athougies
         Henrik Vendelbo
         Lutz Paelike
    @@ -129,6 +139,7 @@
         Miguel de Val Borro
         Ignas Mikalajunas
         Artur Lisiecki
    +    Philip Jenvey
         Joshua Gilbert
         Godefroid Chappelle
         Yusei Tahara
    @@ -137,24 +148,29 @@
         Gustavo Niemeyer
         William Leslie
         Akira Li
    -    Kristján Valur Jónsson
    +    Kristjan Valur Jonsson
         Bobby Impollonia
    +    Michael Hudson-Doyle
         Andrew Thompson
         Anders Sigfridsson
    +    Floris Bruynooghe
         Jacek Generowicz
         Dan Colish
    -    Sven Hager
         Zooko Wilcox-O Hearn
    +    Dan Villiom Podlaski Christiansen
         Anders Hammarquist
    +    Chris Lambacher
         Dinu Gherman
         Dan Colish
    +    Brett Cannon
         Daniel Neuhäuser
         Michael Chermside
         Konrad Delong
         Anna Ravencroft
         Greg Price
         Armin Ronacher
    +    Christian Muirhead
         Jim Baker
    -    Philip Jenvey
         Rodrigo Araújo
    +    Romain Guillebert
     
    diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
    --- a/pypy/doc/cpython_differences.rst
    +++ b/pypy/doc/cpython_differences.rst
    @@ -24,6 +24,7 @@
         _bisect
         _codecs
         _collections
    +    `_continuation`_
         `_ffi`_
         _hashlib
         _io
    @@ -84,9 +85,12 @@
     
         _winreg
     
    -  Extra module with Stackless_ only:
    -
    -    _stackless
    +  Note that only some of these modules are built-in in a typical
    +  CPython installation, and the rest is from non built-in extension
    +  modules.  This means that e.g. ``import parser`` will, on CPython,
    +  find a local file ``parser.py``, while ``import sys`` will not find a
    +  local file ``sys.py``.  In PyPy the difference does not exist: all
    +  these modules are built-in.
     
     * Supported by being rewritten in pure Python (possibly using ``ctypes``):
       see the `lib_pypy/`_ directory.  Examples of modules that we
    @@ -101,11 +105,11 @@
     
     .. the nonstandard modules are listed below...
     .. _`__pypy__`: __pypy__-module.html
    +.. _`_continuation`: stackless.html
     .. _`_ffi`: ctypes-implementation.html
     .. _`_rawffi`: ctypes-implementation.html
     .. _`_minimal_curses`: config/objspace.usemodules._minimal_curses.html
     .. _`cpyext`: http://morepypy.blogspot.com/2010/04/using-cpython-extension-modules-with.html
    -.. _Stackless: stackless.html
     
     
     Differences related to garbage collection strategies
    @@ -280,7 +284,14 @@
       never a dictionary as it sometimes is in CPython. Assigning to
       ``__builtins__`` has no effect.
     
    -* object identity of immutable keys in dictionaries is not necessarily preserved.
    -  Never compare immutable objects with ``is``.
    +* Do not compare immutable objects with ``is``.  For example on CPython
    +  it is true that ``x is 0`` works, i.e. does the same as ``type(x) is
    +  int and x == 0``, but it is so by accident.  If you do instead
    +  ``x is 1000``, then it stops working, because 1000 is too large and
    +  doesn't come from the internal cache.  In PyPy it fails to work in
    +  both cases, because we have no need for a cache at all.
    +
    +* Also, object identity of immutable keys in dictionaries is not necessarily
    +  preserved.
     
     .. include:: _ref.txt
    diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
    --- a/pypy/doc/faq.rst
    +++ b/pypy/doc/faq.rst
    @@ -315,6 +315,28 @@
     
     .. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html
     
    +---------------------------------------------------------
    +Can RPython modules for PyPy be translated independently?
    +---------------------------------------------------------
    +
    +No, you have to rebuild the entire interpreter.  This means two things:
    +
    +* It is imperative to use test-driven development.  You have to test
    +  exhaustively your module in pure Python, before even attempting to
    +  translate it.  Once you translate it, you should have only a few typing
    +  issues left to fix, but otherwise the result should work out of the box.
    +
    +* Second, and perhaps most important: do you have a really good reason
    +  for writing the module in RPython in the first place?  Nowadays you
    +  should really look at alternatives, like writing it in pure Python,
    +  using ctypes if it needs to call C code.  Other alternatives are being
    +  developed too (as of summer 2011), like a Cython binding.
    +
    +In this context it is not that important to be able to translate
    +RPython modules independently of translating the complete interpreter.
    +(It could be done given enough efforts, but it's a really serious
    +undertaking.  Consider it as quite unlikely for now.)
    +
     ----------------------------------------------------------
     Why does PyPy draw a Mandelbrot fractal while translating?
     ----------------------------------------------------------
    diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst
    --- a/pypy/doc/garbage_collection.rst
    +++ b/pypy/doc/garbage_collection.rst
    @@ -147,7 +147,7 @@
     You can read more about them at the start of
     `pypy/rpython/memory/gc/minimark.py`_.
     
    -In more details:
    +In more detail:
     
     - The small newly malloced objects are allocated in the nursery (case 1).
       All objects living in the nursery are "young".
    diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst
    --- a/pypy/doc/getting-started-python.rst
    +++ b/pypy/doc/getting-started-python.rst
    @@ -32,7 +32,10 @@
     .. _`windows document`: windows.html
     
     You can translate the whole of PyPy's Python interpreter to low level C code,
    -or `CLI code`_.
    +or `CLI code`_.  If you intend to build using gcc, check to make sure that
    +the version you have is not 4.2 or you will run into `this bug`_.
    +
    +.. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391
     
     1. First `download a pre-built PyPy`_ for your architecture which you will
        use to translate your Python interpreter.  It is, of course, possible to
    @@ -64,7 +67,6 @@
        * ``libssl-dev`` (for the optional ``_ssl`` module)
        * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`)
        * ``python-sphinx`` (for the optional documentation build.  You need version 1.0.7 or later)
    -   * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing)
     
     
     3. Translation is time-consuming -- 45 minutes on a very fast machine --
    @@ -102,7 +104,7 @@
     
         $ ./pypy-c
         Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11)
    -    [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2
    +    [PyPy 1.6.0 with GCC 4.4.3] on linux2
         Type "help", "copyright", "credits" or "license" for more information.
         And now for something completely different: ``this sentence is false''
         >>>> 46 - 4
    @@ -117,19 +119,8 @@
     Installation_ below.
     
     The ``translate.py`` script takes a very large number of options controlling
    -what to translate and how.  See ``translate.py -h``. Some of the more
    -interesting options (but for now incompatible with the JIT) are:
    -
    -   * ``--stackless``: this produces a pypy-c that includes features
    -     inspired by `Stackless Python `__.
    -
    -   * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid|minimark``:
    -     choose between using
    -     the `Boehm-Demers-Weiser garbage collector`_, our reference
    -     counting implementation or one of own collector implementations
    -     (the default depends on the optimization level but is usually
    -     ``minimark``).
    -
    +what to translate and how.  See ``translate.py -h``. The default options
    +should be suitable for mostly everybody by now.
     Find a more detailed description of the various options in our `configuration
     sections`_.
     
    @@ -162,7 +153,7 @@
     
         $ ./pypy-cli
         Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11)
    -    [PyPy 1.5.0-alpha0] on linux2
    +    [PyPy 1.6.0] on linux2
         Type "help", "copyright", "credits" or "license" for more information.
         And now for something completely different: ``distopian and utopian chairs''
         >>>> 
    @@ -199,7 +190,7 @@
     
             $ ./pypy-jvm 
             Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11)
    -        [PyPy 1.5.0-alpha0] on linux2
    +        [PyPy 1.6.0] on linux2
             Type "help", "copyright", "credits" or "license" for more information.
             And now for something completely different: ``# assert did not crash''
             >>>> 
    @@ -238,7 +229,7 @@
     the ``bin/pypy`` executable.
     
     To install PyPy system wide on unix-like systems, it is recommended to put the
    -whole hierarchy alone (e.g. in ``/opt/pypy1.5``) and put a symlink to the
    +whole hierarchy alone (e.g. in ``/opt/pypy1.6``) and put a symlink to the
     ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin``
     
     If the executable fails to find suitable libraries, it will report
    diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst
    --- a/pypy/doc/getting-started.rst
    +++ b/pypy/doc/getting-started.rst
    @@ -53,11 +53,11 @@
     PyPy is ready to be executed as soon as you unpack the tarball or the zip
     file, with no need to install it in any specific location::
     
    -    $ tar xf pypy-1.5-linux.tar.bz2
    +    $ tar xf pypy-1.6-linux.tar.bz2
     
    -    $ ./pypy-1.5-linux/bin/pypy
    +    $ ./pypy-1.6/bin/pypy
         Python 2.7.1 (?, Apr 27 2011, 12:44:21)
    -    [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2
    +    [PyPy 1.6.0 with GCC 4.4.3] on linux2
         Type "help", "copyright", "credits" or "license" for more information.
         And now for something completely different: ``implementing LOGO in LOGO:
         "turtles all the way down"''
    @@ -73,16 +73,16 @@
     
         $ curl -O http://python-distribute.org/distribute_setup.py
     
    -    $ curl -O https://github.com/pypa/pip/raw/master/contrib/get-pip.py
    +    $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py
     
    -    $ ./pypy-1.5-linux/bin/pypy distribute_setup.py
    +    $ ./pypy-1.6/bin/pypy distribute_setup.py
     
    -    $ ./pypy-1.5-linux/bin/pypy get-pip.py
    +    $ ./pypy-1.6/bin/pypy get-pip.py
     
    -    $ ./pypy-1.5-linux/bin/pip install pygments  # for example
    +    $ ./pypy-1.6/bin/pip install pygments  # for example
     
    -3rd party libraries will be installed in ``pypy-1.5-linux/site-packages``, and
    -the scripts in ``pypy-1.5-linux/bin``.
    +3rd party libraries will be installed in ``pypy-1.6/site-packages``, and
    +the scripts in ``pypy-1.6/bin``.
     
     Installing using virtualenv
     ---------------------------
    diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
    --- a/pypy/doc/how-to-release.rst
    +++ b/pypy/doc/how-to-release.rst
    @@ -21,8 +21,8 @@
     Release Steps
     ----------------
     
    -* at code freeze make a release branch under
    -  http://codepeak.net/svn/pypy/release/x.y(.z). IMPORTANT: bump the
    +* at code freeze make a release branch using release-x.x.x in mercurial.
    +  IMPORTANT: bump the
       pypy version number in module/sys/version.py and in
       module/cpyext/include/patchlevel.h, notice that the branch
       will capture the revision number of this change for the release;
    @@ -42,18 +42,11 @@
         JIT: windows, linux, os/x
         no JIT: windows, linux, os/x
         sandbox: linux, os/x
    -    stackless: windows, linux, os/x
     
     * write release announcement pypy/doc/release-x.y(.z).txt
       the release announcement should contain a direct link to the download page
     * update pypy.org (under extradoc/pypy.org), rebuild and commit
     
    -* update http://codespeak.net/pypy/trunk:
    -   code0> + chmod -R yourname:users /www/codespeak.net/htdocs/pypy/trunk
    -   local> cd ..../pypy/doc && py.test
    -   local> cd ..../pypy
    -   local> rsync -az doc codespeak.net:/www/codespeak.net/htdocs/pypy/trunk/pypy/
    -
     * post announcement on morepypy.blogspot.com
     * send announcements to pypy-dev, python-list,
       python-announce, python-dev ...
    diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
    --- a/pypy/doc/index-of-release-notes.rst
    +++ b/pypy/doc/index-of-release-notes.rst
    @@ -16,3 +16,4 @@
        release-1.4.0beta.rst
        release-1.4.1.rst
        release-1.5.0.rst
    +   release-1.6.0.rst
    diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
    --- a/pypy/doc/index.rst
    +++ b/pypy/doc/index.rst
    @@ -15,7 +15,7 @@
     
     * `FAQ`_: some frequently asked questions.
     
    -* `Release 1.5`_: the latest official release
    +* `Release 1.6`_: the latest official release
     
     * `PyPy Blog`_: news and status info about PyPy 
     
    @@ -35,7 +35,7 @@
     
       * `Differences between PyPy and CPython`_
       * `What PyPy can do for your objects`_
    -  * `Stackless and coroutines`_
    +  * `Continulets and greenlets`_
       * `JIT Generation in PyPy`_ 
       * `Sandboxing Python code`_
     
    @@ -77,7 +77,7 @@
     .. _`Getting Started`: getting-started.html
     .. _`Papers`: extradoc.html
     .. _`Videos`: video-index.html
    -.. _`Release 1.5`: http://pypy.org/download.html
    +.. _`Release 1.6`: http://pypy.org/download.html
     .. _`speed.pypy.org`: http://speed.pypy.org
     .. _`RPython toolchain`: translation.html
     .. _`potential project ideas`: project-ideas.html
    @@ -122,9 +122,9 @@
     Windows, on top of .NET, and on top of Java.
     To dig into PyPy it is recommended to try out the current
     Mercurial default branch, which is always working or mostly working,
    -instead of the latest release, which is `1.5`__.
    +instead of the latest release, which is `1.6`__.
     
    -.. __: release-1.5.0.html
    +.. __: release-1.6.0.html
     
     PyPy is mainly developed on Linux and Mac OS X.  Windows is supported,
     but platform-specific bugs tend to take longer before we notice and fix
    @@ -292,8 +292,6 @@
     
     `pypy/translator/jvm/`_            the Java backend
     
    -`pypy/translator/stackless/`_      the `Stackless Transform`_
    -
     `pypy/translator/tool/`_           helper tools for translation, including the Pygame
                                        `graph viewer`_
     
    @@ -318,7 +316,7 @@
     .. _`transparent proxies`: objspace-proxies.html#tproxy
     .. _`Differences between PyPy and CPython`: cpython_differences.html
     .. _`What PyPy can do for your objects`: objspace-proxies.html
    -.. _`Stackless and coroutines`: stackless.html
    +.. _`Continulets and greenlets`: stackless.html
     .. _StdObjSpace: objspace.html#the-standard-object-space 
     .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation
     .. _`rpython`: coding-guide.html#rpython 
    @@ -337,7 +335,6 @@
     .. _`low-level type system`: rtyper.html#low-level-type
     .. _`object-oriented type system`: rtyper.html#oo-type
     .. _`garbage collector`: garbage_collection.html
    -.. _`Stackless Transform`: translation.html#the-stackless-transform
     .. _`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter
     .. _`.NET`: http://www.microsoft.com/net/
     .. _Mono: http://www.mono-project.com/
    diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst
    --- a/pypy/doc/jit/pyjitpl5.rst
    +++ b/pypy/doc/jit/pyjitpl5.rst
    @@ -103,7 +103,7 @@
     
     The meta-interpreter starts interpreting the JIT bytecode.  Each operation is
     executed and then recorded in a list of operations, called the trace.
    -Operations can have a list of boxes that operate on, arguments.  Some operations
    +Operations can have a list of boxes they operate on, arguments.  Some operations
     (like GETFIELD and GETARRAYITEM) also have special objects that describe how
     their arguments are laid out in memory.  All possible operations generated by
     tracing are listed in metainterp/resoperation.py.  When a (interpreter-level)
    diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
    --- a/pypy/doc/project-ideas.rst
    +++ b/pypy/doc/project-ideas.rst
    @@ -48,12 +48,6 @@
     
     .. image:: image/jitviewer.png
     
    -We would like to add one level to this hierarchy, by showing the generated
    -machine code for each jit operation.  The necessary information is already in
    -the log file produced by the JIT, so it is "only" a matter of teaching the
    -jitviewer to display it.  Ideally, the machine code should be hidden by
    -default and viewable on request.
    -
     The jitviewer is a web application based on flask and jinja2 (and jQuery on
     the client): if you have great web developing skills and want to help PyPy,
     this is an ideal task to get started, because it does not require any deep
    diff --git a/pypy/doc/release-1.6.0.rst b/pypy/doc/release-1.6.0.rst
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/release-1.6.0.rst
    @@ -0,0 +1,95 @@
    +========================
    +PyPy 1.6 - kickass panda
    +========================
    +
    +We're pleased to announce the 1.6 release of PyPy. This release brings a lot
    +of bugfixes and performance improvements over 1.5, and improves support for
    +Windows 32bit and OS X 64bit. This version fully implements Python 2.7.1 and
    +has beta level support for loading CPython C extensions.  You can download it
    +here:
    +
    +    http://pypy.org/download.html
    +
    +What is PyPy?
    +=============
    +
    +PyPy is a very compliant Python interpreter, almost a drop-in replacement for
    +CPython 2.7.1. It's fast (`pypy 1.5 and cpython 2.6.2`_ performance comparison)
    +due to its integrated tracing JIT compiler.
    +
    +This release supports x86 machines running Linux 32/64 or Mac OS X.  Windows 32
    +is beta (it roughly works but a lot of small issues have not been fixed so
    +far).  Windows 64 is not yet supported.
    +
    +The main topics of this release are speed and stability: on average on
    +our benchmark suite, PyPy 1.6 is between **20% and 30%** faster than PyPy 1.5,
    +which was already much faster than CPython on our set of benchmarks.
    +
    +The speed improvements have been made possible by optimizing many of the
    +layers which compose PyPy.  In particular, we improved: the Garbage Collector,
    +the JIT warmup time, the optimizations performed by the JIT, the quality of
    +the generated machine code and the implementation of our Python interpreter.
    +
    +.. _`pypy 1.5 and cpython 2.6.2`: http://speed.pypy.org
    +
    +
    +Highlights
    +==========
    +
    +* Numerous performance improvements, overall giving considerable speedups:
    +
    +  - better GC behavior when dealing with very large objects and arrays
    +
    +  - **fast ctypes:** now calls to ctypes functions are seen and optimized
    +    by the JIT, and they are up to 60 times faster than PyPy 1.5 and 10 times
    +    faster than CPython
    +
    +  - improved generators(1): simple generators now are inlined into the caller
    +    loop, making performance up to 3.5 times faster than PyPy 1.5.
    +
    +  - improved generators(2): thanks to other optimizations, even generators
    +    that are not inlined are between 10% and 20% faster than PyPy 1.5.
    +
    +  - faster warmup time for the JIT
    +
    +  - JIT support for single floats (e.g., for ``array('f')``)
    +
    +  - optimized dictionaries: the internal representation of dictionaries is now
    +    dynamically selected depending on the type of stored objects, resulting in
    +    faster code and smaller memory footprint.  For example, dictionaries whose
    +    keys are all strings, or all integers. Other dictionaries are also smaller
    +    due to bugfixes.
    +
    +* JitViewer: this is the first official release which includes the JitViewer,
    +  a web-based tool which helps you to see which parts of your Python code have
    +  been compiled by the JIT, down until the assembler. The `jitviewer`_ 0.1 has
    +  already been release and works well with PyPy 1.6.
    +
    +* The CPython extension module API has been improved and now supports many
    +  more extensions. For information on which ones are supported, please refer to
    +  our `compatibility wiki`_.
    +
    +* Multibyte encoding support: this was one of the last areas in which we were
    +  still behind CPython, but now we fully support them.
    +
    +* Preliminary support for NumPy: this release includes a preview of a very
    +  fast NumPy module integrated with the PyPy JIT.  Unfortunately, this does
    +  not mean that you can expect to take an existing NumPy program and run it on
    +  PyPy, because the module is still unfinished and supports only some of the
    +  numpy API. However, barring some details, what works should be
    +  blazingly fast :-)
    +
    +* Bugfixes: since the 1.5 release we fixed 53 bugs in our `bug tracker`_, not
    +  counting the numerous bugs that were found and reported through other
    +  channels than the bug tracker.
    +
    +Cheers,
    +
    +Hakan Ardo, Carl Friedrich Bolz, Laura Creighton, Antonio Cuni,
    +Maciej Fijalkowski, Amaury Forgeot d'Arc, Alex Gaynor,
    +Armin Rigo and the PyPy team
    +
    +.. _`jitviewer`: http://morepypy.blogspot.com/2011/08/visualization-of-jitted-code.html
    +.. _`bug tracker`: https://bugs.pypy.org
    +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home
    +
    diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst
    --- a/pypy/doc/rlib.rst
    +++ b/pypy/doc/rlib.rst
    @@ -134,69 +134,6 @@
     a hierarchy of Address classes, in a typical static-OO-programming style.
     
     
    -``rstack``
    -==========
    -
    -The `pypy/rlib/rstack.py`_ module allows an RPython program to control its own execution stack.
    -This is only useful if the program is translated using stackless. An old
    -description of the exposed functions is below.
    -
    -We introduce an RPython type ``frame_stack_top`` and a built-in function
    -``yield_current_frame_to_caller()`` that work as follows (see example below):
    -
    -* The built-in function ``yield_current_frame_to_caller()`` causes the current
    -  function's state to be captured in a new ``frame_stack_top`` object that is
    -  returned to the parent.  Only one frame, the current one, is captured this
    -  way.  The current frame is suspended and the caller continues to run.  Note
    -  that the caller is only resumed once: when
    -  ``yield_current_frame_to_caller()`` is called.  See below.
    -
    -* A ``frame_stack_top`` object can be jumped to by calling its ``switch()``
    -  method with no argument.
    -
    -* ``yield_current_frame_to_caller()`` and ``switch()`` themselves return a new
    -  ``frame_stack_top`` object: the freshly captured state of the caller of the
    -  source ``switch()`` that was just executed, or None in the case described
    -  below.
    -
    -* the function that called ``yield_current_frame_to_caller()`` also has a
    -  normal return statement, like all functions.  This statement must return
    -  another ``frame_stack_top`` object.  The latter is *not* returned to the
    -  original caller; there is no way to return several times to the caller.
    -  Instead, it designates the place to which the execution must jump, as if by
    -  a ``switch()``.  The place to which we jump this way will see a None as the
    -  source frame stack top.
    -
    -* every frame stack top must be resumed once and only once.  Not resuming
    -  it at all causes a leak.  Resuming it several times causes a crash.
    -
    -* a function that called ``yield_current_frame_to_caller()`` should not raise.
    -  It would have no implicit parent frame to propagate the exception to.  That
    -  would be a crashingly bad idea.
    -
    -The following example would print the numbers from 1 to 7 in order::
    -
    -    def g():
    -        print 2
    -        frametop_before_5 = yield_current_frame_to_caller()
    -        print 4
    -        frametop_before_7 = frametop_before_5.switch()
    -        print 6
    -        return frametop_before_7
    -
    -    def f():
    -        print 1
    -        frametop_before_4 = g()
    -        print 3
    -        frametop_before_6 = frametop_before_4.switch()
    -        print 5
    -        frametop_after_return = frametop_before_6.switch()
    -        print 7
    -        assert frametop_after_return is None
    -
    -    f()
    -
    -
     ``streamio``
     ============
     
    diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst
    --- a/pypy/doc/stackless.rst
    +++ b/pypy/doc/stackless.rst
    @@ -8,446 +8,299 @@
     ================
     
     PyPy can expose to its user language features similar to the ones
    -present in `Stackless Python`_: **no recursion depth limit**, and the
    -ability to write code in a **massively concurrent style**.  It actually
    -exposes three different paradigms to choose from:
    +present in `Stackless Python`_: the ability to write code in a
    +**massively concurrent style**.  (It does not (any more) offer the
    +ability to run with no `recursion depth limit`_, but the same effect
    +can be achieved indirectly.)
     
    -* `Tasklets and channels`_;
    +This feature is based on a custom primitive called a continulet_.
    +Continulets can be directly used by application code, or it is possible
    +to write (entirely at app-level) more user-friendly interfaces.
     
    -* Greenlets_;
    +Currently PyPy implements greenlets_ on top of continulets.  It would be
    +easy to implement tasklets and channels as well, emulating the model
    +of `Stackless Python`_.
     
    -* Plain coroutines_.
    +Continulets are extremely light-weight, which means that PyPy should be
    +able to handle programs containing large amounts of them.  However, due
    +to an implementation restriction, a PyPy compiled with
    +``--gcrootfinder=shadowstack`` consumes at least one page of physical
    +memory (4KB) per live continulet, and half a megabyte of virtual memory
    +on 32-bit or a complete megabyte on 64-bit.  Moreover, the feature is
    +only available (so far) on x86 and x86-64 CPUs; for other CPUs you need
    +to add a short page of custom assembler to
    +`pypy/translator/c/src/stacklet/`_.
     
    -All of them are extremely light-weight, which means that PyPy should be
    -able to handle programs containing large amounts of coroutines, tasklets
    -and greenlets.
     
    +Theory
    +======
     
    -Requirements
    -++++++++++++++++
    +The fundamental idea is that, at any point in time, the program happens
    +to run one stack of frames (or one per thread, in case of
    +multi-threading).  To see the stack, start at the top frame and follow
    +the chain of ``f_back`` until you reach the bottom frame.  From the
    +point of view of one of these frames, it has a ``f_back`` pointing to
    +another frame (unless it is the bottom frame), and it is itself being
    +pointed to by another frame (unless it is the top frame).
     
    -If you are running py.py on top of CPython, then you need to enable
    -the _stackless module by running it as follows::
    +The theory behind continulets is to literally take the previous sentence
    +as definition of "an O.K. situation".  The trick is that there are
    +O.K. situations that are more complex than just one stack: you will
    +always have one stack, but you can also have in addition one or more
    +detached *cycles* of frames, such that by following the ``f_back`` chain
    +you run in a circle.  But note that these cycles are indeed completely
    +detached: the top frame (the currently running one) is always the one
    +which is not the ``f_back`` of anybody else, and it is always the top of
    +a stack that ends with the bottom frame, never a part of these extra
    +cycles.
     
    -    py.py --withmod-_stackless
    +How do you create such cycles?  The fundamental operation to do so is to
    +take two frames and *permute* their ``f_back`` --- i.e. exchange them.
    +You can permute any two ``f_back`` without breaking the rule of "an O.K.
    +situation".  Say for example that ``f`` is some frame halfway down the
    +stack, and you permute its ``f_back`` with the ``f_back`` of the top
    +frame.  Then you have removed from the normal stack all intermediate
    +frames, and turned them into one stand-alone cycle.  By doing the same
    +permutation again you restore the original situation.
     
    -This is implemented internally using greenlets, so it only works on a
    -platform where `greenlets`_ are supported.  A few features do
    -not work this way, though, and really require a translated
    -``pypy-c``.
     +In practice, in PyPy, you cannot change the ``f_back`` of an arbitrary
    +frame, but only of frames stored in ``continulets``.
     
    -To obtain a translated version of ``pypy-c`` that includes Stackless
    -support, run translate.py as follows::
    -
    -    cd pypy/translator/goal
    -    python translate.py --stackless
    +Continulets are internally implemented using stacklets.  Stacklets are a
    +bit more primitive (they are really one-shot continuations), but that
    +idea only works in C, not in Python.  The basic idea of continulets is
    +to have at any point in time a complete valid stack; this is important
    +e.g. to correctly propagate exceptions (and it seems to give meaningful
    +tracebacks too).
     
     
     Application level interface
     =============================
     
    -A stackless PyPy contains a module called ``stackless``.  The interface
    -exposed by this module have not been refined much, so it should be
    -considered in-flux (as of 2007).
     
    -So far, PyPy does not provide support for ``stackless`` in a threaded
    -environment.  This limitation is not fundamental, as previous experience
    -has shown, so supporting this would probably be reasonably easy.
    +.. _continulet:
     
    -An interesting point is that the same ``stackless`` module can provide
    -a number of different concurrency paradigms at the same time.  From a
    -theoretical point of view, none of above-mentioned existing three
    -paradigms considered on its own is new: two of them are from previous
    -Python work, and the third one is a variant of the classical coroutine.
    -The new part is that the PyPy implementation manages to provide all of
    -them and let the user implement more.  Moreover - and this might be an
    -important theoretical contribution of this work - we manage to provide
    -these concurrency concepts in a "composable" way.  In other words, it
    -is possible to naturally mix in a single application multiple
    -concurrency paradigms, and multiple unrelated usages of the same
    -paradigm.  This is discussed in the Composability_ section below.
    +Continulets
    ++++++++++++
     
    +A translated PyPy contains by default a module called ``_continuation``
    +exporting the type ``continulet``.  A ``continulet`` object from this
    +module is a container that stores a "one-shot continuation".  It plays
    +the role of an extra frame you can insert in the stack, and whose
    +``f_back`` can be changed.
     
    -Infinite recursion
    -++++++++++++++++++
    +To make a continulet object, call ``continulet()`` with a callable and
    +optional extra arguments.
     
    -Any stackless PyPy executable natively supports recursion that is only
    -limited by the available memory.  As in normal Python, though, there is
    -an initial recursion limit (which is 5000 in all pypy-c's, and 1000 in
    -CPython).  It can be changed with ``sys.setrecursionlimit()``.  With a
    -stackless PyPy, any value is acceptable - use ``sys.maxint`` for
    -unlimited.
    +Later, the first time you ``switch()`` to the continulet, the callable
    +is invoked with the same continulet object as the extra first argument.
    +At that point, the one-shot continuation stored in the continulet points
    +to the caller of ``switch()``.  In other words you have a perfectly
    +normal-looking stack of frames.  But when ``switch()`` is called again,
    +this stored one-shot continuation is exchanged with the current one; it
    +means that the caller of ``switch()`` is suspended with its continuation
    +stored in the container, and the old continuation from the continulet
    +object is resumed.
     
    -In some cases, you can write Python code that causes interpreter-level
    -infinite recursion -- i.e. infinite recursion without going via
    -application-level function calls.  It is possible to limit that too,
    -with ``_stackless.set_stack_depth_limit()``, or to unlimit it completely
    -by setting it to ``sys.maxint``.
     +The most primitive API is actually ``permute()``, which just permutes the
    +one-shot continuation stored in two (or more) continulets.
     
    +In more details:
     
    -Coroutines
    -++++++++++
    +* ``continulet(callable, *args, **kwds)``: make a new continulet.
    +  Like a generator, this only creates it; the ``callable`` is only
    +  actually called the first time it is switched to.  It will be
    +  called as follows::
     
    -A Coroutine is similar to a very small thread, with no preemptive scheduling.
    -Within a family of coroutines, the flow of execution is explicitly
    -transferred from one to another by the programmer.  When execution is
    -transferred to a coroutine, it begins to execute some Python code.  When
    -it transfers execution away from itself it is temporarily suspended, and
    -when execution returns to it it resumes its execution from the
    -point where it was suspended.  Conceptually, only one coroutine is
    -actively running at any given time (but see Composability_ below).
    +      callable(cont, *args, **kwds)
     
    -The ``stackless.coroutine`` class is instantiated with no argument.
    -It provides the following methods and attributes:
    +  where ``cont`` is the same continulet object.
     
    -* ``stackless.coroutine.getcurrent()``
    +  Note that it is actually ``cont.__init__()`` that binds
    +  the continulet.  It is also possible to create a not-bound-yet
    +  continulet by calling explicitly ``continulet.__new__()``, and
    +  only bind it later by calling explicitly ``cont.__init__()``.
     
    -    Static method returning the currently running coroutine.  There is a
    -    so-called "main" coroutine object that represents the "outer"
    -    execution context, where your main program started and where it runs
    -    as long as it does not switch to another coroutine.
    +* ``cont.switch(value=None, to=None)``: start the continulet if
    +  it was not started yet.  Otherwise, store the current continuation
    +  in ``cont``, and activate the target continuation, which is the
    +  one that was previously stored in ``cont``.  Note that the target
    +  continuation was itself previously suspended by another call to
    +  ``switch()``; this older ``switch()`` will now appear to return.
    +  The ``value`` argument is any object that is carried to the target
    +  and returned by the target's ``switch()``.
     
    -* ``coro.bind(callable, *args, **kwds)``
    +  If ``to`` is given, it must be another continulet object.  In
    +  that case, performs a "double switch": it switches as described
    +  above to ``cont``, and then immediately switches again to ``to``.
    +  This is different from switching directly to ``to``: the current
    +  continuation gets stored in ``cont``, the old continuation from
    +  ``cont`` gets stored in ``to``, and only then we resume the
    +  execution from the old continuation out of ``to``.
     
    -    Bind the coroutine so that it will execute ``callable(*args,
    -    **kwds)``.  The call is not performed immediately, but only the
    -    first time we call the ``coro.switch()`` method.  A coroutine must
    -    be bound before it is switched to.  When the coroutine finishes
    -    (because the call to the callable returns), the coroutine exits and
    -    implicitly switches back to another coroutine (its "parent"); after
    -    this point, it is possible to bind it again and switch to it again.
    -    (Which coroutine is the parent of which is not documented, as it is
    -    likely to change when the interface is refined.)
    +* ``cont.throw(type, value=None, tb=None, to=None)``: similar to
    +  ``switch()``, except that immediately after the switch is done, raise
    +  the given exception in the target.
     
    -* ``coro.switch()``
    +* ``cont.is_pending()``: return True if the continulet is pending.
    +  This is False when it is not initialized (because we called
    +  ``__new__`` and not ``__init__``) or when it is finished (because
    +  the ``callable()`` returned).  When it is False, the continulet
    +  object is empty and cannot be ``switch()``-ed to.
     
    -    Suspend the current (caller) coroutine, and resume execution in the
    -    target coroutine ``coro``.
    +* ``permute(*continulets)``: a global function that permutes the
    +  continuations stored in the given continulets arguments.  Mostly
    +  theoretical.  In practice, using ``cont.switch()`` is easier and
    +  more efficient than using ``permute()``; the latter does not on
    +  its own change the currently running frame.
     
    -* ``coro.kill()``
     
    -    Kill ``coro`` by sending a CoroutineExit exception and switching
    -    execution immediately to it. This exception can be caught in the 
    -    coroutine itself and can be raised from any call to ``coro.switch()``. 
    -    This exception isn't propagated to the parent coroutine.
    +Genlets
    ++++++++
     
    -* ``coro.throw(type, value)``
    +The ``_continuation`` module also exposes the ``generator`` decorator::
     
    -    Insert an exception in ``coro`` an resume switches execution
    -    immediately to it. In the coroutine itself, this exception
    -    will come from any call to ``coro.switch()`` and can be caught. If the
    -    exception isn't caught, it will be propagated to the parent coroutine.
    +    @generator
    +    def f(cont, a, b):
    +        cont.switch(a + b)
    +        cont.switch(a + b + 1)
     
    -When a coroutine is garbage-collected, it gets the ``.kill()`` method sent to
    -it. This happens at the point the next ``.switch`` method is called, so the
    -target coroutine of this call will be executed only after the ``.kill`` has
    -finished.
    +    for i in f(10, 20):
    +        print i
     
    -Example
    -~~~~~~~
    +This example prints 30 and 31.  The only advantage over using regular
    +generators is that the generator itself is not limited to ``yield``
    +statements that must all occur syntactically in the same function.
    +Instead, we can pass around ``cont``, e.g. to nested sub-functions, and
    +call ``cont.switch(x)`` from there.
     
    -Here is a classical producer/consumer example: an algorithm computes a
    -sequence of values, while another consumes them.  For our purposes we
    -assume that the producer can generate several values at once, and the
    -consumer can process up to 3 values in a batch - it can also process
    -batches with fewer than 3 values without waiting for the producer (which
    -would be messy to express with a classical Python generator). ::
    +The ``generator`` decorator can also be applied to methods::
     
    -    def producer(lst):
    -        while True:
    -            ...compute some more values...
    -            lst.extend(new_values)
    -            coro_consumer.switch()
    -
    -    def consumer(lst):
    -        while True:
    -            # First ask the producer for more values if needed
    -            while len(lst) == 0:
    -                coro_producer.switch()
    -            # Process the available values in a batch, but at most 3
    -            batch = lst[:3]
    -            del lst[:3]
    -            ...process batch...
    -
    -    # Initialize two coroutines with a shared list as argument
    -    exchangelst = []
    -    coro_producer = coroutine()
    -    coro_producer.bind(producer, exchangelst)
    -    coro_consumer = coroutine()
    -    coro_consumer.bind(consumer, exchangelst)
    -
    -    # Start running the consumer coroutine
    -    coro_consumer.switch()
    -
    -
    -Tasklets and channels
    -+++++++++++++++++++++
    -
    -The ``stackless`` module also provides an interface that is roughly
    -compatible with the interface of the ``stackless`` module in `Stackless
    -Python`_: it contains ``stackless.tasklet`` and ``stackless.channel``
    -classes.  Tasklets are also similar to microthreads, but (like coroutines)
    -they don't actually run in parallel with other microthreads; instead,
    -they synchronize and exchange data with each other over Channels, and
    -these exchanges determine which Tasklet runs next.
    -
    -For usage reference, see the documentation on the `Stackless Python`_
    -website.
    -
    -Note that Tasklets and Channels are implemented at application-level in
    -`lib_pypy/stackless.py`_ on top of coroutines_.  You can refer to this
    -module for more details and API documentation.
    -
    -The stackless.py code tries to resemble the stackless C code as much
    -as possible. This makes the code somewhat unpythonic.
    -
    -Bird's eye view of tasklets and channels
    -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    -
    -Tasklets are a bit like threads: they encapsulate a function in such a way that
    -they can be suspended/restarted any time. Unlike threads, they won't
    -run concurrently, but must be cooperative. When using stackless
    -features, it is vitally important that no action is performed that blocks
    -everything else.  In particular, blocking input/output should be centralized
    -to a single tasklet.
    -
    -Communication between tasklets is done via channels. 
    -There are three ways for a tasklet to give up control:
    -
    -1. call ``stackless.schedule()``
    -2. send something over a channel
    -3. receive something from a channel
    -
    -A (live) tasklet can either be running, waiting to get scheduled, or be
    -blocked by a channel.
    -
    -Scheduling is done in strictly round-robin manner. A blocked tasklet
    -is removed from the scheduling queue and will be reinserted when it
    -becomes unblocked.
    -
    -Example
    -~~~~~~~
    -
    -Here is a many-producers many-consumers example, where any consumer can
    -process the result of any producer.  For this situation we set up a
    -single channel where all producer send, and on which all consumers
    -wait::
    -
    -    def producer(chan):
    -        while True:
    -            chan.send(...next value...)
    -
    -    def consumer(chan):
    -        while True:
    -            x = chan.receive()
    -            ...do something with x...
    -
    -    # Set up the N producer and M consumer tasklets
    -    common_channel = stackless.channel()
    -    for i in range(N):
    -        stackless.tasklet(producer, common_channel)()
    -    for i in range(M):
    -        stackless.tasklet(consumer, common_channel)()
    -
    -    # Run it all
    -    stackless.run()
    -
    -Each item sent over the channel is received by one of the waiting
    -consumers; which one is not specified.  The producers block until their
    -item is consumed: the channel is not a queue, but rather a meeting point
    -which causes tasklets to block until both a consumer and a producer are
    -ready.  In practice, the reason for having several consumers receiving
    -on a single channel is that some of the consumers can be busy in other
    -ways part of the time.  For example, each consumer might receive a
    -database request, process it, and send the result to a further channel
    -before it asks for the next request.  In this situation, further
    -requests can still be received by other consumers.
    +    class X:
    +        @generator
    +        def f(self, cont, a, b):
    +            ...
     
     
     Greenlets
     +++++++++
     
    -A Greenlet is a kind of primitive Tasklet with a lower-level interface
    -and with exact control over the execution order.  Greenlets are similar
    -to Coroutines, with a slightly different interface: greenlets put more
    -emphasis on a tree structure.  The various greenlets of a program form a
    -precise tree, which fully determines their order of execution.
    +Greenlets are implemented on top of continulets in `lib_pypy/greenlet.py`_.
    +See the official `documentation of the greenlets`_.
     
    -For usage reference, see the `documentation of the greenlets`_.
    -The PyPy interface is identical.  You should use ``greenlet.greenlet``
    -instead of ``stackless.greenlet`` directly, because the greenlet library
    -can give you the latter when you ask for the former on top of PyPy.
    +Note that unlike the CPython greenlets, this version does not suffer
    +from GC issues: if the program "forgets" an unfinished greenlet, it will
    +always be collected at the next garbage collection.
     
    -PyPy's greenlets do not suffer from the cyclic GC limitation that the
    -CPython greenlets have: greenlets referencing each other via local
    -variables tend to leak on top of CPython (where it is mostly impossible
    -to do the right thing).  It works correctly on top of PyPy.
     
    +Unimplemented features
    +++++++++++++++++++++++
     
    -Coroutine Pickling
    -++++++++++++++++++
    +The following features (present in some past Stackless version of PyPy)
    +are for the time being not supported any more:
     
    -Coroutines and tasklets can be pickled and unpickled, i.e. serialized to
    -a string of bytes for the purpose of storage or transmission.  This
    -allows "live" coroutines or tasklets to be made persistent, moved to
    -other machines, or cloned in any way.  The standard ``pickle`` module
    -works with coroutines and tasklets (at least in a translated ``pypy-c``;
    -unpickling live coroutines or tasklets cannot be easily implemented on
    -top of CPython).
    +* Tasklets and channels (currently ``stackless.py`` seems to import,
    +  but you have tasklets on top of coroutines on top of greenlets on
    +  top of continulets on top of stacklets, and it's probably not too
    +  hard to cut two of these levels by adapting ``stackless.py`` to
    +  use directly continulets)
     
    -To be able to achieve this result, we have to consider many objects that
    -are not normally pickleable in CPython.  Here again, the `Stackless
    -Python`_ implementation has paved the way, and we follow the same
    -general design decisions: simple internal objects like bound method
    -objects and various kinds of iterators are supported; frame objects can
    -be fully pickled and unpickled
    -(by serializing a reference to the bytecode they are
    -running in addition to all the local variables).  References to globals
    -and modules are pickled by name, similarly to references to functions
    -and classes in the traditional CPython ``pickle``.
    +* Coroutines (could be rewritten at app-level)
     
    -The "magic" part of this process is the implementation of the unpickling
    -of a chain of frames.  The Python interpreter of PyPy uses
    -interpreter-level recursion to represent application-level calls.  The
    -reason for this is that it tremendously simplifies the implementation of
    -the interpreter itself.  Indeed, in Python, almost any operation can
    -potentially result in a non-tail-recursive call to another Python
    -function.  This makes writing a non-recursive interpreter extremely
    -tedious; instead, we rely on lower-level transformations during the
    -translation process to control this recursion.  This is the `Stackless
    -Transform`_, which is at the heart of PyPy's support for stackless-style
    -concurrency.
    +* Pickling and unpickling continulets (*)
     
    -At any point in time, a chain of Python-level frames corresponds to a
    -chain of interpreter-level frames (e.g. C frames in pypy-c), where each
    -single Python-level frame corresponds to one or a few interpreter-level
    -frames - depending on the length of the interpreter-level call chain
    -from one bytecode evaluation loop to the next (recursively invoked) one.
    +* Continuing execution of a continulet in a different thread (*)
     
    -This means that it is not sufficient to simply create a chain of Python
    -frame objects in the heap of a process before we can resume execution of
    -these newly built frames.  We must recreate a corresponding chain of
    -interpreter-level frames.  To this end, we have inserted a few *named
    -resume points* (see 3.2.4, in `D07.1 Massive Parallelism and Translation Aspects`_) in the Python interpreter of PyPy.  This is the
    -motivation for implementing the interpreter-level primitives
    -``resume_state_create()`` and ``resume_state_invoke()``, the powerful
    -interface that allows an RPython program to artificially rebuild a chain
    -of calls in a reflective way, completely from scratch, and jump to it.
    +* Automatic unlimited stack (must be emulated__ so far)
     
    -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
    +* Support for other CPUs than x86 and x86-64
     
    -Example
    -~~~~~~~
    +* The app-level ``f_back`` field of frames crossing continulet boundaries
    +  is None for now, unlike what I explain in the theoretical overview
    +  above.  It mostly means that in a ``pdb.set_trace()`` you cannot go
     +  ``up`` past continulet boundaries.  This could be fixed.
     
    -(See `demo/pickle_coroutine.py`_ for the complete source of this demo.)
    +.. __: `recursion depth limit`_
     
    -Consider a program which contains a part performing a long-running
    -computation::
    +(*) Pickling, as well as changing threads, could be implemented by using
    +a "soft" stack switching mode again.  We would get either "hard" or
    +"soft" switches, similarly to Stackless Python 3rd version: you get a
    +"hard" switch (like now) when the C stack contains non-trivial C frames
    +to save, and a "soft" switch (like previously) when it contains only
    +simple calls from Python to Python.  Soft-switched continulets would
    +also consume a bit less RAM, and the switch might be a bit faster too
    +(unsure about that; what is the Stackless Python experience?).
     
    -    def ackermann(x, y):
    -        if x == 0:
    -            return y + 1
    -        if y == 0:
    -            return ackermann(x - 1, 1)
    -        return ackermann(x - 1, ackermann(x, y - 1))
     
    -By using pickling, we can save the state of the computation while it is
    -running, for the purpose of restoring it later and continuing the
    -computation at another time or on a different machine.  However,
    -pickling does not produce a whole-program dump: it can only pickle
    -individual coroutines.  This means that the computation should be
    -started in its own coroutine::
    +Recursion depth limit
    ++++++++++++++++++++++
     
    -    # Make a coroutine that will run 'ackermann(3, 8)'
    -    coro = coroutine()
    -    coro.bind(ackermann, 3, 8)
    +You can use continulets to emulate the infinite recursion depth present
    +in Stackless Python and in stackless-enabled older versions of PyPy.
     
    -    # Now start running the coroutine
    -    result = coro.switch()
    +The trick is to start a continulet "early", i.e. when the recursion
    +depth is very low, and switch to it "later", i.e. when the recursion
    +depth is high.  Example::
     
    -The coroutine itself must switch back to the main program when it needs
    -to be interrupted (we can only pickle suspended coroutines).  Due to
    -current limitations this requires an explicit check in the
    -``ackermann()`` function::
    +    from _continuation import continulet
     
    -    def ackermann(x, y):
    -        if interrupt_flag:      # test a global flag
    -            main.switch()       # and switch back to 'main' if it is set
    -        if x == 0:
    -            return y + 1
    -        if y == 0:
    -            return ackermann(x - 1, 1)
    -        return ackermann(x - 1, ackermann(x, y - 1))
    +    def invoke(_, callable, arg):
    +        return callable(arg)
     
    -The global ``interrupt_flag`` would be set for example by a timeout, or
    -by a signal handler reacting to Ctrl-C, etc.  It causes the coroutine to
    -transfer control back to the main program.  The execution comes back
    -just after the line ``coro.switch()``, where we can pickle the coroutine
    -if necessary::
    +    def bootstrap(c):
    +        # this loop runs forever, at a very low recursion depth
    +        callable, arg = c.switch()
    +        while True:
    +            # start a new continulet from here, and switch to
    +            # it using an "exchange", i.e. a switch with to=.
    +            to = continulet(invoke, callable, arg)
    +            callable, arg = c.switch(to=to)
     
    -    if not coro.is_alive:
    -        print "finished; the result is:", result
    -    else:
    -        # save the state of the suspended coroutine
    -        f = open('demo.pickle', 'w')
    -        pickle.dump(coro, f)
    -        f.close()
    +    c = continulet(bootstrap)
    +    c.switch()
     
    -The process can then stop.  At any later time, or on another machine,
    -we can reload the file and restart the coroutine with::
     
    -    f = open('demo.pickle', 'r')
    -    coro = pickle.load(f)
    -    f.close()
    -    result = coro.switch()
    +    def recursive(n):
    +        if n == 0:
    +            return ("ok", n)
    +        if n % 200 == 0:
    +            prev = c.switch((recursive, n - 1))
    +        else:
    +            prev = recursive(n - 1)
    +        return (prev[0], prev[1] + 1)
     
    -Limitations
    -~~~~~~~~~~~
    +    print recursive(999999)     # prints ('ok', 999999)
     
    -Coroutine pickling is subject to some limitations.  First of all, it is
    -not a whole-program "memory dump".  It means that only the "local" state
    -of a coroutine is saved.  The local state is defined to include the
    -chain of calls and the local variables, but not for example the value of
    -any global variable.
    +Note that if you press Ctrl-C while running this example, the traceback
    +will be built with *all* recursive() calls so far, even if this is more
    +than the number that can possibly fit in the C stack.  These frames are
    +"overlapping" each other in the sense of the C stack; more precisely,
    +they are copied out of and into the C stack as needed.
     
    -As in normal Python, the pickle will not include any function object's
    -code, any class definition, etc., but only references to functions and
    -classes.  Unlike normal Python, the pickle contains frames.  A pickled
    -frame stores a bytecode index, representing the current execution
    -position.  This means that the user program cannot be modified *at all*
    -between pickling and unpickling!
    +(The example above also makes use of the following general "guideline"
    +to help newcomers write continulets: in ``bootstrap(c)``, only call
    +methods on ``c``, not on another continulet object.  That's why we wrote
    +``c.switch(to=to)`` and not ``to.switch()``, which would mess up the
    +state.  This is however just a guideline; in general we would recommend
    +to use other interfaces like genlets and greenlets.)
     
    -On the other hand, the pickled data is fairly independent from the
    -platform and from the PyPy version.
     
    -Pickling/unpickling fails if the coroutine is suspended in a state that
    -involves Python frames which were *indirectly* called.  To define this
    -more precisely, a Python function can issue a regular function or method
    -call to invoke another Python function - this is a *direct* call and can
    -be pickled and unpickled.  But there are many ways to invoke a Python
    -function indirectly.  For example, most operators can invoke a special
    -method ``__xyz__()`` on a class, various built-in functions can call
    -back Python functions, signals can invoke signal handlers, and so on.
    -These cases are not supported yet.
    -
    -
    -Composability
    -+++++++++++++
    +Theory of composability
    ++++++++++++++++++++++++
     
     Although the concept of coroutines is far from new, they have not been
     generally integrated into mainstream languages, or only in limited form
     (like generators in Python and iterators in C#).  We can argue that a
     possible reason for that is that they do not scale well when a program's
     complexity increases: they look attractive in small examples, but the
    -models that require explicit switching, by naming the target coroutine,
    -do not compose naturally.  This means that a program that uses
    -coroutines for two unrelated purposes may run into conflicts caused by
    -unexpected interactions.
    +models that require explicit switching, for example by naming the target
    +coroutine, do not compose naturally.  This means that a program that
    +uses coroutines for two unrelated purposes may run into conflicts caused
    +by unexpected interactions.
     
     To illustrate the problem, consider the following example (simplified
    -code; see the full source in
    -`pypy/module/_stackless/test/test_composable_coroutine.py`_).  First, a
    -simple usage of coroutine::
    +code using a theorical ``coroutine`` class).  First, a simple usage of
    +coroutine::
     
         main_coro = coroutine.getcurrent()    # the main (outer) coroutine
         data = []
    @@ -530,74 +383,35 @@
     main coroutine, which confuses the ``generator_iterator.next()`` method
     (it gets resumed, but not as a result of a call to ``Yield()``).
     
    -As part of trying to combine multiple different paradigms into a single
    -application-level module, we have built a way to solve this problem.
    -The idea is to avoid the notion of a single, global "main" coroutine (or
    -a single main greenlet, or a single main tasklet).  Instead, each
    -conceptually separated user of one of these concurrency interfaces can
    -create its own "view" on what the main coroutine/greenlet/tasklet is,
    -which other coroutine/greenlet/tasklets there are, and which of these is
    -the currently running one.  Each "view" is orthogonal to the others.  In
    -particular, each view has one (and exactly one) "current"
    -coroutine/greenlet/tasklet at any point in time.  When the user switches
    -to a coroutine/greenlet/tasklet, it implicitly means that he wants to
    -switch away from the current coroutine/greenlet/tasklet *that belongs to
    -the same view as the target*.
    +Thus the notion of coroutine is *not composable*.  By opposition, the
    +primitive notion of continulets is composable: if you build two
    +different interfaces on top of it, or have a program that uses twice the
    +same interface in two parts, then assuming that both parts independently
    +work, the composition of the two parts still works.
     
    -The precise application-level interface has not been fixed yet; so far,
    -"views" in the above sense are objects of the type
    -``stackless.usercostate``.  The above two examples can be rewritten in
    -the following way::
    +A full proof of that claim would require careful definitions, but let us
    +just claim that this fact is true because of the following observation:
    +the API of continulets is such that, when doing a ``switch()``, it
    +requires the program to have some continulet to explicitly operate on.
    +It shuffles the current continuation with the continuation stored in
    +that continulet, but has no effect outside.  So if a part of a program
    +has a continulet object, and does not expose it as a global, then the
    +rest of the program cannot accidentally influence the continuation
    +stored in that continulet object.
     
    -    producer_view = stackless.usercostate()   # a local view
    -    main_coro = producer_view.getcurrent()    # the main (outer) coroutine
    -    ...
    -    producer_coro = producer_view.newcoroutine()
    -    ...
    -
    -and::
    -
    -    generators_view = stackless.usercostate()
    -
    -    def generator(f):
    -        def wrappedfunc(*args, **kwds):
    -            g = generators_view.newcoroutine(generator_iterator)
    -            ...
    -
    -            ...generators_view.getcurrent()...
    -
    -Then the composition ``grab_values()`` works as expected, because the
    -two views are independent.  The coroutine captured as ``self.caller`` in
    -the ``generator_iterator.next()`` method is the main coroutine of the
    -``generators_view``.  It is no longer the same object as the main
    -coroutine of the ``producer_view``, so when ``data_producer()`` issues
    -the following command::
    -
    -    main_coro.switch()
    -
    -the control flow cannot accidentally jump back to
    -``generator_iterator.next()``.  In other words, from the point of view
    -of ``producer_view``, the function ``grab_next_value()`` always runs in
    -its main coroutine ``main_coro`` and the function ``data_producer`` in
    -its coroutine ``producer_coro``.  This is the case independently of
    -which ``generators_view``-based coroutine is the current one when
    -``grab_next_value()`` is called.
    -
    -Only code that has explicit access to the ``producer_view`` or its
    -coroutine objects can perform switches that are relevant for the
    -generator code.  If the view object and the coroutine objects that share
    -this view are all properly encapsulated inside the generator logic, no
    -external code can accidentally temper with the expected control flow any
    -longer.
    -
    -In conclusion: we will probably change the app-level interface of PyPy's
    -stackless module in the future to not expose coroutines and greenlets at
    -all, but only views.  They are not much more difficult to use, and they
    -scale automatically to larger programs.
    +In other words, if we regard the continulet object as being essentially
    +a modifiable ``f_back``, then it is just a link between the frame of
    +``callable()`` and the parent frame --- and it cannot be arbitrarily
    +changed by unrelated code, as long as they don't explicitly manipulate
    +the continulet object.  Typically, both the frame of ``callable()``
    +(commonly a local function) and its parent frame (which is the frame
    +that switched to it) belong to the same class or module; so from that
    +point of view the continulet is a purely local link between two local
    +frames.  It doesn't make sense to have a concept that allows this link
    +to be manipulated from outside.
     
     
     .. _`Stackless Python`: http://www.stackless.com
     .. _`documentation of the greenlets`: http://packages.python.org/greenlet/
    -.. _`Stackless Transform`: translation.html#the-stackless-transform
     
     .. include:: _ref.txt
    diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst
    --- a/pypy/doc/translation.rst
    +++ b/pypy/doc/translation.rst
    @@ -552,14 +552,15 @@
     
     The stackless transform converts functions into a form that knows how
     to save the execution point and active variables into a heap structure
    -and resume execution at that point.  This is used to implement
    +and resume execution at that point.  This was used to implement
     coroutines as an RPython-level feature, which in turn are used to
    -implement `coroutines, greenlets and tasklets`_ as an application
    +implement coroutines, greenlets and tasklets as an application
     level feature for the Standard Interpreter.
     
    -Enable the stackless transformation with :config:`translation.stackless`.
    +The stackless transformation has been deprecated and is no longer
    +available in trunk.  It has been replaced with continulets_.
     
    -.. _`coroutines, greenlets and tasklets`: stackless.html
    +.. _continulets: stackless.html
     
     .. _`preparing the graphs for source generation`:
     
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -111,6 +111,9 @@
         def setslotvalue(self, index, w_val):
             raise NotImplementedError
     
    +    def delslotvalue(self, index):
    +        raise NotImplementedError
    +
         def descr_call_mismatch(self, space, opname, RequiredClass, args):
             if RequiredClass is None:
                 classname = '?'
    @@ -623,9 +626,9 @@
                 self.default_compiler = compiler
                 return compiler
     
    -    def createframe(self, code, w_globals, closure=None):
    +    def createframe(self, code, w_globals, outer_func=None):
             "Create an empty PyFrame suitable for this code object."
    -        return self.FrameClass(self, code, w_globals, closure)
    +        return self.FrameClass(self, code, w_globals, outer_func)
     
         def allocate_lock(self):
             """Return an interp-level Lock object if threads are enabled,
    diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
    --- a/pypy/interpreter/function.py
    +++ b/pypy/interpreter/function.py
    @@ -30,7 +30,7 @@
         can_change_code = True
         _immutable_fields_ = ['code?',
                               'w_func_globals?',
    -                          'closure?',
    +                          'closure?[*]',
                               'defs_w?[*]',
                               'name?']
     
    @@ -96,7 +96,7 @@
                 assert isinstance(code, PyCode)
                 if nargs < 5:
                     new_frame = self.space.createframe(code, self.w_func_globals,
    -                                                   self.closure)
    +                                                   self)
                     for i in funccallunrolling:
                         if i < nargs:
                             new_frame.locals_stack_w[i] = args_w[i]
    @@ -156,7 +156,7 @@
         def _flat_pycall(self, code, nargs, frame):
             # code is a PyCode
             new_frame = self.space.createframe(code, self.w_func_globals,
    -                                                   self.closure)
    +                                                   self)
             for i in xrange(nargs):
                 w_arg = frame.peekvalue(nargs-1-i)
                 new_frame.locals_stack_w[i] = w_arg
    @@ -167,7 +167,7 @@
         def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load):
             # code is a PyCode
             new_frame = self.space.createframe(code, self.w_func_globals,
    -                                                   self.closure)
    +                                                   self)
             for i in xrange(nargs):
                 w_arg = frame.peekvalue(nargs-1-i)
                 new_frame.locals_stack_w[i] = w_arg
    diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
    --- a/pypy/interpreter/gateway.py
    +++ b/pypy/interpreter/gateway.py
    @@ -64,7 +64,7 @@
                     self.visit_self(el[1], *args)
                 else:
                     self.visit_function(el, *args)
    -        else:
    +        elif isinstance(el, type):
                 for typ in self.bases_order:
                     if issubclass(el, typ):
                         visit = getattr(self, "visit__%s" % (typ.__name__,))
    @@ -73,6 +73,8 @@
                 else:
                     raise Exception("%s: no match for unwrap_spec element %s" % (
                         self.__class__.__name__, el))
    +        else:
    +            raise Exception("unable to dispatch, %s, perhaps your parameter should have started with w_?" % el)
     
         def apply_over(self, unwrap_spec, *extra):
             dispatch = self.dispatch
    diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
    --- a/pypy/interpreter/nestedscope.py
    +++ b/pypy/interpreter/nestedscope.py
    @@ -8,7 +8,7 @@
     
     class Cell(Wrappable):
         "A simple container for a wrapped value."
    -    
    +
         def __init__(self, w_value=None):
             self.w_value = w_value
     
    @@ -90,32 +90,33 @@
         #     variables coming from a parent function in which i'm nested
         # 'closure' is a list of Cell instances: the received free vars.
     
    -    cells = None
    -
         @jit.unroll_safe
    -    def initialize_frame_scopes(self, closure, code):
    -        super_initialize_frame_scopes(self, closure, code)
    +    def initialize_frame_scopes(self, outer_func, code):
    +        super_initialize_frame_scopes(self, outer_func, code)
             ncellvars = len(code.co_cellvars)
             nfreevars = len(code.co_freevars)
             if not nfreevars:
                 if not ncellvars:
    +                self.cells = []
                     return            # no self.cells needed - fast path
    -            if closure is None:
    -                closure = []
    -        elif closure is None:
    +        elif outer_func is None:
                 space = self.space
                 raise OperationError(space.w_TypeError,
                                      space.wrap("directly executed code object "
                                                 "may not contain free variables"))
    -        if len(closure) != nfreevars:
    +        if outer_func and outer_func.closure:
    +            closure_size = len(outer_func.closure)
    +        else:
    +            closure_size = 0
    +        if closure_size != nfreevars:
                 raise ValueError("code object received a closure with "
                                      "an unexpected number of free variables")
             self.cells = [None] * (ncellvars + nfreevars)
             for i in range(ncellvars):
                 self.cells[i] = Cell()
             for i in range(nfreevars):
    -            self.cells[i + ncellvars] = closure[i]
    -    
    +            self.cells[i + ncellvars] = outer_func.closure[i]
    +
         def _getcells(self):
             return self.cells
     
    diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
    --- a/pypy/interpreter/pycode.py
    +++ b/pypy/interpreter/pycode.py
    @@ -198,7 +198,7 @@
     
         def funcrun(self, func, args):
             frame = self.space.createframe(self, func.w_func_globals,
    -                                  func.closure)
    +                                  func)
             sig = self._signature
             # speed hack
             fresh_frame = jit.hint(frame, access_directly=True,
    @@ -211,7 +211,7 @@
     
         def funcrun_obj(self, func, w_obj, args):
             frame = self.space.createframe(self, func.w_func_globals,
    -                                  func.closure)
    +                                  func)
             sig = self._signature
             # speed hack
             fresh_frame = jit.hint(frame, access_directly=True,
    diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
    --- a/pypy/interpreter/pyframe.py
    +++ b/pypy/interpreter/pyframe.py
    @@ -51,7 +51,10 @@
         is_being_profiled        = False
         escaped                  = False  # see mark_as_escaped()
     
    -    def __init__(self, space, code, w_globals, closure):
    +    def __init__(self, space, code, w_globals, outer_func):
    +        if not we_are_translated():
    +            assert type(self) in (space.FrameClass, CPythonFrame), (
    +                "use space.FrameClass(), not directly PyFrame()")
             self = hint(self, access_directly=True, fresh_virtualizable=True)
             assert isinstance(code, pycode.PyCode)
             self.pycode = code
    @@ -67,7 +70,7 @@
                 self.builtin = space.builtin.pick_builtin(w_globals)
             # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
             # class bodies only have CO_NEWLOCALS.
    -        self.initialize_frame_scopes(closure, code)
    +        self.initialize_frame_scopes(outer_func, code)
             self.f_lineno = code.co_firstlineno
     
         def mark_as_escaped(self):
    @@ -80,7 +83,7 @@
             self.escaped = True
     
         def append_block(self, block):
    -        block.previous = self.lastblock
    +        assert block.previous is self.lastblock
             self.lastblock = block
     
         def pop_block(self):
    @@ -106,15 +109,16 @@
             while i >= 0:
                 block = lst[i]
                 i -= 1
    -            self.append_block(block)
    +            block.previous = self.lastblock
    +            self.lastblock = block
     
         def get_builtin(self):
             if self.space.config.objspace.honor__builtins__:
                 return self.builtin
             else:
                 return self.space.builtin
    -        
    -    def initialize_frame_scopes(self, closure, code): 
    +
    +    def initialize_frame_scopes(self, outer_func, code):
             # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
             # class bodies only have CO_NEWLOCALS.
             # CO_NEWLOCALS: make a locals dict unless optimized is also set
    @@ -381,7 +385,11 @@
             
             # do not use the instance's __init__ but the base's, because we set
             # everything like cells from here
    -        PyFrame.__init__(self, space, pycode, w_globals, closure)
    +        # XXX hack
    +        from pypy.interpreter.function import Function
    +        outer_func = Function(space, None, closure=closure,
    +                             forcename="fake")
    +        PyFrame.__init__(self, space, pycode, w_globals, outer_func)
             f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True)
             new_frame.f_backref = jit.non_virtual_ref(f_back)
     
    diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
    --- a/pypy/interpreter/pyopcode.py
    +++ b/pypy/interpreter/pyopcode.py
    @@ -892,16 +892,16 @@
             raise BytecodeCorruption, "old opcode, no longer in use"
     
         def SETUP_LOOP(self, offsettoend, next_instr):
    -        block = LoopBlock(self, next_instr + offsettoend)
    -        self.append_block(block)
    +        block = LoopBlock(self, next_instr + offsettoend, self.lastblock)
    +        self.lastblock = block
     
         def SETUP_EXCEPT(self, offsettoend, next_instr):
    -        block = ExceptBlock(self, next_instr + offsettoend)
    -        self.append_block(block)
    +        block = ExceptBlock(self, next_instr + offsettoend, self.lastblock)
    +        self.lastblock = block
     
         def SETUP_FINALLY(self, offsettoend, next_instr):
    -        block = FinallyBlock(self, next_instr + offsettoend)
    -        self.append_block(block)
    +        block = FinallyBlock(self, next_instr + offsettoend, self.lastblock)
    +        self.lastblock = block
     
         def SETUP_WITH(self, offsettoend, next_instr):
             w_manager = self.peekvalue()
    @@ -915,8 +915,8 @@
             w_exit = self.space.get(w_descr, w_manager)
             self.settopvalue(w_exit)
             w_result = self.space.get_and_call_function(w_enter, w_manager)
    -        block = WithBlock(self, next_instr + offsettoend)
    -        self.append_block(block)
    +        block = WithBlock(self, next_instr + offsettoend, self.lastblock)
    +        self.lastblock = block
             self.pushvalue(w_result)
     
         def WITH_CLEANUP(self, oparg, next_instr):
    @@ -1247,10 +1247,10 @@
     
         _immutable_ = True
     
    -    def __init__(self, frame, handlerposition):
    +    def __init__(self, frame, handlerposition, previous):
             self.handlerposition = handlerposition
             self.valuestackdepth = frame.valuestackdepth
    -        self.previous = None # this makes a linked list of blocks
    +        self.previous = previous   # this makes a linked list of blocks
     
         def __eq__(self, other):
             return (self.__class__ is other.__class__ and
    diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py
    --- a/pypy/interpreter/pyparser/future.py
    +++ b/pypy/interpreter/pyparser/future.py
    @@ -109,25 +109,19 @@
                 self.getc() == self.getc(+2)):
                 self.pos += 3
                 while 1: # Deal with a triple quoted docstring
    -                if self.getc() == '\\':
    -                    self.pos += 2
    +                c = self.getc()
    +                if c == '\\':
    +                    self.pos += 1
    +                    self._skip_next_char_from_docstring()
    +                elif c != endchar:
    +                    self._skip_next_char_from_docstring()
                     else:
    -                    c = self.getc()
    -                    if c != endchar:
    -                        self.pos += 1
    -                        if c == '\n':
    -                            self.atbol()
    -                        elif c == '\r':
    -                            if self.getc() == '\n':
    -                                self.pos += 1
    -                                self.atbol()
    -                    else:
    -                        self.pos += 1
    -                        if (self.getc() == endchar and
    -                            self.getc(+1) == endchar):
    -                            self.pos += 2
    -                            self.consume_empty_line()
    -                            break
    +                    self.pos += 1
    +                    if (self.getc() == endchar and
    +                        self.getc(+1) == endchar):
    +                        self.pos += 2
    +                        self.consume_empty_line()
    +                        break
     
             else: # Deal with a single quoted docstring
                 self.pos += 1
    @@ -138,17 +132,21 @@
                         self.consume_empty_line()
                         return
                     elif c == '\\':
    -                    # Deal with linefeeds
    -                    if self.getc() != '\r':
    -                        self.pos += 1
    -                    else:
    -                        self.pos += 1
    -                        if self.getc() == '\n':
    -                            self.pos += 1
    +                    self._skip_next_char_from_docstring()
                     elif c in '\r\n':
                         # Syntax error
                         return
     
    +    def _skip_next_char_from_docstring(self):
    +        c = self.getc()
    +        self.pos += 1
    +        if c == '\n':
    +            self.atbol()
    +        elif c == '\r':
    +            if self.getc() == '\n':
    +                self.pos += 1
    +            self.atbol()
    +
         def consume_continuation(self):
             c = self.getc()
             if c in '\n\r':
    diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py
    --- a/pypy/interpreter/pyparser/test/test_futureautomaton.py
    +++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py
    @@ -221,6 +221,14 @@
         assert f.lineno == 3
         assert f.col_offset == 0
     
    +def test_lots_of_continuation_lines():
    +    s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n"
    +    f = run(s)
    +    assert f.pos == len(s)
    +    assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
    +    assert f.lineno == 8
    +    assert f.col_offset == 0
    +
     # This looks like a bug in cpython parser
     # and would require extensive modifications
     # to future.py in order to emulate the same behaviour
    @@ -239,3 +247,19 @@
             raise AssertionError('IndentationError not raised')
         assert f.lineno == 2
         assert f.col_offset == 0
    +
    +def test_continuation_lines_in_docstring_single_quoted():
    +    s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom  __future__ import division\n'
    +    f = run(s)
    +    assert f.pos == len(s)
    +    assert f.flags == fut.CO_FUTURE_DIVISION
    +    assert f.lineno == 8
    +    assert f.col_offset == 0
    +
    +def test_continuation_lines_in_docstring_triple_quoted():
    +    s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom  __future__ import division\n'
    +    f = run(s)
    +    assert f.pos == len(s)
    +    assert f.flags == fut.CO_FUTURE_DIVISION
    +    assert f.lineno == 8
    +    assert f.col_offset == 0
    diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py
    --- a/pypy/interpreter/test/test_gateway.py
    +++ b/pypy/interpreter/test/test_gateway.py
    @@ -704,7 +704,7 @@
     class TestPassThroughArguments_CALL_METHOD(TestPassThroughArguments):
     
         def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',), **{
    +        space = gettestobjspace(usemodules=('itertools',), **{
                 "objspace.opcodes.CALL_METHOD": True
                 })
             cls.space = space
    diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
    --- a/pypy/interpreter/typedef.py
    +++ b/pypy/interpreter/typedef.py
    @@ -258,6 +258,11 @@
                         self.slots_w = [None] * nslots
                 def setslotvalue(self, index, w_value):
                     self.slots_w[index] = w_value
    +            def delslotvalue(self, index):
    +                if self.slots_w[index] is None:
    +                    return False
    +                self.slots_w[index] = None
    +                return True
                 def getslotvalue(self, index):
                     return self.slots_w[index]
             add(Proto)
    @@ -530,11 +535,10 @@
             """member.__delete__(obj)
             Delete the value of the slot 'member' from the given 'obj'."""
             self.typecheck(space, w_obj)
    -        w_oldresult = w_obj.getslotvalue(self.index)
    -        if w_oldresult is None:
    +        success = w_obj.delslotvalue(self.index)
    +        if not success:
                 raise OperationError(space.w_AttributeError,
                                      space.wrap(self.name)) # XXX better message
    -        w_obj.setslotvalue(self.index, None)
     
     Member.typedef = TypeDef(
         "member_descriptor",
    diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
    --- a/pypy/jit/backend/llgraph/llimpl.py
    +++ b/pypy/jit/backend/llgraph/llimpl.py
    @@ -55,6 +55,12 @@
         else:
             return LLSupport.from_rstr(s)
     
    +FLOAT_ARRAY_TP = lltype.Ptr(lltype.Array(lltype.Float, hints={"nolength": True}))
    +def maybe_uncast(TP, array):
    +    if array._TYPE.TO._hints.get("uncast_on_llgraph"):
    +        array = rffi.cast(TP, array)
    +    return array
    +
     # a list of argtypes of all operations - couldn't find any and it's
     # very useful.  Note however that the table is half-broken here and
     # there, in ways that are sometimes a bit hard to fix; that's why
    @@ -1102,13 +1108,15 @@
             return heaptracker.adr2int(llmemory.cast_ptr_to_adr(x))
         if TP == llmemory.Address:
             return heaptracker.adr2int(x)
    +    if TP is lltype.SingleFloat:
    +        return longlong.singlefloat2int(x)
         return lltype.cast_primitive(lltype.Signed, x)
     
     def cast_from_int(TYPE, x):
         if isinstance(TYPE, lltype.Ptr):
             if isinstance(x, (int, long, llmemory.AddressAsInt)):
                 x = llmemory.cast_int_to_adr(x)
    -        if TYPE is rffi.VOIDP:
    +        if TYPE is rffi.VOIDP or TYPE.TO._hints.get("uncast_on_llgraph"):
                 # assume that we want a "C-style" cast, without typechecking the value
                 return rffi.cast(TYPE, x)
             return llmemory.cast_adr_to_ptr(x, TYPE)
    @@ -1117,6 +1125,9 @@
                 x = llmemory.cast_int_to_adr(x)
             assert lltype.typeOf(x) == llmemory.Address
             return x
    +    elif TYPE is lltype.SingleFloat:
    +        assert lltype.typeOf(x) is lltype.Signed
    +        return longlong.int2singlefloat(x)
         else:
             if lltype.typeOf(x) == llmemory.Address:
                 x = heaptracker.adr2int(x)
    @@ -1171,6 +1182,7 @@
         del _future_values[:]
     
     def set_future_value_int(index, value):
    +    assert lltype.typeOf(value) is lltype.Signed
         set_future_value_ref(index, value)
     
     def set_future_value_float(index, value):
    @@ -1354,8 +1366,8 @@
         return cast_to_floatstorage(array.getitem(index))
     
     def do_getarrayitem_raw_float(array, index):
    -    array = array.adr.ptr._obj
    -    return cast_to_floatstorage(array.getitem(index))
    +    array = maybe_uncast(FLOAT_ARRAY_TP, array.adr.ptr)
    +    return cast_to_floatstorage(array._obj.getitem(index))
     
     def do_getarrayitem_gc_ptr(array, index):
         array = array._obj.container
    @@ -1433,8 +1445,9 @@
         newvalue = cast_from_floatstorage(ITEMTYPE, newvalue)
         array.setitem(index, newvalue)
     
    +
     def do_setarrayitem_raw_float(array, index, newvalue):
    -    array = array.adr.ptr
    +    array = maybe_uncast(FLOAT_ARRAY_TP, array.adr.ptr)
         ITEMTYPE = lltype.typeOf(array).TO.OF
         newvalue = cast_from_floatstorage(ITEMTYPE, newvalue)
         array._obj.setitem(index, newvalue)
    @@ -1537,6 +1550,7 @@
         'i': lltype.Signed,
         'f': lltype.Float,
         'L': lltype.SignedLongLong,
    +    'S': lltype.SingleFloat,
         'v': lltype.Void,
         }
     
    diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py
    --- a/pypy/jit/backend/llgraph/runner.py
    +++ b/pypy/jit/backend/llgraph/runner.py
    @@ -23,13 +23,14 @@
     class Descr(history.AbstractDescr):
     
         def __init__(self, ofs, typeinfo, extrainfo=None, name=None,
    -                 arg_types=None, count_fields_if_immut=-1):
    +                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0):
             self.ofs = ofs
             self.typeinfo = typeinfo
             self.extrainfo = extrainfo
             self.name = name
             self.arg_types = arg_types
             self.count_fields_if_immut = count_fields_if_immut
    +        self.ffi_flags = ffi_flags
     
         def get_arg_types(self):
             return self.arg_types
    @@ -65,6 +66,9 @@
         def count_fields_if_immutable(self):
             return self.count_fields_if_immut
     
    +    def get_ffi_flags(self):
    +        return self.ffi_flags
    +
         def __lt__(self, other):
             raise TypeError("cannot use comparison on Descrs")
         def __le__(self, other):
    @@ -89,6 +93,7 @@
     class BaseCPU(model.AbstractCPU):
         supports_floats = True
         supports_longlong = llimpl.IS_32_BIT
    +    supports_singlefloats = True
     
         def __init__(self, rtyper, stats=None, opts=None,
                      translate_support_code=False,
    @@ -111,14 +116,14 @@
             return False
     
         def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None,
    -                 arg_types=None, count_fields_if_immut=-1):
    +                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0):
             key = (ofs, typeinfo, extrainfo, name, arg_types,
    -               count_fields_if_immut)
    +               count_fields_if_immut, ffi_flags)
             try:
                 return self._descrs[key]
             except KeyError:
                 descr = Descr(ofs, typeinfo, extrainfo, name, arg_types,
    -                          count_fields_if_immut)
    +                          count_fields_if_immut, ffi_flags)
                 self._descrs[key] = descr
                 return descr
     
    @@ -321,7 +326,7 @@
             token = history.getkind(getattr(S, fieldname))
             return self.getdescr(ofs, token[0], name=fieldname, extrainfo=ofs2)
     
    -    def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None):
    +    def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
             arg_types = []
             for ARG in ARGS:
                 token = history.getkind(ARG)
    @@ -335,16 +340,21 @@
             return self.getdescr(0, token[0], extrainfo=extrainfo,
                                  arg_types=''.join(arg_types))
     
    -    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo=None):
    +    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags):
             from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind
    +        from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind
             arg_types = []
    -        for arg in ffi_args:
    -            kind = get_ffi_type_kind(arg)
    -            if kind != history.VOID:
    -                arg_types.append(kind)
    -        reskind = get_ffi_type_kind(ffi_result)
    +        try:
    +            for arg in ffi_args:
    +                kind = get_ffi_type_kind(self, arg)
    +                if kind != history.VOID:
    +                    arg_types.append(kind)
    +            reskind = get_ffi_type_kind(self, ffi_result)
    +        except UnsupportedKind:
    +            return None
             return self.getdescr(0, reskind, extrainfo=extrainfo,
    -                             arg_types=''.join(arg_types))
    +                             arg_types=''.join(arg_types),
    +                             ffi_flags=ffi_flags)
     
     
         def grab_exc_value(self):
    @@ -552,7 +562,7 @@
             return FieldDescr.new(T1, fieldname)
     
         @staticmethod
    -    def calldescrof(FUNC, ARGS, RESULT, extrainfo=None):
    +    def calldescrof(FUNC, ARGS, RESULT, extrainfo):
             return StaticMethDescr.new(FUNC, ARGS, RESULT, extrainfo)
     
         @staticmethod
    diff --git a/pypy/jit/backend/llgraph/test/test_llgraph.py b/pypy/jit/backend/llgraph/test/test_llgraph.py
    --- a/pypy/jit/backend/llgraph/test/test_llgraph.py
    +++ b/pypy/jit/backend/llgraph/test/test_llgraph.py
    @@ -19,6 +19,9 @@
         def setup_method(self, _):
             self.cpu = self.cpu_type(None)
     
    +    def test_memoryerror(self):
    +        py.test.skip("does not make much sense on the llgraph backend")
    +
     
     def test_cast_adr_to_int_and_back():
         X = lltype.Struct('X', ('foo', lltype.Signed))
    diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py
    --- a/pypy/jit/backend/llsupport/descr.py
    +++ b/pypy/jit/backend/llsupport/descr.py
    @@ -270,10 +270,12 @@
         _clsname = ''
         loop_token = None
         arg_classes = ''     # <-- annotation hack
    +    ffi_flags = 0
     
    -    def __init__(self, arg_classes, extrainfo=None):
    +    def __init__(self, arg_classes, extrainfo=None, ffi_flags=0):
             self.arg_classes = arg_classes    # string of "r" and "i" (ref/int)
             self.extrainfo = extrainfo
    +        self.ffi_flags = ffi_flags
     
         def __repr__(self):
             res = '%s(%s)' % (self.__class__.__name__, self.arg_classes)
    @@ -294,6 +296,13 @@
         def get_extra_info(self):
             return self.extrainfo
     
    +    def get_ffi_flags(self):
    +        return self.ffi_flags
    +
    +    def get_call_conv(self):
    +        from pypy.rlib.clibffi import get_call_conv
    +        return get_call_conv(self.ffi_flags, True)
    +
         def get_arg_types(self):
             return self.arg_classes
     
    @@ -313,6 +322,8 @@
                     c = 'f'
                 elif c == 'f' and longlong.supports_longlong:
                     return 'longlong.getrealfloat(%s)' % (process('L'),)
    +            elif c == 'S':
    +                return 'longlong.int2singlefloat(%s)' % (process('i'),)
                 arg = 'args_%s[%d]' % (c, seen[c])
                 seen[c] += 1
                 return arg
    @@ -328,6 +339,8 @@
                     return lltype.Void
                 elif arg == 'L':
                     return lltype.SignedLongLong
    +            elif arg == 'S':
    +                return lltype.SingleFloat
                 else:
                     raise AssertionError(arg)
     
    @@ -344,6 +357,8 @@
                 result = 'rffi.cast(lltype.SignedLongLong, res)'
             elif self.get_return_type() == history.VOID:
                 result = 'None'
    +        elif self.get_return_type() == 'S':
    +            result = 'longlong.singlefloat2int(res)'
             else:
                 assert 0
             source = py.code.Source("""
    @@ -354,14 +369,15 @@
             """ % locals())
             ARGS = [TYPE(arg) for arg in self.arg_classes]
             FUNC = lltype.FuncType(ARGS, RESULT)
    -        d = locals().copy()
    -        d.update(globals())
    +        d = globals().copy()
    +        d.update(locals())
             exec source.compile() in d
             self.call_stub = d['call_stub']
     
         def verify_types(self, args_i, args_r, args_f, return_type):
             assert self._return_type in return_type
    -        assert self.arg_classes.count('i') == len(args_i or ())
    +        assert (self.arg_classes.count('i') +
    +                self.arg_classes.count('S')) == len(args_i or ())
             assert self.arg_classes.count('r') == len(args_r or ())
             assert (self.arg_classes.count('f') +
                     self.arg_classes.count('L')) == len(args_f or ())
    @@ -394,8 +410,8 @@
         """
         _clsname = 'DynamicIntCallDescr'
     
    -    def __init__(self, arg_classes, result_size, result_sign, extrainfo=None):
    -        BaseIntCallDescr.__init__(self, arg_classes, extrainfo)
    +    def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0):
    +        BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags)
             assert isinstance(result_sign, bool)
             self._result_size = chr(result_size)
             self._result_sign = result_sign
    @@ -438,23 +454,39 @@
         def get_result_size(self, translate_support_code):
             return 0
     
    +_SingleFloatCallDescr = None   # built lazily
    +
     def getCallDescrClass(RESULT):
         if RESULT is lltype.Void:
             return VoidCallDescr
         if RESULT is lltype.Float:
             return FloatCallDescr
    +    if RESULT is lltype.SingleFloat:
    +        global _SingleFloatCallDescr
    +        if _SingleFloatCallDescr is None:
    +            assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT)
    +            class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)):
    +                _clsname = 'SingleFloatCallDescr'
    +                _return_type = 'S'
    +            _SingleFloatCallDescr = SingleFloatCallDescr
    +        return _SingleFloatCallDescr
         if is_longlong(RESULT):
             return LongLongCallDescr
         return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr,
                              NonGcPtrCallDescr, 'Call', 'get_result_size',
                              Ellipsis,  # <= floatattrname should not be used here
                              '_is_result_signed')
    +getCallDescrClass._annspecialcase_ = 'specialize:memo'
     
     def get_call_descr(gccache, ARGS, RESULT, extrainfo=None):
         arg_classes = []
         for ARG in ARGS:
             kind = getkind(ARG)
    -        if   kind == 'int': arg_classes.append('i')
    +        if   kind == 'int':
    +            if ARG is lltype.SingleFloat:
    +                arg_classes.append('S')
    +            else:
    +                arg_classes.append('i')
             elif kind == 'ref': arg_classes.append('r')
             elif kind == 'float':
                 if is_longlong(ARG):
    @@ -486,6 +518,9 @@
                 return GcPtrDescr
             else:
                 return NonGcPtrDescr
    +    if TYPE is lltype.SingleFloat:
    +        assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE)
    +        TYPE = rffi.UINT
         try:
             return _cache[nameprefix, TYPE]
         except KeyError:
    diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py
    --- a/pypy/jit/backend/llsupport/ffisupport.py
    +++ b/pypy/jit/backend/llsupport/ffisupport.py
    @@ -1,41 +1,58 @@
     from pypy.rlib.rarithmetic import intmask
     from pypy.jit.metainterp import history
    -from pypy.jit.backend.llsupport.descr import DynamicIntCallDescr, NonGcPtrCallDescr,\
    -    FloatCallDescr, VoidCallDescr
    +from pypy.rpython.lltypesystem import rffi
    +from pypy.jit.backend.llsupport.descr import (
    +    DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr,
    +    LongLongCallDescr, getCallDescrClass)
     
     class UnsupportedKind(Exception):
         pass
     
    -def get_call_descr_dynamic(ffi_args, ffi_result, extrainfo=None):
    +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0):
         """Get a call descr: the types of result and args are represented by
         rlib.libffi.types.*"""
         try:
    -        reskind = get_ffi_type_kind(ffi_result)
    -        argkinds = [get_ffi_type_kind(arg) for arg in ffi_args]
    +        reskind = get_ffi_type_kind(cpu, ffi_result)
    +        argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args]
         except UnsupportedKind:
    -        return None # ??
    +        return None
         arg_classes = ''.join(argkinds)
         if reskind == history.INT:
             size = intmask(ffi_result.c_size)
             signed = is_ffi_type_signed(ffi_result)
    -        return DynamicIntCallDescr(arg_classes, size, signed, extrainfo)
    +        return DynamicIntCallDescr(arg_classes, size, signed, extrainfo,
    +                                   ffi_flags=ffi_flags)
         elif reskind == history.REF:
    -        return  NonGcPtrCallDescr(arg_classes, extrainfo)
    +        return  NonGcPtrCallDescr(arg_classes, extrainfo,
    +                                  ffi_flags=ffi_flags)
         elif reskind == history.FLOAT:
    -        return FloatCallDescr(arg_classes, extrainfo)
    +        return FloatCallDescr(arg_classes, extrainfo,
    +                              ffi_flags=ffi_flags)
         elif reskind == history.VOID:
    -        return VoidCallDescr(arg_classes, extrainfo)
    +        return VoidCallDescr(arg_classes, extrainfo,
    +                             ffi_flags=ffi_flags)
    +    elif reskind == 'L':
    +        return LongLongCallDescr(arg_classes, extrainfo,
    +                                 ffi_flags=ffi_flags)
    +    elif reskind == 'S':
    +        SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT)
    +        return SingleFloatCallDescr(arg_classes, extrainfo,
    +                                    ffi_flags=ffi_flags)
         assert False
     
    -def get_ffi_type_kind(ffi_type):
    +def get_ffi_type_kind(cpu, ffi_type):
         from pypy.rlib.libffi import types
         kind = types.getkind(ffi_type)
         if kind == 'i' or kind == 'u':
             return history.INT
    -    elif kind == 'f':
    +    elif cpu.supports_floats and kind == 'f':
             return history.FLOAT
         elif kind == 'v':
             return history.VOID
    +    elif cpu.supports_longlong and (kind == 'I' or kind == 'U'):     # longlong
    +        return 'L'
    +    elif cpu.supports_singlefloats and kind == 's':    # singlefloat
    +        return 'S'
         raise UnsupportedKind("Unsupported kind '%s'" % kind)
     
     def is_ffi_type_signed(ffi_type):
    diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
    --- a/pypy/jit/backend/llsupport/gc.py
    +++ b/pypy/jit/backend/llsupport/gc.py
    @@ -366,36 +366,92 @@
     
         def add_jit2gc_hooks(self, jit2gc):
             #
    -        def collect_jit_stack_root(callback, gc, addr):
    -            if addr.signed[0] != GcRootMap_shadowstack.MARKER:
    -                # common case
    -                if gc.points_to_valid_gc_object(addr):
    -                    callback(gc, addr)
    -                return WORD
    -            else:
    -                # case of a MARKER followed by an assembler stack frame
    -                follow_stack_frame_of_assembler(callback, gc, addr)
    -                return 2 * WORD
    +        # ---------------
    +        # This is used to enumerate the shadowstack in the presence
    +        # of the JIT.  It is also used by the stacklet support in
    +        # rlib/_stacklet_shadowstack.  That's why it is written as
    +        # an iterator that can also be used with a custom_trace.
             #
    -        def follow_stack_frame_of_assembler(callback, gc, addr):
    -            frame_addr = addr.signed[1]
    -            addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs)
    -            force_index = addr.signed[0]
    -            if force_index < 0:
    -                force_index = ~force_index
    -            callshape = self._callshapes[force_index]
    -            n = 0
    -            while True:
    -                offset = rffi.cast(lltype.Signed, callshape[n])
    -                if offset == 0:
    -                    break
    -                addr = llmemory.cast_int_to_adr(frame_addr + offset)
    -                if gc.points_to_valid_gc_object(addr):
    -                    callback(gc, addr)
    -                n += 1
    +        class RootIterator:
    +            _alloc_flavor_ = "raw"
    +
    +            def next(iself, gc, next, range_highest):
    +                # Return the "next" valid GC object' address.  This usually
    +                # means just returning "next", until we reach "range_highest",
    +                # except that we are skipping NULLs.  If "next" contains a
    +                # MARKER instead, then we go into JIT-frame-lookup mode.
    +                #
    +                while True:
    +                    #
    +                    # If we are not iterating right now in a JIT frame
    +                    if iself.frame_addr == 0:
    +                        #
    +                        # Look for the next shadowstack address that
    +                        # contains a valid pointer
    +                        while next != range_highest:
    +                            if next.signed[0] == self.MARKER:
    +                                break
    +                            if gc.points_to_valid_gc_object(next):
    +                                return next
    +                            next += llmemory.sizeof(llmemory.Address)
    +                        else:
    +                            return llmemory.NULL     # done
    +                        #
    +                        # It's a JIT frame.  Save away 'next' for later, and
    +                        # go into JIT-frame-exploring mode.
    +                        next += llmemory.sizeof(llmemory.Address)
    +                        frame_addr = next.signed[0]
    +                        iself.saved_next = next
    +                        iself.frame_addr = frame_addr
    +                        addr = llmemory.cast_int_to_adr(frame_addr +
    +                                                        self.force_index_ofs)
    +                        addr = iself.translateptr(iself.context, addr)
    +                        force_index = addr.signed[0]
    +                        if force_index < 0:
    +                            force_index = ~force_index
    +                        # NB: the next line reads a still-alive _callshapes,
    +                        # because we ensure that just before we called this
    +                        # piece of assembler, we put on the (same) stack a
    +                        # pointer to a loop_token that keeps the force_index
    +                        # alive.
    +                        callshape = self._callshapes[force_index]
    +                    else:
    +                        # Continuing to explore this JIT frame
    +                        callshape = iself.callshape
    +                    #
    +                    # 'callshape' points to the next INT of the callshape.
    +                    # If it's zero we are done with the JIT frame.
    +                    while rffi.cast(lltype.Signed, callshape[0]) != 0:
    +                        #
    +                        # Non-zero: it's an offset inside the JIT frame.
    +                        # Read it and increment 'callshape'.
    +                        offset = rffi.cast(lltype.Signed, callshape[0])
    +                        callshape = lltype.direct_ptradd(callshape, 1)
    +                        addr = llmemory.cast_int_to_adr(iself.frame_addr +
    +                                                        offset)
    +                        addr = iself.translateptr(iself.context, addr)
    +                        if gc.points_to_valid_gc_object(addr):
    +                            #
    +                            # The JIT frame contains a valid GC pointer at
    +                            # this address (as opposed to NULL).  Save
    +                            # 'callshape' for the next call, and return the
    +                            # address.
    +                            iself.callshape = callshape
    +                            return addr
    +                    #
    +                    # Restore 'prev' and loop back to the start.
    +                    iself.frame_addr = 0
    +                    next = iself.saved_next
    +                    next += llmemory.sizeof(llmemory.Address)
    +
    +        # ---------------
             #
    +        root_iterator = RootIterator()
    +        root_iterator.frame_addr = 0
    +        root_iterator.context = llmemory.NULL
    +        root_iterator.translateptr = lambda context, addr: addr
             jit2gc.update({
    -            'rootstackhook': collect_jit_stack_root,
    +            'root_iterator': root_iterator,
                 })
     
         def initialize(self):
    @@ -544,18 +600,19 @@
             assert self.GCClass.inline_simple_malloc
             assert self.GCClass.inline_simple_malloc_varsize
     
    -        # make a malloc function, with three arguments
    +        # make a malloc function, with two arguments
             def malloc_basic(size, tid):
                 type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
                 has_finalizer = bool(tid & (1<", res)
                 return res
    @@ -571,14 +628,10 @@
             def malloc_array(itemsize, tid, num_elem):
                 type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
                 check_typeid(type_id)
    -            try:
    -                return llop1.do_malloc_varsize_clear(
    -                    llmemory.GCREF,
    -                    type_id, num_elem, self.array_basesize, itemsize,
    -                    self.array_length_ofs, True)
    -            except MemoryError:
    -                fatalerror("out of memory (from JITted code)")
    -                return lltype.nullptr(llmemory.GCREF.TO)
    +            return llop1.do_malloc_varsize_clear(
    +                llmemory.GCREF,
    +                type_id, num_elem, self.array_basesize, itemsize,
    +                self.array_length_ofs)
             self.malloc_array = malloc_array
             self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType(
                 [lltype.Signed] * 3, llmemory.GCREF))
    @@ -591,23 +644,15 @@
             unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE)
             #
             def malloc_str(length):
    -            try:
    -                return llop1.do_malloc_varsize_clear(
    -                    llmemory.GCREF,
    -                    str_type_id, length, str_basesize, str_itemsize,
    -                    str_ofs_length, True)
    -            except MemoryError:
    -                fatalerror("out of memory (from JITted code)")
    -                return lltype.nullptr(llmemory.GCREF.TO)
    +            return llop1.do_malloc_varsize_clear(
    +                llmemory.GCREF,
    +                str_type_id, length, str_basesize, str_itemsize,
    +                str_ofs_length)
             def malloc_unicode(length):
    -            try:
    -                return llop1.do_malloc_varsize_clear(
    -                    llmemory.GCREF,
    -                    unicode_type_id, length, unicode_basesize,unicode_itemsize,
    -                    unicode_ofs_length, True)
    -            except MemoryError:
    -                fatalerror("out of memory (from JITted code)")
    -                return lltype.nullptr(llmemory.GCREF.TO)
    +            return llop1.do_malloc_varsize_clear(
    +                llmemory.GCREF,
    +                unicode_type_id, length, unicode_basesize,unicode_itemsize,
    +                unicode_ofs_length)
             self.malloc_str = malloc_str
             self.malloc_unicode = malloc_unicode
             self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType(
    @@ -628,16 +673,12 @@
                 if self.DEBUG:
                     random_usage_of_xmm_registers()
                 assert size >= self.minimal_size_in_nursery
    -            try:
    -                # NB. although we call do_malloc_fixedsize_clear() here,
    -                # it's a bit of a hack because we set tid to 0 and may
    -                # also use it to allocate varsized objects.  The tid
    -                # and possibly the length are both set afterward.
    -                gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
    -                                            0, size, True, False, False)
    -            except MemoryError:
    -                fatalerror("out of memory (from JITted code)")
    -                return 0
    +            # NB. although we call do_malloc_fixedsize_clear() here,
    +            # it's a bit of a hack because we set tid to 0 and may
    +            # also use it to allocate varsized objects.  The tid
    +            # and possibly the length are both set afterward.
    +            gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
    +                                        0, size, False, False)
                 return rffi.cast(lltype.Signed, gcref)
             self.malloc_slowpath = malloc_slowpath
             self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed)
    diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py
    --- a/pypy/jit/backend/llsupport/llmodel.py
    +++ b/pypy/jit/backend/llsupport/llmodel.py
    @@ -253,13 +253,13 @@
             return ofs, size, sign
         unpack_arraydescr_size._always_inline_ = True
     
    -    def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None):
    +    def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
             return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo)
     
    -    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo=None):
    +    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags):
             from pypy.jit.backend.llsupport import ffisupport
    -        return ffisupport.get_call_descr_dynamic(ffi_args, ffi_result,
    -                                                 extrainfo)
    +        return ffisupport.get_call_descr_dynamic(self, ffi_args, ffi_result,
    +                                                 extrainfo, ffi_flags)
     
         def get_overflow_error(self):
             ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable)
    @@ -586,7 +586,7 @@
         def bh_call_i(self, func, calldescr, args_i, args_r, args_f):
             assert isinstance(calldescr, BaseIntCallDescr)
             if not we_are_translated():
    -            calldescr.verify_types(args_i, args_r, args_f, history.INT)
    +            calldescr.verify_types(args_i, args_r, args_f, history.INT + 'S')
             return calldescr.call_stub(func, args_i, args_r, args_f)
     
         def bh_call_r(self, func, calldescr, args_i, args_r, args_f):
    diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py
    --- a/pypy/jit/backend/llsupport/test/test_descr.py
    +++ b/pypy/jit/backend/llsupport/test/test_descr.py
    @@ -51,7 +51,8 @@
         S = lltype.GcStruct('S', ('x', lltype.Char),
                                  ('y', lltype.Ptr(T)),
                                  ('z', lltype.Ptr(U)),
    -                             ('f', lltype.Float))
    +                             ('f', lltype.Float),
    +                             ('s', lltype.SingleFloat))
         assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr
         assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr
         cls = getFieldDescrClass(lltype.Char)
    @@ -60,6 +61,10 @@
         clsf = getFieldDescrClass(lltype.Float)
         assert clsf != cls
         assert clsf == getFieldDescrClass(lltype.Float)
    +    clss = getFieldDescrClass(lltype.SingleFloat)
    +    assert clss not in (cls, clsf)
    +    assert clss == getFieldDescrClass(lltype.SingleFloat)
    +    assert clss == getFieldDescrClass(rffi.UINT)    # for now
         #
         c0 = GcCache(False)
         c1 = GcCache(True)
    @@ -71,14 +76,17 @@
             descr_y = get_field_descr(c2, S, 'y')
             descr_z = get_field_descr(c2, S, 'z')
             descr_f = get_field_descr(c2, S, 'f')
    +        descr_s = get_field_descr(c2, S, 's')
             assert descr_x.__class__ is cls
             assert descr_y.__class__ is GcPtrFieldDescr
             assert descr_z.__class__ is NonGcPtrFieldDescr
             assert descr_f.__class__ is clsf
    +        assert descr_s.__class__ is clss
             assert descr_x.name == 'S.x'
             assert descr_y.name == 'S.y'
             assert descr_z.name == 'S.z'
             assert descr_f.name == 'S.f'
    +        assert descr_s.name == 'S.s'
             if not tsc:
                 assert descr_x.offset < descr_y.offset < descr_z.offset
                 assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key()
    @@ -86,23 +94,29 @@
                 assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T))
                 assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U))
                 assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float)
    +            assert descr_s.get_field_size(False) == rffi.sizeof(
    +                                                            lltype.SingleFloat)
             else:
                 assert isinstance(descr_x.offset, Symbolic)
                 assert isinstance(descr_y.offset, Symbolic)
                 assert isinstance(descr_z.offset, Symbolic)
                 assert isinstance(descr_f.offset, Symbolic)
    +            assert isinstance(descr_s.offset, Symbolic)
                 assert isinstance(descr_x.get_field_size(True), Symbolic)
                 assert isinstance(descr_y.get_field_size(True), Symbolic)
                 assert isinstance(descr_z.get_field_size(True), Symbolic)
                 assert isinstance(descr_f.get_field_size(True), Symbolic)
    +            assert isinstance(descr_s.get_field_size(True), Symbolic)
             assert not descr_x.is_pointer_field()
             assert     descr_y.is_pointer_field()
             assert not descr_z.is_pointer_field()
             assert not descr_f.is_pointer_field()
    +        assert not descr_s.is_pointer_field()
             assert not descr_x.is_float_field()
             assert not descr_y.is_float_field()
             assert not descr_z.is_float_field()
             assert     descr_f.is_float_field()
    +        assert not descr_s.is_float_field()
     
     
     def test_get_field_descr_sign():
    @@ -136,6 +150,7 @@
         A4 = lltype.GcArray(lltype.Float)
         A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed),
                                           ('k', lltype.Signed)))
    +    A6 = lltype.GcArray(lltype.SingleFloat)
         assert getArrayDescrClass(A2) is GcPtrArrayDescr
         assert getArrayDescrClass(A3) is NonGcPtrArrayDescr
         cls = getArrayDescrClass(A1)
    @@ -144,6 +159,9 @@
         clsf = getArrayDescrClass(A4)
         assert clsf != cls
         assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float))
    +    clss = getArrayDescrClass(A5)
    +    assert clss not in (clsf, cls)
    +    assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT))
         #
         c0 = GcCache(False)
         descr1 = get_array_descr(c0, A1)
    @@ -151,10 +169,12 @@
         descr3 = get_array_descr(c0, A3)
         descr4 = get_array_descr(c0, A4)
         descr5 = get_array_descr(c0, A5)
    +    descr6 = get_array_descr(c0, A6)
         assert descr1.__class__ is cls
         assert descr2.__class__ is GcPtrArrayDescr
         assert descr3.__class__ is NonGcPtrArrayDescr
         assert descr4.__class__ is clsf
    +    assert descr5.__class__ is clss
         assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char))
         assert not descr1.is_array_of_pointers()
         assert     descr2.is_array_of_pointers()
    @@ -174,24 +194,29 @@
         assert descr2.get_base_size(False) == get_alignment('p')
         assert descr3.get_base_size(False) == get_alignment('p')
         assert descr4.get_base_size(False) == get_alignment('d')
    +    assert descr5.get_base_size(False) == get_alignment('f')
         assert descr1.get_ofs_length(False) == 0
         assert descr2.get_ofs_length(False) == 0
         assert descr3.get_ofs_length(False) == 0
         assert descr4.get_ofs_length(False) == 0
    +    assert descr5.get_ofs_length(False) == 0
         assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char)
         assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T))
         assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U))
         assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float)
         assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2
    +    assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat)
         #
         assert isinstance(descr1.get_base_size(True), Symbolic)
         assert isinstance(descr2.get_base_size(True), Symbolic)
         assert isinstance(descr3.get_base_size(True), Symbolic)
         assert isinstance(descr4.get_base_size(True), Symbolic)
    +    assert isinstance(descr5.get_base_size(True), Symbolic)
         assert isinstance(descr1.get_ofs_length(True), Symbolic)
         assert isinstance(descr2.get_ofs_length(True), Symbolic)
         assert isinstance(descr3.get_ofs_length(True), Symbolic)
         assert isinstance(descr4.get_ofs_length(True), Symbolic)
    +    assert isinstance(descr5.get_ofs_length(True), Symbolic)
         assert isinstance(descr1.get_item_size(True), Symbolic)
         assert isinstance(descr2.get_item_size(True), Symbolic)
         assert isinstance(descr3.get_item_size(True), Symbolic)
    @@ -216,6 +241,11 @@
         assert descr.is_array_of_floats()
         assert descr.get_base_size(False) == 0
         assert descr.get_ofs_length(False) == -1
    +    CA = rffi.CArray(rffi.FLOAT)
    +    descr = get_array_descr(c0, CA)
    +    assert not descr.is_array_of_floats()
    +    assert descr.get_base_size(False) == 0
    +    assert descr.get_ofs_length(False) == -1
     
     
     def test_get_array_descr_sign():
    @@ -263,6 +293,11 @@
         assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float)
         assert descr4.get_return_type() == history.FLOAT
         assert descr4.arg_classes == "ff"
    +    #
    +    descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat)
    +    assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat)
    +    assert descr5.get_return_type() == "S"
    +    assert descr5.arg_classes == "S"
     
     def test_get_call_descr_not_translated_longlong():
         if sys.maxint > 2147483647:
    @@ -292,6 +327,11 @@
         assert isinstance(descr4.get_result_size(True), Symbolic)
         assert descr4.get_return_type() == history.FLOAT
         assert descr4.arg_classes == "ff"
    +    #
    +    descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat)
    +    assert isinstance(descr5.get_result_size(True), Symbolic)
    +    assert descr5.get_return_type() == "S"
    +    assert descr5.arg_classes == "S"
     
     def test_call_descr_extra_info():
         c1 = GcCache(True)
    @@ -349,8 +389,11 @@
         #
         descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float)
         assert 'FloatCallDescr' in descr4f.repr_of_descr()
    +    #
    +    descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat)
    +    assert 'SingleFloatCallDescr' in descr5f.repr_of_descr()
     
    -def test_call_stubs():
    +def test_call_stubs_1():
         c0 = GcCache(False)
         ARGS = [lltype.Char, lltype.Signed]
         RES = lltype.Char
    @@ -364,6 +407,8 @@
         res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None)
         assert res == ord('c')
     
    +def test_call_stubs_2():
    +    c0 = GcCache(False)
         ARRAY = lltype.GcArray(lltype.Signed)
         ARGS = [lltype.Float, lltype.Ptr(ARRAY)]
         RES = lltype.Float
    @@ -379,3 +424,27 @@
         res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr),
                                [], [opaquea], [longlong.getfloatstorage(3.5)])
         assert longlong.getrealfloat(res) == 4.5
    +
    +def test_call_stubs_single_float():
    +    from pypy.rlib.longlong2float import uint2singlefloat, singlefloat2uint
    +    from pypy.rlib.rarithmetic import r_singlefloat, intmask
    +    #
    +    c0 = GcCache(False)
    +    ARGS = [lltype.SingleFloat, lltype.SingleFloat, lltype.SingleFloat]
    +    RES = lltype.SingleFloat
    +
    +    def f(a, b, c):
    +        a = float(a)
    +        b = float(b)
    +        c = float(c)
    +        x = a - (b / c)
    +        return r_singlefloat(x)
    +
    +    fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f)
    +    descr2 = get_call_descr(c0, ARGS, RES)
    +    a = intmask(singlefloat2uint(r_singlefloat(-10.0)))
    +    b = intmask(singlefloat2uint(r_singlefloat(3.0)))
    +    c = intmask(singlefloat2uint(r_singlefloat(2.0)))
    +    res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr),
    +                           [a, b, c], [], [])
    +    assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5
    diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py
    --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py
    +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py
    @@ -1,24 +1,56 @@
     from pypy.rlib.libffi import types
    -from pypy.jit.backend.llsupport.ffisupport import get_call_descr_dynamic, \
    -    VoidCallDescr, DynamicIntCallDescr
    -    
    +from pypy.jit.codewriter.longlong import is_64_bit
    +from pypy.jit.backend.llsupport.ffisupport import *
    +
    +
    +class FakeCPU:
    +    def __init__(self, supports_floats=False, supports_longlong=False,
    +                 supports_singlefloats=False):
    +        self.supports_floats = supports_floats
    +        self.supports_longlong = supports_longlong
    +        self.supports_singlefloats = supports_singlefloats
    +
    +
     def test_call_descr_dynamic():
    +    args = [types.sint, types.pointer]
    +    descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42)
    +    assert isinstance(descr, DynamicIntCallDescr)
    +    assert descr.arg_classes == 'ii'
    +    assert descr.get_ffi_flags() == 42
     
         args = [types.sint, types.double, types.pointer]
    -    descr = get_call_descr_dynamic(args, types.void)
    +    descr = get_call_descr_dynamic(FakeCPU(), args, types.void)
    +    assert descr is None    # missing floats
    +    descr = get_call_descr_dynamic(FakeCPU(supports_floats=True),
    +                                   args, types.void, ffi_flags=43)
         assert isinstance(descr, VoidCallDescr)
         assert descr.arg_classes == 'ifi'
    +    assert descr.get_ffi_flags() == 43
     
    -    descr = get_call_descr_dynamic([], types.sint8)
    +    descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8)
         assert isinstance(descr, DynamicIntCallDescr)
         assert descr.get_result_size(False) == 1
         assert descr.is_result_signed() == True
     
    -    descr = get_call_descr_dynamic([], types.uint8)
    +    descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8)
         assert isinstance(descr, DynamicIntCallDescr)
         assert descr.get_result_size(False) == 1
         assert descr.is_result_signed() == False
     
    -    descr = get_call_descr_dynamic([], types.float)
    -    assert descr is None # single floats are not supported so far
    -    
    +    if not is_64_bit:
    +        descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong)
    +        assert descr is None   # missing longlongs
    +        descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True),
    +                                       [], types.slonglong, ffi_flags=43)
    +        assert isinstance(descr, LongLongCallDescr)
    +        assert descr.get_ffi_flags() == 43
    +    else:
    +        assert types.slonglong is types.slong
    +
    +    descr = get_call_descr_dynamic(FakeCPU(), [], types.float)
    +    assert descr is None   # missing singlefloats
    +    descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True),
    +                                   [], types.float, ffi_flags=44)
    +    SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT)
    +    assert isinstance(descr, SingleFloatCallDescr)
    +    assert descr.get_ffi_flags() == 44
    diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py
    --- a/pypy/jit/backend/llsupport/test/test_gc.py
    +++ b/pypy/jit/backend/llsupport/test/test_gc.py
    @@ -246,9 +246,8 @@
         def __init__(self):
             self.record = []
     
    -    def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, can_collect,
    +    def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size,
                                       has_finalizer, contains_weakptr):
    -        assert can_collect
             assert not contains_weakptr
             p = llmemory.raw_malloc(size)
             p = llmemory.cast_adr_to_ptr(p, RESTYPE)
    @@ -258,8 +257,7 @@
             return p
     
         def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size,
    -                                itemsize, offset_to_length, can_collect):
    -        assert can_collect
    +                                itemsize, offset_to_length):
             p = llmemory.raw_malloc(size + itemsize * length)
             (p + offset_to_length).signed[0] = length
             p = llmemory.cast_adr_to_ptr(p, RESTYPE)
    diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py
    --- a/pypy/jit/backend/model.py
    +++ b/pypy/jit/backend/model.py
    @@ -8,12 +8,13 @@
         # ^^^ This is only useful on 32-bit platforms.  If True,
         # longlongs are supported by the JIT, but stored as doubles.
         # Boxes and Consts are BoxFloats and ConstFloats.
    +    supports_singlefloats = False
     
         done_with_this_frame_void_v = -1
         done_with_this_frame_int_v = -1
         done_with_this_frame_ref_v = -1
         done_with_this_frame_float_v = -1
    -    exit_frame_with_exception_v = -1
    +    propagate_exception_v = -1
         total_compiled_loops = 0
         total_compiled_bridges = 0
         total_freed_loops = 0
    diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py
    --- a/pypy/jit/backend/test/calling_convention_test.py
    +++ b/pypy/jit/backend/test/calling_convention_test.py
    @@ -8,6 +8,7 @@
                                              ConstObj, BoxFloat, ConstFloat)
     from pypy.jit.metainterp.resoperation import ResOperation, rop
     from pypy.jit.metainterp.typesystem import deref
    +from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.jit.tool.oparser import parse
     from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi, rclass
     from pypy.rpython.ootypesystem import ootype
    @@ -96,7 +97,8 @@
                 FUNC = self.FuncType(funcargs, F)
                 FPTR = self.Ptr(FUNC)
                 func_ptr = llhelper(FPTR, func)
    -            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
                 funcbox = self.get_funcbox(cpu, func_ptr)
     
                 ops = '[%s]\n' % arguments
    @@ -148,7 +150,8 @@
                 FUNC = self.FuncType(args, F)
                 FPTR = self.Ptr(FUNC)
                 func_ptr = llhelper(FPTR, func)
    -            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
                 funcbox = self.get_funcbox(cpu, func_ptr)
     
                 res = self.execute_operation(rop.CALL,
    @@ -190,7 +193,8 @@
                 FUNC = self.FuncType(args, F)
                 FPTR = self.Ptr(FUNC)
                 func_ptr = llhelper(FPTR, func)
    -            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
                 funcbox = self.get_funcbox(cpu, func_ptr)
     
                 res = self.execute_operation(rop.CALL,
    @@ -268,7 +272,8 @@
                     else:
                         ARGS.append(lltype.Signed)
                 FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof(
    -                lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES)
    +                lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES,
    +                EffectInfo.MOST_GENERAL)
                 ops = '''
                 [%s]
                 f99 = call_assembler(%s, descr=called_looptoken)
    @@ -290,3 +295,59 @@
                     assert abs(x - expected_result) < 0.0001
                 finally:
                     del self.cpu.done_with_this_frame_float_v
    +
    +    def test_call_with_singlefloats(self):
    +        cpu = self.cpu
    +        if not cpu.supports_floats or not cpu.supports_singlefloats:
    +            py.test.skip('requires floats and singlefloats')
    +
    +        import random
    +        from pypy.rlib.libffi import types
    +        from pypy.rlib.rarithmetic import r_singlefloat
    +
    +        def func(*args):
    +            res = 0.0
    +            for i, x in enumerate(args):
    +                res += (i + 1.1) * float(x)
    +            return res
    +
    +        F = lltype.Float
    +        S = lltype.SingleFloat
    +        I = lltype.Signed
    +        floats = [random.random() - 0.5 for i in range(8)]
    +        singlefloats = [r_singlefloat(random.random() - 0.5) for i in range(8)]
    +        ints = [random.randrange(-99, 99) for i in range(8)]
    +        for repeat in range(100):
    +            args = []
    +            argvalues = []
    +            argslist = []
    +            local_floats = list(floats)
    +            local_singlefloats = list(singlefloats)
    +            local_ints = list(ints)
    +            for i in range(8):
    +                case = random.randrange(0, 3)
    +                if case == 0:
    +                    args.append(F)
    +                    arg = local_floats.pop()
    +                    argslist.append(boxfloat(arg))
    +                elif case == 1:
    +                    args.append(S)
    +                    arg = local_singlefloats.pop()
    +                    argslist.append(BoxInt(longlong.singlefloat2int(arg)))
    +                else:
    +                    args.append(I)
    +                    arg = local_ints.pop()
    +                    argslist.append(BoxInt(arg))
    +                argvalues.append(arg)
    +            FUNC = self.FuncType(args, F)
    +            FPTR = self.Ptr(FUNC)
    +            func_ptr = llhelper(FPTR, func)
    +            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
    +            funcbox = self.get_funcbox(cpu, func_ptr)
    +
    +            res = self.execute_operation(rop.CALL,
    +                                         [funcbox] + argslist,
    +                                         'float', descr=calldescr)
    +            expected = func(*argvalues)
    +            assert abs(res.getfloat() - expected) < 0.0001
    diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
    --- a/pypy/jit/backend/test/runner_test.py
    +++ b/pypy/jit/backend/test/runner_test.py
    @@ -9,6 +9,7 @@
                                              ConstObj, BoxFloat, ConstFloat)
     from pypy.jit.metainterp.resoperation import ResOperation, rop
     from pypy.jit.metainterp.typesystem import deref
    +from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.jit.tool.oparser import parse
     from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi, rclass
     from pypy.rpython.ootypesystem import ootype
    @@ -445,7 +446,8 @@
                 return chr(ord(c) + 1)
             FPTR = self.Ptr(self.FuncType([lltype.Char], lltype.Char))
             func_ptr = llhelper(FPTR, func)
    -        calldescr = cpu.calldescrof(deref(FPTR), (lltype.Char,), lltype.Char)
    +        calldescr = cpu.calldescrof(deref(FPTR), (lltype.Char,), lltype.Char,
    +                                    EffectInfo.MOST_GENERAL)
             x = cpu.bh_call_i(self.get_funcbox(cpu, func_ptr).value,
                               calldescr, [ord('A')], None, None)
             assert x == ord('B')
    @@ -458,14 +460,15 @@
                                               lltype.Float))
                 func_ptr = llhelper(FPTR, func)
                 FTP = deref(FPTR)
    -            calldescr = cpu.calldescrof(FTP, FTP.ARGS, FTP.RESULT)
    +            calldescr = cpu.calldescrof(FTP, FTP.ARGS, FTP.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
                 x = cpu.bh_call_f(self.get_funcbox(cpu, func_ptr).value,
                                   calldescr,
                                   [42], None, [longlong.getfloatstorage(3.5)])
                 assert longlong.getrealfloat(x) == 3.5 - 42
     
         def test_call(self):
    -        from pypy.rlib.libffi import types
    +        from pypy.rlib.libffi import types, FUNCFLAG_CDECL
     
             def func_int(a, b):
                 return a + b
    @@ -486,13 +489,16 @@
                 FUNC = deref(FPTR)
                 funcbox = self.get_funcbox(cpu, func_ptr)
                 # first, try it with the "normal" calldescr
    -            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
                 res = self.execute_operation(rop.CALL,
                                              [funcbox, BoxInt(num), BoxInt(num)],
                                              'int', descr=calldescr)
                 assert res.value == 2 * num
                 # then, try it with the dynamic calldescr
    -            dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type)
    +            dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type,
    +                                                    EffectInfo.MOST_GENERAL,
    +                                                    ffi_flags=FUNCFLAG_CDECL)
                 res = self.execute_operation(rop.CALL,
                                              [funcbox, BoxInt(num), BoxInt(num)],
                                              'int', descr=dyn_calldescr)
    @@ -507,7 +513,8 @@
                 FUNC = self.FuncType([F] * 7 + [I] * 2 + [F] * 3, F)
                 FPTR = self.Ptr(FUNC)
                 func_ptr = llhelper(FPTR, func)
    -            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
                 funcbox = self.get_funcbox(cpu, func_ptr)
                 args = ([boxfloat(.1) for i in range(7)] +
                         [BoxInt(1), BoxInt(2), boxfloat(.2), boxfloat(.3),
    @@ -529,7 +536,8 @@
     
             FUNC = self.FuncType([lltype.Signed]*16, lltype.Signed)
             FPTR = self.Ptr(FUNC)
    -        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
             func_ptr = llhelper(FPTR, func)
             args = range(16)
             funcbox = self.get_funcbox(self.cpu, func_ptr)
    @@ -552,7 +560,8 @@
                 FPTR = self.Ptr(self.FuncType([TP] * nb_args, TP))
                 func_ptr = llhelper(FPTR, func_ints)
                 FUNC = deref(FPTR)
    -            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
                 funcbox = self.get_funcbox(cpu, func_ptr)
                 args = [280-24*i for i in range(nb_args)]
                 res = self.execute_operation(rop.CALL,
    @@ -566,7 +575,8 @@
     
             FUNC = self.FuncType([lltype.Float, lltype.Float], lltype.Float)
             FPTR = self.Ptr(FUNC)
    -        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
             func_ptr = llhelper(FPTR, func)
             funcbox = self.get_funcbox(self.cpu, func_ptr)
             res = self.execute_operation(rop.CALL, [funcbox, constfloat(1.5),
    @@ -1627,7 +1637,8 @@
             '''
             FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void))
             fptr = llhelper(FPTR, func)
    -        calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT)
    +        calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
     
             xtp = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)
             xtp.subclassrange_min = 1
    @@ -1845,7 +1856,8 @@
             FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Void)
             func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force)
             funcbox = self.get_funcbox(self.cpu, func_ptr).constbox()
    -        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
             cpu = self.cpu
             i0 = BoxInt()
             i1 = BoxInt()
    @@ -1888,7 +1900,8 @@
             FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Signed)
             func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force)
             funcbox = self.get_funcbox(self.cpu, func_ptr).constbox()
    -        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
             cpu = self.cpu
             i0 = BoxInt()
             i1 = BoxInt()
    @@ -1933,7 +1946,8 @@
             FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Float)
             func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force)
             funcbox = self.get_funcbox(self.cpu, func_ptr).constbox()
    -        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
             cpu = self.cpu
             i0 = BoxInt()
             i1 = BoxInt()
    @@ -1969,7 +1983,7 @@
             assert values == [1, 10]
     
         def test_call_to_c_function(self):
    -        from pypy.rlib.libffi import CDLL, types, ArgChain
    +        from pypy.rlib.libffi import CDLL, types, ArgChain, FUNCFLAG_CDECL
             from pypy.rpython.lltypesystem.ll2ctypes import libc_name
             libc = CDLL(libc_name)
             c_tolower = libc.getpointer('tolower', [types.uchar], types.sint)
    @@ -1979,7 +1993,9 @@
             cpu = self.cpu
             func_adr = llmemory.cast_ptr_to_adr(c_tolower.funcsym)
             funcbox = ConstInt(heaptracker.adr2int(func_adr))
    -        calldescr = cpu.calldescrof_dynamic([types.uchar], types.sint)
    +        calldescr = cpu.calldescrof_dynamic([types.uchar], types.sint,
    +                                            EffectInfo.MOST_GENERAL,
    +                                            ffi_flags=FUNCFLAG_CDECL)
             i1 = BoxInt()
             i2 = BoxInt()
             tok = BoxInt()
    @@ -2035,7 +2051,9 @@
             funcbox = ConstInt(heaptracker.adr2int(func_adr))
             calldescr = cpu.calldescrof_dynamic([types.pointer, types_size_t,
                                                  types_size_t, types.pointer],
    -                                            types.void)
    +                                            types.void,
    +                                            EffectInfo.MOST_GENERAL,
    +                                            ffi_flags=clibffi.FUNCFLAG_CDECL)
             i0 = BoxInt()
             i1 = BoxInt()
             i2 = BoxInt()
    @@ -2061,6 +2079,62 @@
             assert len(glob.lst) > 0
             lltype.free(raw, flavor='raw')
     
    +    def test_call_to_winapi_function(self):
    +        from pypy.rlib.clibffi import _WIN32, FUNCFLAG_STDCALL
    +        if not _WIN32:
    +            py.test.skip("Windows test only")
    +        from pypy.rlib.libffi import CDLL, types, ArgChain
    +        from pypy.rlib.rwin32 import DWORD
    +        libc = CDLL('KERNEL32')
    +        c_GetCurrentDir = libc.getpointer('GetCurrentDirectoryA',
    +                                          [types.ulong, types.pointer],
    +                                          types.ulong)
    +
    +        cwd = os.getcwd()
    +        buflen = len(cwd) + 10
    +        buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
    +        argchain = ArgChain().arg(rffi.cast(DWORD, buflen)).arg(buffer)
    +        res = c_GetCurrentDir.call(argchain, DWORD)
    +        assert rffi.cast(lltype.Signed, res) == len(cwd)
    +        assert rffi.charp2strn(buffer, buflen) == cwd
    +        lltype.free(buffer, flavor='raw')
    +
    +        cpu = self.cpu
    +        func_adr = llmemory.cast_ptr_to_adr(c_GetCurrentDir.funcsym)
    +        funcbox = ConstInt(heaptracker.adr2int(func_adr))
    +        calldescr = cpu.calldescrof_dynamic([types.ulong, types.pointer],
    +                                            types.ulong,
    +                                            EffectInfo.MOST_GENERAL,
    +                                            ffi_flags=FUNCFLAG_STDCALL)
    +        i1 = BoxInt()
    +        i2 = BoxInt()
    +        faildescr = BasicFailDescr(1)
    +        # if the stdcall convention is ignored, then ESP is wrong after the
    +        # call: 8 bytes too much.  If we repeat the call often enough, crash.
    +        ops = []
    +        for i in range(50):
    +            i3 = BoxInt()
    +            ops += [
    +                ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3,
    +                             descr=calldescr),
    +                ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
    +                ]
    +            ops[-1].setfailargs([])
    +        ops += [
    +            ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0))
    +        ]
    +        looptoken = LoopToken()
    +        self.cpu.compile_loop([i1, i2], ops, looptoken)
    +
    +        buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
    +        self.cpu.set_future_value_int(0, buflen)
    +        self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer))
    +        fail = self.cpu.execute_token(looptoken)
    +        assert fail.identifier == 0
    +        assert self.cpu.get_latest_value_int(0) == len(cwd)
    +        assert rffi.charp2strn(buffer, buflen) == cwd
    +        lltype.free(buffer, flavor='raw')
    +
         def test_guard_not_invalidated(self):
             cpu = self.cpu
             i0 = BoxInt()
    @@ -2330,7 +2404,8 @@
             ARGS = [lltype.Signed] * 10
             RES = lltype.Signed
             FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof(
    -            lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES)
    +            lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES,
    +            EffectInfo.MOST_GENERAL)
             for i in range(10):
                 self.cpu.set_future_value_int(i, i+1)
             res = self.cpu.execute_token(looptoken)
    @@ -2370,7 +2445,8 @@
             ARGS = [lltype.Float, lltype.Float]
             RES = lltype.Float
             FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof(
    -            lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES)
    +            lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES,
    +            EffectInfo.MOST_GENERAL)
             
             ops = '''
             [f0, f1]
    @@ -2460,7 +2536,8 @@
             ARGS = [lltype.Float, lltype.Float]
             RES = lltype.Float
             FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof(
    -            lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES)
    +            lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES,
    +            EffectInfo.MOST_GENERAL)
             
             ops = '''
             [f0, f1]
    @@ -2672,7 +2749,8 @@
                 #
                 FUNC = self.FuncType([lltype.Signed], RESTYPE)
                 FPTR = self.Ptr(FUNC)
    -            calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                             EffectInfo.MOST_GENERAL)
                 x = self.cpu.bh_call_i(self.get_funcbox(self.cpu, f).value,
                                        calldescr, [value], None, None)
                 assert x == expected, (
    @@ -2705,7 +2783,8 @@
                 #
                 FUNC = self.FuncType([lltype.Signed], RESTYPE)
                 FPTR = self.Ptr(FUNC)
    -            calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +            calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                             EffectInfo.MOST_GENERAL)
                 funcbox = self.get_funcbox(self.cpu, f)
                 res = self.execute_operation(rop.CALL, [funcbox, BoxInt(value)],
                                              'int', descr=calldescr)
    @@ -2739,7 +2818,8 @@
             #
             FUNC = self.FuncType([lltype.SignedLongLong], lltype.SignedLongLong)
             FPTR = self.Ptr(FUNC)
    -        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
             x = self.cpu.bh_call_f(self.get_funcbox(self.cpu, f).value,
                                    calldescr, None, None, [value])
             assert x == expected
    @@ -2766,12 +2846,74 @@
             #
             FUNC = self.FuncType([lltype.SignedLongLong], lltype.SignedLongLong)
             FPTR = self.Ptr(FUNC)
    -        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
             funcbox = self.get_funcbox(self.cpu, f)
             res = self.execute_operation(rop.CALL, [funcbox, BoxFloat(value)],
                                          'float', descr=calldescr)
             assert res.getfloatstorage() == expected
     
    +    def test_singlefloat_result_of_call_direct(self):
    +        if not self.cpu.supports_singlefloats:
    +            py.test.skip("singlefloat test")
    +        from pypy.translator.tool.cbuild import ExternalCompilationInfo
    +        from pypy.rlib.rarithmetic import r_singlefloat
    +        eci = ExternalCompilationInfo(
    +            separate_module_sources=["""
    +            float fn_test_result_of_call(float x)
    +            {
    +                return x / 2.0f;
    +            }
    +            """],
    +            export_symbols=['fn_test_result_of_call'])
    +        f = rffi.llexternal('fn_test_result_of_call', [lltype.SingleFloat],
    +                            lltype.SingleFloat,
    +                            compilation_info=eci, _nowrapper=True)
    +        value = r_singlefloat(-42.5)
    +        expected = r_singlefloat(-21.25)
    +        assert f(value) == expected
    +        #
    +        FUNC = self.FuncType([lltype.SingleFloat], lltype.SingleFloat)
    +        FPTR = self.Ptr(FUNC)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
    +        ivalue = longlong.singlefloat2int(value)
    +        iexpected = longlong.singlefloat2int(expected)
    +        x = self.cpu.bh_call_i(self.get_funcbox(self.cpu, f).value,
    +                               calldescr, [ivalue], None, None)
    +        assert x == iexpected
    +
    +    def test_singlefloat_result_of_call_compiled(self):
    +        if not self.cpu.supports_singlefloats:
    +            py.test.skip("test of singlefloat result")
    +        from pypy.translator.tool.cbuild import ExternalCompilationInfo
    +        from pypy.rlib.rarithmetic import r_singlefloat
    +        eci = ExternalCompilationInfo(
    +            separate_module_sources=["""
    +            float fn_test_result_of_call(float x)
    +            {
    +                return x / 2.0f;
    +            }
    +            """],
    +            export_symbols=['fn_test_result_of_call'])
    +        f = rffi.llexternal('fn_test_result_of_call', [lltype.SingleFloat],
    +                            lltype.SingleFloat,
    +                            compilation_info=eci, _nowrapper=True)
    +        value = r_singlefloat(-42.5)
    +        expected = r_singlefloat(-21.25)
    +        assert f(value) == expected
    +        #
    +        FUNC = self.FuncType([lltype.SingleFloat], lltype.SingleFloat)
    +        FPTR = self.Ptr(FUNC)
    +        calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
    +                                         EffectInfo.MOST_GENERAL)
    +        funcbox = self.get_funcbox(self.cpu, f)
    +        ivalue = longlong.singlefloat2int(value)
    +        iexpected = longlong.singlefloat2int(expected)
    +        res = self.execute_operation(rop.CALL, [funcbox, BoxInt(ivalue)],
    +                                     'int', descr=calldescr)
    +        assert res.value == iexpected
    +
         def test_free_loop_and_bridges(self):
             from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU
             if not isinstance(self.cpu, AbstractLLCPU):
    @@ -2786,6 +2928,26 @@
             assert mem2 < mem1
             assert mem2 == mem0
     
    +    def test_memoryerror(self):
    +        excdescr = BasicFailDescr(666)
    +        self.cpu.propagate_exception_v = self.cpu.get_fail_descr_number(
    +            excdescr)
    +        self.cpu.setup_once()    # xxx redo it, because we added
    +                                 # propagate_exception_v
    +        i0 = BoxInt()
    +        p0 = BoxPtr()
    +        operations = [
    +            ResOperation(rop.NEWUNICODE, [i0], p0),
    +            ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1))
    +            ]
    +        inputargs = [i0]
    +        looptoken = LoopToken()
    +        self.cpu.compile_loop(inputargs, operations, looptoken)
    +        # overflowing value:
    +        self.cpu.set_future_value_int(0, sys.maxint // 4 + 1)
    +        fail = self.cpu.execute_token(looptoken)
    +        assert fail.identifier == excdescr.identifier
    +
     
     class OOtypeBackendTest(BaseBackendTest):
     
    diff --git a/pypy/jit/backend/test/test_ll_random.py b/pypy/jit/backend/test/test_ll_random.py
    --- a/pypy/jit/backend/test/test_ll_random.py
    +++ b/pypy/jit/backend/test/test_ll_random.py
    @@ -6,6 +6,7 @@
     from pypy.jit.metainterp.history import BoxPtr, BoxInt
     from pypy.jit.metainterp.history import BasicFailDescr
     from pypy.jit.codewriter import heaptracker
    +from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.rpython.annlowlevel import llhelper
     from pypy.rlib.rarithmetic import intmask
     from pypy.rpython.llinterp import LLException
    @@ -468,6 +469,10 @@
             exec code in d
             return subset, d['f'], vtableptr
     
    +    def getcalldescr(self, builder, TP):
    +        ef = EffectInfo.MOST_GENERAL
    +        return builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT, ef)
    +
     # 1. non raising call and guard_no_exception
     class CallOperation(BaseCallOperation):
         def produce_into(self, builder, r):
    @@ -481,7 +486,7 @@
             ptr = llhelper(lltype.Ptr(TP), f)
             c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
             args = [c_addr] + subset
    -        descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT)
    +        descr = self.getcalldescr(builder, TP)
             self.put(builder, args, descr)
             op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None,
                               descr=BasicFailDescr())
    @@ -501,7 +506,7 @@
             ptr = llhelper(lltype.Ptr(TP), f)
             c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
             args = [c_addr] + subset
    -        descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT)
    +        descr = self.getcalldescr(builder, TP)
             self.put(builder, args, descr)
             _, vtableptr = builder.get_random_structure_type_and_vtable(r)
             exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu)
    @@ -523,7 +528,7 @@
             ptr = llhelper(lltype.Ptr(TP), f)
             c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
             args = [c_addr] + subset
    -        descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT)
    +        descr = self.getcalldescr(builder, TP)
             self.put(builder, args, descr)
             exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
             op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
    @@ -540,7 +545,7 @@
             ptr = llhelper(lltype.Ptr(TP), f)
             c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
             args = [c_addr] + subset
    -        descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT)
    +        descr = self.getcalldescr(builder, TP)
             self.put(builder, args, descr)
             op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(),
                               descr=BasicFailDescr())
    @@ -559,7 +564,7 @@
             ptr = llhelper(lltype.Ptr(TP), f)
             c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
             args = [c_addr] + subset
    -        descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT)
    +        descr = self.getcalldescr(builder, TP)
             self.put(builder, args, descr)
             while True:
                 _, vtableptr = builder.get_random_structure_type_and_vtable(r)
    diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
    --- a/pypy/jit/backend/x86/assembler.py
    +++ b/pypy/jit/backend/x86/assembler.py
    @@ -34,6 +34,7 @@
     from pypy.rlib.debug import (debug_print, debug_start, debug_stop,
                                  have_debug_prints)
     from pypy.rlib import rgc
    +from pypy.rlib.clibffi import FFI_DEFAULT_ABI
     from pypy.jit.backend.x86.jump import remap_frame_layout
     from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.jit.codewriter import longlong
    @@ -55,7 +56,9 @@
             self.exc = exc
             self.is_guard_not_invalidated = is_guard_not_invalidated
     
    -DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed))
    +DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed),
    +                              ('bridge', lltype.Signed), # 0 or 1
    +                              ('number', lltype.Signed))
     
     class Assembler386(object):
         _regalloc = None
    @@ -88,6 +91,8 @@
             self._current_depths_cache = (0, 0)
             self.datablockwrapper = None
             self.stack_check_slowpath = 0
    +        self.propagate_exception_path = 0
    +        self.gcrootmap_retaddr_forced = 0
             self.teardown()
     
         def leave_jitted_hook(self):
    @@ -124,6 +129,7 @@
                 self._build_failure_recovery(True, withfloats=True)
                 support.ensure_sse2_floats()
                 self._build_float_constants()
    +        self._build_propagate_exception_path()
             if gc_ll_descr.get_malloc_slowpath_addr is not None:
                 self._build_malloc_slowpath()
             self._build_stack_check_slowpath()
    @@ -137,6 +143,9 @@
             assert self.memcpy_addr != 0, "setup_once() not called?"
             self.current_clt = looptoken.compiled_loop_token
             self.pending_guard_tokens = []
    +        if WORD == 8:
    +            self.pending_memoryerror_trampoline_from = []
    +            self.error_trampoline_64 = 0
             self.mc = codebuf.MachineCodeBlockWrapper()
             #assert self.datablockwrapper is None --- but obscure case
             # possible, e.g. getting MemoryError and continuing
    @@ -146,6 +155,8 @@
     
         def teardown(self):
             self.pending_guard_tokens = None
    +        if WORD == 8:
    +            self.pending_memoryerror_trampoline_from = None
             self.mc = None
             self.looppos = -1
             self.currently_compiling_loop = None
    @@ -154,9 +165,12 @@
         def finish_once(self):
             if self._debug:
                 debug_start('jit-backend-counts')
    -            for i in range(len(self.loop_run_counters)):
    -                struct = self.loop_run_counters[i]
    -                debug_print(str(i) + ':' + str(struct.i))
    +            for struct in self.loop_run_counters:
    +                if struct.bridge:
    +                    prefix = 'bridge '
    +                else:
    +                    prefix = 'loop '
    +                debug_print(prefix + str(struct.number) + ':' + str(struct.i))
                 debug_stop('jit-backend-counts')
     
         def _build_float_constants(self):
    @@ -231,15 +245,47 @@
             if self.cpu.supports_floats:          # restore the XMM registers
                 for i in range(self.cpu.NUM_REGS):# from where they were saved
                     mc.MOVSD_xs(i, (WORD*2)+8*i)
    +        #
    +        # Note: we check this after the code above, just because the code
    +        # above is more than 127 bytes on 64-bits...
    +        mc.TEST_rr(eax.value, eax.value)
    +        mc.J_il8(rx86.Conditions['Z'], 0) # patched later
    +        jz_location = mc.get_relative_pos()
    +        #
             nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr()
             mc.MOV(edx, heap(nursery_free_adr))   # load this in EDX
             mc.RET()
    +        #
    +        # If the slowpath malloc failed, we raise a MemoryError that
    +        # always interrupts the current loop, as a "good enough"
    +        # approximation.  Also note that we didn't RET from this helper;
    +        # but the code we jump to will actually restore the stack
    +        # position based on EBP, which will get us out of here for free.
    +        offset = mc.get_relative_pos() - jz_location
    +        assert 0 < offset <= 127
    +        mc.overwrite(jz_location-1, chr(offset))
    +        mc.JMP(imm(self.propagate_exception_path))
    +        #
             rawstart = mc.materialize(self.cpu.asmmemmgr, [])
             self.malloc_slowpath2 = rawstart
     
    +    def _build_propagate_exception_path(self):
    +        if self.cpu.propagate_exception_v < 0:
    +            return      # not supported (for tests, or non-translated)
    +        #
    +        self.mc = codebuf.MachineCodeBlockWrapper()
    +        # call on_leave_jitted_save_exc()
    +        addr = self.cpu.get_on_leave_jitted_int(save_exception=True)
    +        self.mc.CALL(imm(addr))
    +        self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v)
    +        self._call_footer()
    +        rawstart = self.mc.materialize(self.cpu.asmmemmgr, [])
    +        self.propagate_exception_path = rawstart
    +        self.mc = None
    +
         def _build_stack_check_slowpath(self):
             _, _, slowpathaddr = self.cpu.insert_stack_check()
    -        if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0:
    +        if slowpathaddr == 0 or self.cpu.propagate_exception_v < 0:
                 return      # no stack check (for tests, or non-translated)
             #
             # make a "function" that is called immediately at the start of
    @@ -295,19 +341,11 @@
             offset = mc.get_relative_pos() - jnz_location
             assert 0 < offset <= 127
             mc.overwrite(jnz_location-1, chr(offset))
    -        # clear the exception from the global position
    -        mc.MOV(eax, heap(self.cpu.pos_exc_value()))
    -        mc.MOV(heap(self.cpu.pos_exception()), imm0)
    -        mc.MOV(heap(self.cpu.pos_exc_value()), imm0)
    -        # save the current exception instance into fail_boxes_ptr[0]
    -        adr = self.fail_boxes_ptr.get_addr_for_num(0)
    -        mc.MOV(heap(adr), eax)
    -        # call the helper function to set the GC flag on the fail_boxes_ptr
    -        # array (note that there is no exception any more here)
    -        addr = self.cpu.get_on_leave_jitted_int(save_exception=False)
    +        # call on_leave_jitted_save_exc()
    +        addr = self.cpu.get_on_leave_jitted_int(save_exception=True)
             mc.CALL(imm(addr))
             #
    -        mc.MOV_ri(eax.value, self.cpu.exit_frame_with_exception_v)
    +        mc.MOV_ri(eax.value, self.cpu.propagate_exception_v)
             #
             # footer -- note the ADD, which skips the return address of this
             # function, and will instead return to the caller's caller.  Note
    @@ -320,6 +358,7 @@
             self.stack_check_slowpath = rawstart
     
         @staticmethod
    +    @rgc.no_collect
         def _release_gil_asmgcc(css):
             # similar to trackgcroot.py:pypy_asm_stackwalk, first part
             from pypy.rpython.memory.gctransform import asmgcroot
    @@ -335,6 +374,7 @@
                 before()
     
         @staticmethod
    +    @rgc.no_collect
         def _reacquire_gil_asmgcc(css):
             # first reacquire the GIL
             after = rffi.aroundstate.after
    @@ -349,12 +389,14 @@
             next.prev = prev
     
         @staticmethod
    +    @rgc.no_collect
         def _release_gil_shadowstack():
             before = rffi.aroundstate.before
             if before:
                 before()
     
         @staticmethod
    +    @rgc.no_collect
         def _reacquire_gil_shadowstack():
             after = rffi.aroundstate.after
             if after:
    @@ -403,7 +445,7 @@
             self.setup(looptoken)
             self.currently_compiling_loop = looptoken
             if log:
    -            self._register_counter()
    +            self._register_counter(False, looptoken.number)
                 operations = self._inject_debugging_code(looptoken, operations)
     
             regalloc = RegAlloc(self, self.cpu.translate_support_code)
    @@ -472,7 +514,7 @@
     
             self.setup(original_loop_token)
             if log:
    -            self._register_counter()
    +            self._register_counter(True, descr_number)
                 operations = self._inject_debugging_code(faildescr, operations)
     
             arglocs = self.rebuild_faillocs_from_descr(failure_recovery)
    @@ -519,6 +561,8 @@
             # at the end of self.mc.
             for tok in self.pending_guard_tokens:
                 tok.pos_recovery_stub = self.generate_quick_failure(tok)
    +        if WORD == 8 and len(self.pending_memoryerror_trampoline_from) > 0:
    +            self.error_trampoline_64 = self.generate_propagate_error_64()
     
         def patch_pending_failure_recoveries(self, rawstart):
             # after we wrote the assembler to raw memory, set up
    @@ -555,6 +599,12 @@
                     # less, we would run into the issue that overwriting the
                     # 5 bytes here might get a few nonsense bytes at the
                     # return address of the following CALL.
    +        if WORD == 8:
    +            for pos_after_jz in self.pending_memoryerror_trampoline_from:
    +                assert self.error_trampoline_64 != 0     # only if non-empty
    +                mc = codebuf.MachineCodeBlockWrapper()
    +                mc.writeimm32(self.error_trampoline_64 - pos_after_jz)
    +                mc.copy_to_raw_memory(rawstart + pos_after_jz - 4)
     
         def get_asmmemmgr_blocks(self, looptoken):
             clt = looptoken.compiled_loop_token
    @@ -569,7 +619,7 @@
             return self.mc.materialize(self.cpu.asmmemmgr, allblocks,
                                        self.cpu.gc_ll_descr.gcrootmap)
     
    -    def _register_counter(self):
    +    def _register_counter(self, bridge, number):
             if self._debug:
                 # YYY very minor leak -- we need the counters to stay alive
                 # forever, just because we want to report them at the end
    @@ -577,6 +627,8 @@
                 struct = lltype.malloc(DEBUG_COUNTER, flavor='raw',
                                        track_allocation=False)
                 struct.i = 0
    +            struct.bridge = int(bridge)
    +            struct.number = number
                 self.loop_run_counters.append(struct)
     
         def _find_failure_recovery_bytecode(self, faildescr):
    @@ -1067,9 +1119,10 @@
                         self.implement_guard(guard_token, checkfalsecond)
             return genop_cmp_guard_float
     
    -    def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax):
    +    def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax,
    +                   argtypes=None, callconv=FFI_DEFAULT_ABI):
             if IS_X86_64:
    -            return self._emit_call_64(force_index, x, arglocs, start)
    +            return self._emit_call_64(force_index, x, arglocs, start, argtypes)
     
             p = 0
             n = len(arglocs)
    @@ -1096,13 +1149,24 @@
             # x is a location
             self.mc.CALL(x)
             self.mark_gc_roots(force_index)
    +        #
    +        if callconv != FFI_DEFAULT_ABI:
    +            self._fix_stdcall(callconv, p)
     
    -    def _emit_call_64(self, force_index, x, arglocs, start):
    +    def _fix_stdcall(self, callconv, p):
    +        from pypy.rlib.clibffi import FFI_STDCALL
    +        assert callconv == FFI_STDCALL
    +        # it's a bit stupid, but we're just going to cancel the fact that
    +        # the called function just added 'p' to ESP, by subtracting it again.
    +        self.mc.SUB_ri(esp.value, p)
    +
    +    def _emit_call_64(self, force_index, x, arglocs, start, argtypes):
             src_locs = []
             dst_locs = []
             xmm_src_locs = []
             xmm_dst_locs = []
             pass_on_stack = []
    +        singlefloats = None
     
             # In reverse order for use with pop()
             unused_gpr = [r9, r8, ecx, edx, esi, edi]
    @@ -1122,6 +1186,11 @@
                         xmm_dst_locs.append(unused_xmm.pop())
                     else:
                         pass_on_stack.append(loc)
    +            elif (argtypes is not None and argtypes[i-start] == 'S' and
    +                  len(unused_xmm) > 0):
    +                # Singlefloat argument
    +                if singlefloats is None: singlefloats = []
    +                singlefloats.append((loc, unused_xmm.pop()))
                 else:
                     if len(unused_gpr) > 0:
                         src_locs.append(loc)
    @@ -1149,9 +1218,15 @@
                     else:
                         self.mc.MOV_sr(i*WORD, loc.value)
     
    -        # Handle register arguments
    +        # Handle register arguments: first remap the xmm arguments
    +        remap_frame_layout(self, xmm_src_locs, xmm_dst_locs,
    +                           X86_64_XMM_SCRATCH_REG)
    +        # Load the singlefloat arguments from main regs or stack to xmm regs
    +        if singlefloats is not None:
    +            for src, dst in singlefloats:
    +                self.mc.MOVD(dst, src)
    +        # Finally remap the arguments in the main regs
             remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG)
    -        remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG)
     
             self._regalloc.reserve_param(len(pass_on_stack))
             self.mc.CALL(x)
    @@ -1266,6 +1341,20 @@
         def genop_cast_int_to_float(self, op, arglocs, resloc):
             self.mc.CVTSI2SD(resloc, arglocs[0])
     
    +    def genop_cast_float_to_singlefloat(self, op, arglocs, resloc):
    +        loc0, loctmp = arglocs
    +        self.mc.CVTSD2SS(loctmp, loc0)
    +        assert isinstance(resloc, RegLoc)
    +        assert isinstance(loctmp, RegLoc)
    +        self.mc.MOVD_rx(resloc.value, loctmp.value)
    +
    +    def genop_cast_singlefloat_to_float(self, op, arglocs, resloc):
    +        loc0, = arglocs
    +        assert isinstance(resloc, RegLoc)
    +        assert isinstance(loc0, RegLoc)
    +        self.mc.MOVD_xr(resloc.value, loc0.value)
    +        self.mc.CVTSS2SD_xx(resloc.value, resloc.value)
    +
         def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc):
             guard_opnum = guard_op.getopnum()
             self.mc.CMP(arglocs[0], imm0)
    @@ -1387,7 +1476,7 @@
             assert isinstance(loc_vtable, ImmedLoc)
             arglocs = arglocs[:-1]
             self.call(self.malloc_func_addr, arglocs, eax)
    -        # xxx ignore NULL returns for now
    +        self.propagate_memoryerror_if_eax_is_null()
             self.set_vtable(eax, loc_vtable)
     
         def set_vtable(self, loc, loc_vtable):
    @@ -1406,18 +1495,35 @@
         def genop_new(self, op, arglocs, result_loc):
             assert result_loc is eax
             self.call(self.malloc_func_addr, arglocs, eax)
    +        self.propagate_memoryerror_if_eax_is_null()
     
         def genop_new_array(self, op, arglocs, result_loc):
             assert result_loc is eax
             self.call(self.malloc_array_func_addr, arglocs, eax)
    +        self.propagate_memoryerror_if_eax_is_null()
     
         def genop_newstr(self, op, arglocs, result_loc):
             assert result_loc is eax
             self.call(self.malloc_str_func_addr, arglocs, eax)
    +        self.propagate_memoryerror_if_eax_is_null()
     
         def genop_newunicode(self, op, arglocs, result_loc):
             assert result_loc is eax
             self.call(self.malloc_unicode_func_addr, arglocs, eax)
    +        self.propagate_memoryerror_if_eax_is_null()
    +
    +    def propagate_memoryerror_if_eax_is_null(self):
    +        # if self.propagate_exception_path == 0 (tests), this may jump to 0
    +        # and segfaults.  too bad.  the alternative is to continue anyway
    +        # with eax==0, but that will segfault too.
    +        self.mc.TEST_rr(eax.value, eax.value)
    +        if WORD == 4:
    +            self.mc.J_il(rx86.Conditions['Z'], self.propagate_exception_path)
    +            self.mc.add_pending_relocation()
    +        elif WORD == 8:
    +            self.mc.J_il(rx86.Conditions['Z'], 0)
    +            pos = self.mc.get_relative_pos()
    +            self.pending_memoryerror_trampoline_from.append(pos)
     
         # ----------
     
    @@ -1702,6 +1808,12 @@
             return GuardToken(faildescr, failargs, fail_locs, exc,
                               is_guard_not_invalidated)
     
    +    def generate_propagate_error_64(self):
    +        assert WORD == 8
    +        startpos = self.mc.get_relative_pos()
    +        self.mc.JMP(imm(self.propagate_exception_path))
    +        return startpos
    +
         def generate_quick_failure(self, guardtok):
             """Generate the initial code for handling a failure.  We try to
             keep it as compact as possible.
    @@ -2037,7 +2149,9 @@
             else:
                 tmp = eax
     
    -        self._emit_call(force_index, x, arglocs, 3, tmp=tmp)
    +        self._emit_call(force_index, x, arglocs, 3, tmp=tmp,
    +                        argtypes=op.getdescr().get_arg_types(),
    +                        callconv=op.getdescr().get_call_conv())
     
             if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8:
                 # a float or a long long return
    @@ -2049,7 +2163,19 @@
                     #     and this way is simpler also because the result loc
                     #     can just be always a stack location
                 else:
    -                self.mc.FSTP_b(resloc.value)   # float return
    +                self.mc.FSTPL_b(resloc.value)   # float return
    +        elif op.getdescr().get_return_type() == 'S':
    +            # singlefloat return
    +            assert resloc is eax
    +            if IS_X86_32:
    +                # must convert ST(0) to a 32-bit singlefloat and load it into EAX
    +                # mess mess mess
    +                self.mc.SUB_ri(esp.value, 4)
    +                self.mc.FSTPS_s(0)
    +                self.mc.POP_r(eax.value)
    +            elif IS_X86_64:
    +                # must copy from the lower 32 bits of XMM0 into eax
    +                self.mc.MOVD_rx(eax.value, xmm0.value)
             elif size == WORD:
                 assert resloc is eax or resloc is xmm0    # a full word
             elif size == 0:
    @@ -2121,13 +2247,27 @@
                     css = get_ebp_ofs(pos + use_words - 1)
                     self._regalloc.close_stack_struct = css
                 # The location where the future CALL will put its return address
    -            # will be [ESP-WORD], so save that as the next frame's top address
    -            self.mc.LEA_rs(eax.value, -WORD)        # LEA EAX, [ESP-4]
    +            # will be [ESP-WORD].  But we can't use that as the next frame's
    +            # top address!  As the code after releasegil() runs without the
    +            # GIL, it might not be set yet by the time we need it (very
    +            # unlikely), or it might be overwritten by the following call
    +            # to reaquiregil() (much more likely).  So we hack even more
    +            # and use a dummy location containing a dummy value (a pointer
    +            # to itself) which we pretend is the return address :-/ :-/ :-/
    +            # It prevents us to store any %esp-based stack locations but we
    +            # don't so far.
    +            adr = self.datablockwrapper.malloc_aligned(WORD, WORD)
    +            rffi.cast(rffi.CArrayPtr(lltype.Signed), adr)[0] = adr
    +            self.gcrootmap_retaddr_forced = adr
                 frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR)
    -            self.mc.MOV_br(frame_ptr, eax.value)    # MOV [css.frame], EAX
    +            if rx86.fits_in_32bits(adr):
    +                self.mc.MOV_bi(frame_ptr, adr)          # MOV [css.frame], adr
    +            else:
    +                self.mc.MOV_ri(eax.value, adr)          # MOV EAX, adr
    +                self.mc.MOV_br(frame_ptr, eax.value)    # MOV [css.frame], EAX
                 # Save ebp
                 index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP)
    -            self.mc.MOV_br(index_of_ebp, ebp.value) # MOV [css.ebp], EBP
    +            self.mc.MOV_br(index_of_ebp, ebp.value)     # MOV [css.ebp], EBP
                 # Call the closestack() function (also releasing the GIL)
                 if IS_X86_32:
                     reg = eax
    @@ -2155,6 +2295,9 @@
             if gcrootmap.is_shadow_stack:
                 args = []
             else:
    +            assert self.gcrootmap_retaddr_forced == -1, (
    +                      "missing mark_gc_roots() in CALL_RELEASE_GIL")
    +            self.gcrootmap_retaddr_forced = 0
                 css = self._regalloc.close_stack_struct
                 assert css != 0
                 if IS_X86_32:
    @@ -2207,7 +2350,7 @@
             self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0,
                             tmp=ecx)
             if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT:
    -            self.mc.FSTP_b(result_loc.value)
    +            self.mc.FSTPL_b(result_loc.value)
             #else: result_loc is already either eax or None, checked below
             self.mc.JMP_l8(0) # jump to done, patched later
             jmp_location = self.mc.get_relative_pos()
    @@ -2405,7 +2548,13 @@
                 if gcrootmap.is_shadow_stack:
                     gcrootmap.write_callshape(mark, force_index)
                 else:
    -                self.mc.insert_gcroot_marker(mark)
    +                if self.gcrootmap_retaddr_forced == 0:
    +                    self.mc.insert_gcroot_marker(mark)   # common case
    +                else:
    +                    assert self.gcrootmap_retaddr_forced != -1, (
    +                              "two mark_gc_roots() in a CALL_RELEASE_GIL")
    +                    gcrootmap.put(self.gcrootmap_retaddr_forced, mark)
    +                    self.gcrootmap_retaddr_forced = -1
     
         def target_arglocs(self, loop_token):
             return loop_token._x86_arglocs
    diff --git a/pypy/jit/backend/x86/codebuf.py b/pypy/jit/backend/x86/codebuf.py
    --- a/pypy/jit/backend/x86/codebuf.py
    +++ b/pypy/jit/backend/x86/codebuf.py
    @@ -25,8 +25,11 @@
             self.init_block_builder()
             # a list of relative positions; for each position p, the bytes
             # at [p-4:p] encode an absolute address that will need to be
    -        # made relative.
    -        self.relocations = []
    +        # made relative.  Only works on 32-bit!
    +        if WORD == 4:
    +            self.relocations = []
    +        else:
    +            self.relocations = None
             #
             # ResOperation --> offset in the assembly.
             # ops_offset[None] represents the beginning of the code after the last op
    @@ -42,9 +45,10 @@
     
         def copy_to_raw_memory(self, addr):
             self._copy_to_raw_memory(addr)
    -        for reloc in self.relocations:
    -            p = addr + reloc
    -            adr = rffi.cast(rffi.LONGP, p - WORD)
    -            adr[0] = intmask(adr[0] - p)
    +        if self.relocations is not None:
    +            for reloc in self.relocations:
    +                p = addr + reloc
    +                adr = rffi.cast(rffi.LONGP, p - WORD)
    +                adr[0] = intmask(adr[0] - p)
             valgrind.discard_translations(addr, self.get_relative_pos())
             self._dump(addr, "jit-backend-dump", backend_name)
    diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
    --- a/pypy/jit/backend/x86/regalloc.py
    +++ b/pypy/jit/backend/x86/regalloc.py
    @@ -706,6 +706,17 @@
             self.Perform(op, [loc0], loc1)
             self.rm.possibly_free_var(op.getarg(0))
     
    +    def consider_cast_float_to_singlefloat(self, op):
    +        loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
    +        loc1 = self.rm.force_allocate_reg(op.result)
    +        self.xrm.possibly_free_var(op.getarg(0))
    +        tmpxvar = TempBox()
    +        loctmp = self.xrm.force_allocate_reg(tmpxvar)   # may be equal to loc0
    +        self.xrm.possibly_free_var(tmpxvar)
    +        self.Perform(op, [loc0, loctmp], loc1)
    +
    +    consider_cast_singlefloat_to_float = consider_cast_int_to_float
    +
         def _consider_llong_binop_xx(self, op):
             # must force both arguments into xmm registers, because we don't
             # know if they will be suitably aligned.  Exception: if the second
    @@ -833,8 +844,8 @@
     
         def consider_call(self, op):
             effectinfo = op.getdescr().get_extra_info()
    -        if effectinfo is not None:
    -            oopspecindex = effectinfo.oopspecindex
    +        oopspecindex = effectinfo.oopspecindex
    +        if oopspecindex != EffectInfo.OS_NONE:
                 if IS_X86_32:
                     # support for some of the llong operations,
                     # which only exist on x86-32
    diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py
    --- a/pypy/jit/backend/x86/regloc.py
    +++ b/pypy/jit/backend/x86/regloc.py
    @@ -521,6 +521,8 @@
         UCOMISD = _binaryop('UCOMISD')
         CVTSI2SD = _binaryop('CVTSI2SD')
         CVTTSD2SI = _binaryop('CVTTSD2SI')
    +    CVTSD2SS = _binaryop('CVTSD2SS')
    +    CVTSS2SD = _binaryop('CVTSS2SD')
         
         SQRTSD = _binaryop('SQRTSD')
     
    @@ -534,6 +536,8 @@
         PXOR  = _binaryop('PXOR')
         PCMPEQD = _binaryop('PCMPEQD')
     
    +    MOVD = _binaryop('MOVD')
    +
         CALL = _relative_unaryop('CALL')
         JMP = _relative_unaryop('JMP')
     
    diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py
    --- a/pypy/jit/backend/x86/runner.py
    +++ b/pypy/jit/backend/x86/runner.py
    @@ -19,6 +19,7 @@
     class AbstractX86CPU(AbstractLLCPU):
         debug = True
         supports_floats = True
    +    supports_singlefloats = True
     
         BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed)
         dont_keepalive_stuff = False # for tests
    diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
    --- a/pypy/jit/backend/x86/rx86.py
    +++ b/pypy/jit/backend/x86/rx86.py
    @@ -527,6 +527,7 @@
     
         NOP = insn('\x90')
         RET = insn('\xC3')
    +    RET16_i = insn('\xC2', immediate(1, 'h'))
     
         PUSH_r = insn(rex_nw, register(1), '\x50')
         PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1))
    @@ -573,7 +574,8 @@
         BTS_jr = insn(rex_w, '\x0F\xAB', register(2,8), abs_, immediate(1))
     
         # x87 instructions
    -    FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1))
    +    FSTPL_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) # rffi.DOUBLE ('as' wants L??)
    +    FSTPS_s = insn('\xD9', orbyte(3<<3), stack_sp(1)) # lltype.SingleFloat
     
         # ------------------------------ Random mess -----------------------
         RDTSC = insn('\x0F\x31')
    @@ -590,8 +592,18 @@
         CVTTSD2SI_rx = xmminsn('\xF2', rex_w, '\x0F\x2C', register(1, 8), register(2), '\xC0')
         CVTTSD2SI_rb = xmminsn('\xF2', rex_w, '\x0F\x2C', register(1, 8), stack_bp(2))
     
    -    MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0')
    -    MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0')
    +    CVTSD2SS_xx = xmminsn('\xF2', rex_nw, '\x0F\x5A',
    +                          register(1, 8), register(2), '\xC0')
    +    CVTSD2SS_xb = xmminsn('\xF2', rex_nw, '\x0F\x5A',
    +                          register(1, 8), stack_bp(2))
    +    CVTSS2SD_xx = xmminsn('\xF3', rex_nw, '\x0F\x5A',
    +                          register(1, 8), register(2), '\xC0')
    +    CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A',
    +                          register(1, 8), stack_bp(2))
    +
    +    MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0')
    +    MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0')
    +    MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2))
     
         PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b'))
     
    diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py
    --- a/pypy/jit/backend/x86/test/test_gc_integration.py
    +++ b/pypy/jit/backend/x86/test/test_gc_integration.py
    @@ -7,6 +7,7 @@
          BoxPtr, ConstPtr, TreeLoop
     from pypy.jit.metainterp.resoperation import rop, ResOperation
     from pypy.jit.codewriter import heaptracker
    +from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.jit.backend.llsupport.descr import GcCache
     from pypy.jit.backend.llsupport.gc import GcLLDescription
     from pypy.jit.backend.detect_cpu import getcpuclass
    @@ -76,7 +77,8 @@
             for box in boxes:
                 regalloc.rm.try_allocate_reg(box)
             TP = lltype.FuncType([], lltype.Signed)
    -        calldescr = cpu.calldescrof(TP, TP.ARGS, TP.RESULT)
    +        calldescr = cpu.calldescrof(TP, TP.ARGS, TP.RESULT,
    +                                    EffectInfo.MOST_GENERAL)
             regalloc.rm._check_invariants()
             box = boxes[0]
             regalloc.position = 0
    diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py
    --- a/pypy/jit/backend/x86/test/test_regalloc.py
    +++ b/pypy/jit/backend/x86/test/test_regalloc.py
    @@ -16,6 +16,7 @@
     from pypy.rpython.annlowlevel import llhelper
     from pypy.rpython.lltypesystem import rclass, rstr
     from pypy.jit.codewriter import longlong
    +from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.jit.backend.x86.rx86 import *
     
     def test_is_comparison_or_ovf_op():
    @@ -92,7 +93,8 @@
         zd_addr = cpu.cast_int_to_adr(zero_division_tp)
         zero_division_error = llmemory.cast_adr_to_ptr(zd_addr,
                                                 lltype.Ptr(rclass.OBJECT_VTABLE))
    -    raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT)
    +    raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT,
    +                                        EffectInfo.MOST_GENERAL)
     
         fdescr1 = BasicFailDescr(1)
         fdescr2 = BasicFailDescr(2)
    @@ -115,9 +117,12 @@
         f2ptr = llhelper(F2PTR, f2)
         f10ptr = llhelper(F10PTR, f10)
     
    -    f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT)
    -    f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT)
    -    f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT)
    +    f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT,
    +                                   EffectInfo.MOST_GENERAL)
    +    f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT,
    +                                   EffectInfo.MOST_GENERAL)
    +    f10_calldescr= cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT,
    +                                   EffectInfo.MOST_GENERAL)
     
         namespace = locals().copy()
         type_system = 'lltype'
    diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py
    --- a/pypy/jit/backend/x86/test/test_regloc.py
    +++ b/pypy/jit/backend/x86/test/test_regloc.py
    @@ -62,7 +62,7 @@
                 assert mc.relocations == [5]
                 expected = "\xE8" + struct.pack(' movl $xxx, %eax
                     suffix = 'l'
    -                if ops[1][2:].isdigit():
    -                    ops[1] += 'd'
    -                else:
    -                    ops[1] = '%e' + ops[1][2:]
    +                ops[1] = reduce_to_32bit(ops[1])
    +            if instrname.lower() == 'movd':
    +                ops[0] = reduce_to_32bit(ops[0])
    +                ops[1] = reduce_to_32bit(ops[1])
                 #
                 op = '\t%s%s %s%s' % (instrname.lower(), suffix,
                                       ', '.join(ops), following)
    diff --git a/pypy/jit/codewriter/assembler.py b/pypy/jit/codewriter/assembler.py
    --- a/pypy/jit/codewriter/assembler.py
    +++ b/pypy/jit/codewriter/assembler.py
    @@ -76,6 +76,8 @@
                     TYPE = llmemory.Address
                 if TYPE == llmemory.Address:
                     value = heaptracker.adr2int(value)
    +            if TYPE is lltype.SingleFloat:
    +                value = longlong.singlefloat2int(value)
                 if not isinstance(value, (llmemory.AddressAsInt,
                                           ComputedIntSymbolic)):
                     value = lltype.cast_primitive(lltype.Signed, value)
    diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py
    --- a/pypy/jit/codewriter/call.py
    +++ b/pypy/jit/codewriter/call.py
    @@ -6,7 +6,7 @@
     from pypy.jit.codewriter import support
     from pypy.jit.codewriter.jitcode import JitCode
     from pypy.jit.codewriter.effectinfo import (VirtualizableAnalyzer,
    -    QuasiImmutAnalyzer, CanReleaseGILAnalyzer, effectinfo_from_writeanalyze,
    +    QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze,
         EffectInfo, CallInfoCollection)
     from pypy.translator.simplify import get_funcobj, get_functype
     from pypy.rpython.lltypesystem import lltype, llmemory
    @@ -31,7 +31,7 @@
                 self.readwrite_analyzer = ReadWriteAnalyzer(translator)
                 self.virtualizable_analyzer = VirtualizableAnalyzer(translator)
                 self.quasiimmut_analyzer = QuasiImmutAnalyzer(translator)
    -            self.canreleasegil_analyzer = CanReleaseGILAnalyzer(translator)
    +            self.randomeffects_analyzer = RandomEffectsAnalyzer(translator)
             #
             for index, jd in enumerate(jitdrivers_sd):
                 jd.index = index
    @@ -187,7 +187,7 @@
                 fnaddr = llmemory.cast_ptr_to_adr(fnptr)
             NON_VOID_ARGS = [ARG for ARG in FUNC.ARGS if ARG is not lltype.Void]
             calldescr = self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS),
    -                                         FUNC.RESULT)
    +                                         FUNC.RESULT, EffectInfo.MOST_GENERAL)
             return (fnaddr, calldescr)
     
         def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE,
    @@ -219,9 +219,11 @@
                     assert not NON_VOID_ARGS, ("arguments not supported for "
                                                "loop-invariant function!")
             # build the extraeffect
    -        can_release_gil = self.canreleasegil_analyzer.analyze(op)
    -        # can_release_gil implies can_invalidate
    -        can_invalidate = can_release_gil or self.quasiimmut_analyzer.analyze(op)
    +        random_effects = self.randomeffects_analyzer.analyze(op)
    +        if random_effects:
    +            extraeffect = EffectInfo.EF_RANDOM_EFFECTS
    +        # random_effects implies can_invalidate
    +        can_invalidate = random_effects or self.quasiimmut_analyzer.analyze(op)
             if extraeffect is None:
                 if self.virtualizable_analyzer.analyze(op):
                     extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
    @@ -239,12 +241,10 @@
             #
             effectinfo = effectinfo_from_writeanalyze(
                 self.readwrite_analyzer.analyze(op), self.cpu, extraeffect,
    -            oopspecindex, can_invalidate, can_release_gil)
    +            oopspecindex, can_invalidate)
             #
    -        if oopspecindex != EffectInfo.OS_NONE:
    -            assert effectinfo is not None
    +        assert effectinfo is not None
             if elidable or loopinvariant:
    -            assert effectinfo is not None
                 assert extraeffect != EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
                 # XXX this should also say assert not can_invalidate, but
                 #     it can't because our analyzer is not good enough for now
    @@ -264,8 +264,7 @@
     
         def calldescr_canraise(self, calldescr):
             effectinfo = calldescr.get_extra_info()
    -        return (effectinfo is None or
    -                effectinfo.extraeffect > EffectInfo.EF_CANNOT_RAISE)
    +        return effectinfo.check_can_raise()
     
         def jitdriver_sd_from_portal_graph(self, graph):
             for jd in self.jitdrivers_sd:
    diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py
    --- a/pypy/jit/codewriter/effectinfo.py
    +++ b/pypy/jit/codewriter/effectinfo.py
    @@ -15,6 +15,7 @@
         EF_ELIDABLE_CAN_RAISE              = 3 #elidable function (but can raise)
         EF_CAN_RAISE                       = 4 #normal function (can raise)
         EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables
    +    EF_RANDOM_EFFECTS                  = 6 #can do whatever
     
         # the 'oopspecindex' field is one of the following values:
         OS_NONE                     = 0    # normal case, no oopspec
    @@ -80,17 +81,26 @@
                     write_descrs_fields, write_descrs_arrays,
                     extraeffect=EF_CAN_RAISE,
                     oopspecindex=OS_NONE,
    -                can_invalidate=False, can_release_gil=False):
    -        key = (frozenset(readonly_descrs_fields),
    -               frozenset(readonly_descrs_arrays),
    -               frozenset(write_descrs_fields),
    -               frozenset(write_descrs_arrays),
    +                can_invalidate=False):
    +        key = (frozenset_or_none(readonly_descrs_fields),
    +               frozenset_or_none(readonly_descrs_arrays),
    +               frozenset_or_none(write_descrs_fields),
    +               frozenset_or_none(write_descrs_arrays),
                    extraeffect,
                    oopspecindex,
    -               can_invalidate,
    -               can_release_gil)
    +               can_invalidate)
             if key in cls._cache:
                 return cls._cache[key]
    +        if extraeffect == EffectInfo.EF_RANDOM_EFFECTS:
    +            assert readonly_descrs_fields is None
    +            assert readonly_descrs_arrays is None
    +            assert write_descrs_fields is None
    +            assert write_descrs_arrays is None
    +        else:
    +            assert readonly_descrs_fields is not None
    +            assert readonly_descrs_arrays is not None
    +            assert write_descrs_fields is not None
    +            assert write_descrs_arrays is not None
             result = object.__new__(cls)
             result.readonly_descrs_fields = readonly_descrs_fields
             result.readonly_descrs_arrays = readonly_descrs_arrays
    @@ -104,11 +114,13 @@
                 result.write_descrs_arrays = write_descrs_arrays
             result.extraeffect = extraeffect
             result.can_invalidate = can_invalidate
    -        result.can_release_gil = can_release_gil
             result.oopspecindex = oopspecindex
             cls._cache[key] = result
             return result
     
    +    def check_can_raise(self):
    +        return self.extraeffect > self.EF_CANNOT_RAISE
    +
         def check_can_invalidate(self):
             return self.can_invalidate
     
    @@ -116,56 +128,71 @@
             return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
     
         def has_random_effects(self):
    -        return self.oopspecindex == self.OS_LIBFFI_CALL or self.can_release_gil
    +        return self.extraeffect >= self.EF_RANDOM_EFFECTS
    +
    +
    +def frozenset_or_none(x):
    +    if x is None:
    +        return None
    +    return frozenset(x)
    +
    +EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None,
    +                                     EffectInfo.EF_RANDOM_EFFECTS,
    +                                     can_invalidate=True)
    +
     
     def effectinfo_from_writeanalyze(effects, cpu,
                                      extraeffect=EffectInfo.EF_CAN_RAISE,
                                      oopspecindex=EffectInfo.OS_NONE,
    -                                 can_invalidate=False,
    -                                 can_release_gil=False):
    +                                 can_invalidate=False):
         from pypy.translator.backendopt.writeanalyze import top_set
    -    if effects is top_set:
    -        return None
    -    readonly_descrs_fields = []
    -    readonly_descrs_arrays = []
    -    write_descrs_fields = []
    -    write_descrs_arrays = []
    +    if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS:
    +        readonly_descrs_fields = None
    +        readonly_descrs_arrays = None
    +        write_descrs_fields = None
    +        write_descrs_arrays = None
    +        extraeffect = EffectInfo.EF_RANDOM_EFFECTS
    +    else:
    +        readonly_descrs_fields = []
    +        readonly_descrs_arrays = []
    +        write_descrs_fields = []
    +        write_descrs_arrays = []
     
    -    def add_struct(descrs_fields, (_, T, fieldname)):
    -        T = deref(T)
    -        if consider_struct(T, fieldname):
    -            descr = cpu.fielddescrof(T, fieldname)
    -            descrs_fields.append(descr)
    +        def add_struct(descrs_fields, (_, T, fieldname)):
    +            T = deref(T)
    +            if consider_struct(T, fieldname):
    +                descr = cpu.fielddescrof(T, fieldname)
    +                descrs_fields.append(descr)
     
    -    def add_array(descrs_arrays, (_, T)):
    -        ARRAY = deref(T)
    -        if consider_array(ARRAY):
    -            descr = cpu.arraydescrof(ARRAY)
    -            descrs_arrays.append(descr)
    +        def add_array(descrs_arrays, (_, T)):
    +            ARRAY = deref(T)
    +            if consider_array(ARRAY):
    +                descr = cpu.arraydescrof(ARRAY)
    +                descrs_arrays.append(descr)
     
    -    for tup in effects:
    -        if tup[0] == "struct":
    -            add_struct(write_descrs_fields, tup)
    -        elif tup[0] == "readstruct":
    -            tupw = ("struct",) + tup[1:]
    -            if tupw not in effects:
    -                add_struct(readonly_descrs_fields, tup)
    -        elif tup[0] == "array":
    -            add_array(write_descrs_arrays, tup)
    -        elif tup[0] == "readarray":
    -            tupw = ("array",) + tup[1:]
    -            if tupw not in effects:
    -                add_array(readonly_descrs_arrays, tup)
    -        else:
    -            assert 0
    +        for tup in effects:
    +            if tup[0] == "struct":
    +                add_struct(write_descrs_fields, tup)
    +            elif tup[0] == "readstruct":
    +                tupw = ("struct",) + tup[1:]
    +                if tupw not in effects:
    +                    add_struct(readonly_descrs_fields, tup)
    +            elif tup[0] == "array":
    +                add_array(write_descrs_arrays, tup)
    +            elif tup[0] == "readarray":
    +                tupw = ("array",) + tup[1:]
    +                if tupw not in effects:
    +                    add_array(readonly_descrs_arrays, tup)
    +            else:
    +                assert 0
    +    #
         return EffectInfo(readonly_descrs_fields,
                           readonly_descrs_arrays,
                           write_descrs_fields,
                           write_descrs_arrays,
                           extraeffect,
                           oopspecindex,
    -                      can_invalidate,
    -                      can_release_gil)
    +                      can_invalidate)
     
     def consider_struct(TYPE, fieldname):
         if fieldType(TYPE, fieldname) is lltype.Void:
    @@ -201,12 +228,13 @@
         def analyze_simple_operation(self, op, graphinfo):
             return op.opname == 'jit_force_quasi_immutable'
     
    -class CanReleaseGILAnalyzer(BoolGraphAnalyzer):
    +class RandomEffectsAnalyzer(BoolGraphAnalyzer):
         def analyze_direct_call(self, graph, seen=None):
    -        releases_gil = False
             if hasattr(graph, "func") and hasattr(graph.func, "_ptr"):
    -            releases_gil = graph.func._ptr._obj.releases_gil
    -        return releases_gil or super(CanReleaseGILAnalyzer, self).analyze_direct_call(graph, seen)
    +            if graph.func._ptr._obj.random_effects_on_gcobjs:
    +                return True
    +        return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph,
    +                                                                      seen)
     
         def analyze_simple_operation(self, op, graphinfo):
             return False
    diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py
    --- a/pypy/jit/codewriter/jtransform.py
    +++ b/pypy/jit/codewriter/jtransform.py
    @@ -1,4 +1,5 @@
     import py
    +
     from pypy.jit.codewriter import support, heaptracker, longlong
     from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets
    @@ -9,7 +10,7 @@
     from pypy.objspace.flow.model import SpaceOperation, Variable, Constant, c_last_exception
     from pypy.rlib import objectmodel
     from pypy.rlib.jit import _we_are_jitted
    -from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass
    +from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass, rffi
     from pypy.rpython.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY
     from pypy.translator.simplify import get_funcobj
     from pypy.translator.unsimplify import varoftype
    @@ -22,6 +23,11 @@
         t = Transformer(cpu, callcontrol, portal_jd)
         t.transform(graph)
     
    +def integer_bounds(size, unsigned):
    +    if unsigned:
    +        return 0, 1 << (8 * size)
    +    else:
    +        return -(1 << (8 * size - 1)), 1 << (8 * size - 1)
     
     class Transformer(object):
         vable_array_vars = None
    @@ -200,7 +206,6 @@
                 self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]]
     
         rewrite_op_cast_pointer = rewrite_op_same_as
    -    rewrite_op_cast_opaque_ptr = rewrite_op_same_as   # rlib.rerased
         def rewrite_op_cast_bool_to_int(self, op): pass
         def rewrite_op_cast_bool_to_uint(self, op): pass
         def rewrite_op_cast_char_to_int(self, op): pass
    @@ -574,6 +579,7 @@
                     pure = '_pure'
             else:
                 pure = ''
    +        self.check_field_access(v_inst.concretetype.TO)
             argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc')
             descr = self.cpu.fielddescrof(v_inst.concretetype.TO,
                                           c_fieldname.value)
    @@ -607,6 +613,7 @@
                 return [SpaceOperation('-live-', [], None),
                         SpaceOperation('setfield_vable_%s' % kind,
                                        [v_inst, descr, v_value], None)]
    +        self.check_field_access(v_inst.concretetype.TO)
             argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc')
             descr = self.cpu.fielddescrof(v_inst.concretetype.TO,
                                           c_fieldname.value)
    @@ -619,6 +626,22 @@
             return (op.args[1].value == 'typeptr' and
                     op.args[0].concretetype.TO._hints.get('typeptr'))
     
    +    def check_field_access(self, STRUCT):
    +        # check against a GcStruct with a nested GcStruct as a first argument
    +        # but which is not an object at all; see metainterp/test/test_loop,
    +        # test_regular_pointers_in_short_preamble.
    +        if not isinstance(STRUCT, lltype.GcStruct):
    +            return
    +        if STRUCT._first_struct() == (None, None):
    +            return
    +        PARENT = STRUCT
    +        while not PARENT._hints.get('typeptr'):
    +            _, PARENT = PARENT._first_struct()
    +            if PARENT is None:
    +                raise NotImplementedError("%r is a GcStruct using nesting but "
    +                                          "not inheriting from object" %
    +                                          (STRUCT,))
    +
         def get_vinfo(self, v_virtualizable):
             if self.callcontrol is None:      # for tests
                 return None
    @@ -791,75 +814,127 @@
                 raise NotImplementedError("cast_ptr_to_int")
     
         def rewrite_op_force_cast(self, op):
    -        assert not self._is_gc(op.args[0])
    -        fromll = longlong.is_longlong(op.args[0].concretetype)
    -        toll   = longlong.is_longlong(op.result.concretetype)
    -        if fromll and toll:
    +        v_arg = op.args[0]
    +        v_result = op.result
    +        assert not self._is_gc(v_arg)
    +
    +        if v_arg.concretetype == v_result.concretetype:
                 return
    -        if fromll:
    -            args = op.args
    -            opname = 'truncate_longlong_to_int'
    -            RESULT = lltype.Signed
    -            v = varoftype(RESULT)
    -            op1 = SpaceOperation(opname, args, v)
    -            op2 = self.rewrite_operation(op1)
    -            oplist = self.force_cast_without_longlong(op2.result, op.result)
    +
    +        float_arg = v_arg.concretetype in [lltype.Float, lltype.SingleFloat]
    +        float_res = v_result.concretetype in [lltype.Float, lltype.SingleFloat]
    +        if not float_arg and not float_res:
    +            # some int -> some int cast
    +            return self._int_to_int_cast(v_arg, v_result)
    +        elif float_arg and float_res:
    +            # some float -> some float cast
    +            return self._float_to_float_cast(v_arg, v_result)
    +        elif not float_arg and float_res:
    +            # some int -> some float
    +            ops = []
    +            v1 = varoftype(lltype.Signed)
    +            oplist = self.rewrite_operation(
    +                SpaceOperation('force_cast', [v_arg], v1)
    +            )
                 if oplist:
    -                return [op2] + oplist
    -            #
    -            # force a renaming to put the correct result in place, even though
    -            # it might be slightly mistyped (e.g. Signed versus Unsigned)
    -            assert op2.result is v
    -            op2.result = op.result
    -            return op2
    -        elif toll:
    -            from pypy.rpython.lltypesystem import rffi
    -            size, unsigned = rffi.size_and_sign(op.args[0].concretetype)
    -            if unsigned:
    +                ops.extend(oplist)
    +            else:
    +                v1 = v_arg
    +            v2 = varoftype(lltype.Float)
    +            op = self.rewrite_operation(
    +                SpaceOperation('cast_int_to_float', [v1], v2)
    +            )
    +            ops.append(op)
    +            op2 = self.rewrite_operation(
    +                SpaceOperation('force_cast', [v2], v_result)
    +            )
    +            if op2:
    +                ops.append(op2)
    +            else:
    +                op.result = v_result
    +            return ops
    +        elif float_arg and not float_res:
    +            # some float -> some int
    +            ops = []
    +            v1 = varoftype(lltype.Float)
    +            op1 = self.rewrite_operation(
    +                SpaceOperation('force_cast', [v_arg], v1)
    +            )
    +            if op1:
    +                ops.append(op1)
    +            else:
    +                v1 = v_arg
    +            v2 = varoftype(lltype.Signed)
    +            op = self.rewrite_operation(
    +                SpaceOperation('cast_float_to_int', [v1], v2)
    +            )
    +            ops.append(op)
    +            oplist = self.rewrite_operation(
    +                SpaceOperation('force_cast', [v2], v_result)
    +            )
    +            if oplist:
    +                ops.extend(oplist)
    +            else:
    +                op.result = v_result
    +            return ops
    +        else:
    +            assert False
    +
    +    def _int_to_int_cast(self, v_arg, v_result):
    +        longlong_arg = longlong.is_longlong(v_arg.concretetype)
    +        longlong_res = longlong.is_longlong(v_result.concretetype)
    +        size1, unsigned1 = rffi.size_and_sign(v_arg.concretetype)
    +        size2, unsigned2 = rffi.size_and_sign(v_result.concretetype)
    +
    +        if longlong_arg and longlong_res:
    +            return
    +        elif longlong_arg:
    +            v = varoftype(lltype.Signed)
    +            op1 = self.rewrite_operation(
    +                SpaceOperation('truncate_longlong_to_int', [v_arg], v)
    +            )
    +            op2 = SpaceOperation('force_cast', [v], v_result)
    +            oplist = self.rewrite_operation(op2)
    +            if not oplist:
    +                op1.result = v_result
    +                oplist = []
    +            return [op1] + oplist
    +        elif longlong_res:
    +            if unsigned1:
                     INTERMEDIATE = lltype.Unsigned
                 else:
                     INTERMEDIATE = lltype.Signed
                 v = varoftype(INTERMEDIATE)
    -            oplist = self.force_cast_without_longlong(op.args[0], v)
    +            op1 = SpaceOperation('force_cast', [v_arg], v)
    +            oplist = self.rewrite_operation(op1)
                 if not oplist:
    -                v = op.args[0]
    +                v = v_arg
                     oplist = []
    -            if unsigned:
    +            if unsigned1:
                     opname = 'cast_uint_to_longlong'
                 else:
                     opname = 'cast_int_to_longlong'
    -            op1 = SpaceOperation(opname, [v], op.result)
    -            op2 = self.rewrite_operation(op1)
    +            op2 = self.rewrite_operation(
    +                SpaceOperation(opname, [v], v_result)
    +            )
                 return oplist + [op2]
    -        else:
    -            return self.force_cast_without_longlong(op.args[0], op.result)
     
    -    def force_cast_without_longlong(self, v_arg, v_result):
    -        from pypy.rpython.lltypesystem.rffi import size_and_sign, sizeof, FLOAT
    -        #
    -        if (v_result.concretetype in (FLOAT, lltype.Float) or
    -            v_arg.concretetype in (FLOAT, lltype.Float)):
    -            assert (v_result.concretetype == lltype.Float and
    -                    v_arg.concretetype == lltype.Float), "xxx unsupported cast"
    +        # We've now, ostensibly, dealt with the longlongs, everything should be
    +        # a Signed or smaller
    +        assert size1 <= rffi.sizeof(lltype.Signed)
    +        assert size2 <= rffi.sizeof(lltype.Signed)
    +
    +        # the target type is LONG or ULONG
    +        if size2 == rffi.sizeof(lltype.Signed):
                 return
    -        #
    -        size2, unsigned2 = size_and_sign(v_result.concretetype)
    -        assert size2 <= sizeof(lltype.Signed)
    -        if size2 == sizeof(lltype.Signed):
    -            return     # the target type is LONG or ULONG
    -        size1, unsigned1 = size_and_sign(v_arg.concretetype)
    -        assert size1 <= sizeof(lltype.Signed)
    -        #
    -        def bounds(size, unsigned):
    -            if unsigned:
    -                return 0, 1<<(8*size)
    -            else:
    -                return -(1<<(8*size-1)), 1<<(8*size-1)
    -        min1, max1 = bounds(size1, unsigned1)
    -        min2, max2 = bounds(size2, unsigned2)
    +
    +        min1, max1 = integer_bounds(size1, unsigned1)
    +        min2, max2 = integer_bounds(size2, unsigned2)
    +
    +        # the target type includes the source range
             if min2 <= min1 <= max1 <= max2:
    -            return     # the target type includes the source range
    -        #
    +            return
    +
             result = []
             if min2:
                 c_min2 = Constant(min2, lltype.Signed)
    @@ -867,17 +942,29 @@
                 result.append(SpaceOperation('int_sub', [v_arg, c_min2], v2))
             else:
                 v2 = v_arg
    -        c_mask = Constant(int((1<<(8*size2))-1), lltype.Signed)
    -        v3 = varoftype(lltype.Signed)
    +        c_mask = Constant(int((1 << (8 * size2)) - 1), lltype.Signed)
    +        if min2:
    +            v3 = varoftype(lltype.Signed)
    +        else:
    +            v3 = v_result
             result.append(SpaceOperation('int_and', [v2, c_mask], v3))
             if min2:
                 result.append(SpaceOperation('int_add', [v3, c_min2], v_result))
    -        else:
    -            result[-1].result = v_result
             return result
     
    +    def _float_to_float_cast(self, v_arg, v_result):
    +        if v_arg.concretetype == lltype.SingleFloat:
    +            assert v_result.concretetype == lltype.Float, "cast %s -> %s" % (
    +                v_arg.concretetype, v_result.concretetype)
    +            return SpaceOperation('cast_singlefloat_to_float', [v_arg],
    +                                  v_result)
    +        if v_result.concretetype == lltype.SingleFloat:
    +            assert v_arg.concretetype == lltype.Float, "cast %s -> %s" % (
    +                v_arg.concretetype, v_result.concretetype)
    +            return SpaceOperation('cast_float_to_singlefloat', [v_arg],
    +                                  v_result)
    +
         def rewrite_op_direct_ptradd(self, op):
    -        from pypy.rpython.lltypesystem import rffi
             # xxx otherwise, not implemented:
             assert op.args[0].concretetype == rffi.CCHARP
             #
    @@ -1423,7 +1510,7 @@
                 extraeffect = EffectInfo.EF_CANNOT_RAISE
             elif oopspec_name.startswith('libffi_call_'):
                 oopspecindex = EffectInfo.OS_LIBFFI_CALL
    -            extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
    +            extraeffect = EffectInfo.EF_RANDOM_EFFECTS
             else:
                 assert False, 'unsupported oopspec: %s' % oopspec_name
             return self._handle_oopspec_call(op, args, oopspecindex, extraeffect)
    diff --git a/pypy/jit/codewriter/longlong.py b/pypy/jit/codewriter/longlong.py
    --- a/pypy/jit/codewriter/longlong.py
    +++ b/pypy/jit/codewriter/longlong.py
    @@ -7,7 +7,8 @@
     """
     
     import sys
    -from pypy.rpython.lltypesystem import lltype
    +from pypy.rpython.lltypesystem import lltype, rffi
    +from pypy.rlib import rarithmetic, longlong2float
     
     
     if sys.maxint > 2147483647:
    @@ -31,8 +32,6 @@
         # ---------- 32-bit platform ----------
         # the type FloatStorage is r_longlong, and conversion is needed
     
    -    from pypy.rlib import rarithmetic, longlong2float
    -
         is_64_bit = False
         supports_longlong = True
         r_float_storage = rarithmetic.r_longlong
    @@ -41,9 +40,19 @@
         getfloatstorage = longlong2float.float2longlong
         getrealfloat    = longlong2float.longlong2float
         gethash         = lambda xll: rarithmetic.intmask(xll - (xll >> 32))
    -    is_longlong     = lambda TYPE: (TYPE == lltype.SignedLongLong or
    -                                    TYPE == lltype.UnsignedLongLong)
    +    is_longlong     = lambda TYPE: (TYPE is lltype.SignedLongLong or
    +                                    TYPE is lltype.UnsignedLongLong)
     
         # -------------------------------------
     
     ZEROF = getfloatstorage(0.0)
    +
    +# ____________________________________________________________
    +
    +def int2singlefloat(x):
    +    x = rffi.r_uint(x)
    +    return longlong2float.uint2singlefloat(x)
    +
    +def singlefloat2int(x):
    +    x = longlong2float.singlefloat2uint(x)
    +    return rffi.cast(lltype.Signed, x)
    diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py
    --- a/pypy/jit/codewriter/policy.py
    +++ b/pypy/jit/codewriter/policy.py
    @@ -12,6 +12,7 @@
             self.unsafe_loopy_graphs = set()
             self.supports_floats = False
             self.supports_longlong = False
    +        self.supports_singlefloats = False
     
         def set_supports_floats(self, flag):
             self.supports_floats = flag
    @@ -19,6 +20,9 @@
         def set_supports_longlong(self, flag):
             self.supports_longlong = flag
     
    +    def set_supports_singlefloats(self, flag):
    +        self.supports_singlefloats = flag
    +
         def dump_unsafe_loops(self):
             f = udir.join("unsafe-loops.txt").open('w')
             strs = [str(graph) for graph in self.unsafe_loopy_graphs]
    @@ -58,8 +62,9 @@
                         func, '_jit_unroll_safe_', False)
     
             unsupported = contains_unsupported_variable_type(graph,
    -                                                         self.supports_floats,
    -                                                         self.supports_longlong)
    +                            self.supports_floats,
    +                            self.supports_longlong,
    +                            self.supports_singlefloats)
             res = see_function and not unsupported
             if res and contains_loop:
                 self.unsafe_loopy_graphs.add(graph)
    @@ -80,17 +85,24 @@
             return res
     
     def contains_unsupported_variable_type(graph, supports_floats,
    -                                       supports_longlong):
    +                                              supports_longlong,
    +                                              supports_singlefloats):
         getkind = history.getkind
         try:
             for block in graph.iterblocks():
                 for v in block.inputargs:
    -                getkind(v.concretetype, supports_floats, supports_longlong)
    +                getkind(v.concretetype, supports_floats,
    +                                        supports_longlong,
    +                                        supports_singlefloats)
                 for op in block.operations:
                     for v in op.args:
    -                    getkind(v.concretetype, supports_floats, supports_longlong)
    +                    getkind(v.concretetype, supports_floats,
    +                                            supports_longlong,
    +                                            supports_singlefloats)
                     v = op.result
    -                getkind(v.concretetype, supports_floats, supports_longlong)
    +                getkind(v.concretetype, supports_floats,
    +                                        supports_longlong,
    +                                        supports_singlefloats)
         except NotImplementedError, e:
             log.WARNING('%s, ignoring graph' % (e,))
             log.WARNING('  %s' % (graph,))
    diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py
    --- a/pypy/jit/codewriter/support.py
    +++ b/pypy/jit/codewriter/support.py
    @@ -86,9 +86,12 @@
         reds_v = op.args[2+numgreens:]
         assert len(reds_v) == numreds
         #
    -    def _sort(args_v):
    +    def _sort(args_v, is_green):
             from pypy.jit.metainterp.history import getkind
             lst = [v for v in args_v if v.concretetype is not lltype.Void]
    +        if is_green:
    +            assert len(lst) == len(args_v), (
    +                "not supported so far: 'greens' variables contain Void")
             _kind2count = {'int': 1, 'ref': 2, 'float': 3}
             lst2 = sorted(lst, key=lambda v: _kind2count[getkind(v.concretetype)])
             # a crash here means that you have to reorder the variable named in
    @@ -97,7 +100,7 @@
             assert lst == lst2
             return lst
         #
    -    return (_sort(greens_v), _sort(reds_v))
    +    return (_sort(greens_v, True), _sort(reds_v, False))
     
     def maybe_on_top_of_llinterp(rtyper, fnptr):
         # Run a generated graph on top of the llinterp for testing.
    diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py
    --- a/pypy/jit/codewriter/test/test_call.py
    +++ b/pypy/jit/codewriter/test/test_call.py
    @@ -191,4 +191,4 @@
         [block, _] = list(f_graph.iterblocks())
         [op] = block.operations
         call_descr = cc.getcalldescr(op)
    -    assert call_descr.extrainfo.can_release_gil
    \ No newline at end of file
    +    assert call_descr.extrainfo.has_random_effects()
    diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py
    --- a/pypy/jit/codewriter/test/test_codewriter.py
    +++ b/pypy/jit/codewriter/test/test_codewriter.py
    @@ -5,7 +5,7 @@
     from pypy.rpython.lltypesystem import lltype, llmemory, rffi
     
     class FakeCallDescr(AbstractDescr):
    -    def __init__(self, FUNC, ARGS, RESULT, effectinfo=None):
    +    def __init__(self, FUNC, ARGS, RESULT, effectinfo):
             self.FUNC = FUNC
             self.ARGS = ARGS
             self.RESULT = RESULT
    diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py
    --- a/pypy/jit/codewriter/test/test_flatten.py
    +++ b/pypy/jit/codewriter/test/test_flatten.py
    @@ -50,7 +50,7 @@
         def __init__(self, rtyper):
             rtyper._builtin_func_for_spec_cache = FakeDict()
             self.rtyper = rtyper
    -    def calldescrof(self, FUNC, ARGS, RESULT):
    +    def calldescrof(self, FUNC, ARGS, RESULT, effectinfo):
             return FakeDescr()
         def fielddescrof(self, STRUCT, name):
             return FakeDescr()
    @@ -324,7 +324,7 @@
         def test_exc_exitswitch(self):
             def g(i):
                 pass
    -        
    +
             def f(i):
                 try:
                     g(i)
    @@ -854,13 +854,51 @@
                 int_return %i0
             """, transform=True)
     
    -    def test_force_cast_float(self):
    +    def test_force_cast_floats(self):
             from pypy.rpython.lltypesystem import rffi
    +        # Caststs to lltype.Float
             def f(n):
                 return rffi.cast(lltype.Float, n)
             self.encoding_test(f, [12.456], """
                 float_return %f0
             """, transform=True)
    +        self.encoding_test(f, [rffi.cast(rffi.SIGNEDCHAR, 42)], """
    +            cast_int_to_float %i0 -> %f0
    +            float_return %f0
    +        """, transform=True)
    +
    +        # Casts to lltype.SingleFloat
    +        def g(n):
    +            return rffi.cast(lltype.SingleFloat, n)
    +        self.encoding_test(g, [12.456], """
    +            cast_float_to_singlefloat %f0 -> %i0
    +            int_return %i0
    +        """, transform=True)
    +        self.encoding_test(g, [rffi.cast(rffi.SIGNEDCHAR, 42)], """
    +            cast_int_to_float %i0 -> %f0
    +            cast_float_to_singlefloat %f0 -> %i1
    +            int_return %i1
    +        """, transform=True)
    +
    +        # Casts from floats
    +        def f(n):
    +            return rffi.cast(rffi.SIGNEDCHAR, n)
    +        self.encoding_test(f, [12.456], """
    +            cast_float_to_int %f0 -> %i0
    +            int_sub %i0, $-128 -> %i1
    +            int_and %i1, $255 -> %i2
    +            int_add %i2, $-128 -> %i3
    +            int_return %i3
    +        """, transform=True)
    +        self.encoding_test(f, [rffi.cast(lltype.SingleFloat, 12.456)], """
    +            cast_singlefloat_to_float %i0 -> %f0
    +            cast_float_to_int %f0 -> %i1
    +            int_sub %i1, $-128 -> %i2
    +            int_and %i2, $255 -> %i3
    +            int_add %i3, $-128 -> %i4
    +            int_return %i4
    +        """, transform=True)
    +
     
         def test_direct_ptradd(self):
             from pypy.rpython.lltypesystem import rffi
    diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py
    --- a/pypy/jit/codewriter/test/test_jtransform.py
    +++ b/pypy/jit/codewriter/test/test_jtransform.py
    @@ -1048,3 +1048,13 @@
             assert op1.opname == 'jit_force_quasi_immutable'
             assert op1.args[0] == v_x
             assert op1.args[1] == ('fielddescr', STRUCT, 'mutate_x')
    +
    +def test_no_gcstruct_nesting_outside_of_OBJECT():
    +    PARENT = lltype.GcStruct('parent')
    +    STRUCT = lltype.GcStruct('struct', ('parent', PARENT),
    +                                       ('x', lltype.Signed))
    +    v_x = varoftype(lltype.Ptr(STRUCT))
    +    op = SpaceOperation('getfield', [v_x, Constant('x', lltype.Void)],
    +                        varoftype(lltype.Signed))
    +    tr = Transformer(None, None)
    +    raises(NotImplementedError, tr.rewrite_operation, op)
    diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py
    --- a/pypy/jit/codewriter/test/test_longlong.py
    +++ b/pypy/jit/codewriter/test/test_longlong.py
    @@ -230,3 +230,18 @@
                 assert list(op1.args[3]) == []
                 assert list(op1.args[4]) == vlist
                 assert op1.result == v_result
    +
    +
    +##def test_singlefloat_constants():
    +##    v_x = varoftype(TYPE)
    +##    vlist = [v_x, const(rffi.cast(TYPE, 7))]
    +##    v_result = varoftype(TYPE)
    +##    op = SpaceOperation('llong_add', vlist, v_result)
    +##    tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
    +##    op1 = tr.rewrite_operation(op)
    +##    #
    +##    assert op1.opname == 'residual_call_irf_f'
    +##    assert list(op1.args[2]) == []
    +##    assert list(op1.args[3]) == []
    +##    assert list(op1.args[4]) == vlist
    +##    assert op1.result == v_result
    diff --git a/pypy/jit/codewriter/test/test_policy.py b/pypy/jit/codewriter/test/test_policy.py
    --- a/pypy/jit/codewriter/test/test_policy.py
    +++ b/pypy/jit/codewriter/test/test_policy.py
    @@ -12,24 +12,30 @@
         graph = support.getgraph(f, [5])
         for sf in [False, True]:
             for sll in [False, True]:
    -            assert not contains_unsupported_variable_type(graph, sf, sll)
    +            for ssf in [False, True]:
    +                assert not contains_unsupported_variable_type(graph, sf,
    +                                                              sll, ssf)
         #
         graph = support.getgraph(f, [5.5])
         for sf in [False, True]:
             for sll in [False, True]:
    -            res = contains_unsupported_variable_type(graph, sf, sll)
    -            assert res is not sf
    +            for ssf in [False, True]:
    +                res = contains_unsupported_variable_type(graph, sf, sll, ssf)
    +                assert res is not sf
         #
         graph = support.getgraph(f, [r_singlefloat(5.5)])
         for sf in [False, True]:
             for sll in [False, True]:
    -            assert contains_unsupported_variable_type(graph, sf, sll)
    +            for ssf in [False, True]:
    +                res = contains_unsupported_variable_type(graph, sf, sll, ssf)
    +                assert res == (not ssf)
         #
         graph = support.getgraph(f, [r_longlong(5)])
         for sf in [False, True]:
             for sll in [False, True]:
    -            res = contains_unsupported_variable_type(graph, sf, sll)
    -            assert res == (sys.maxint == 2147483647 and not sll)
    +            for ssf in [False, True]:
    +                res = contains_unsupported_variable_type(graph, sf, sll, ssf)
    +                assert res == (sys.maxint == 2147483647 and not sll)
     
     
     def test_regular_function():
    diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py
    --- a/pypy/jit/metainterp/blackhole.py
    +++ b/pypy/jit/metainterp/blackhole.py
    @@ -499,6 +499,9 @@
         @arguments("r", returns="i")
         def bhimpl_ptr_nonzero(a):
             return bool(a)
    +    @arguments("r", returns="r")
    +    def bhimpl_cast_opaque_ptr(a):
    +        return a
     
         @arguments("i", returns="i")
         def bhimpl_int_copy(a):
    @@ -622,6 +625,19 @@
             x = float(a)
             return longlong.getfloatstorage(x)
     
    +    @arguments("f", returns="i")
    +    def bhimpl_cast_float_to_singlefloat(a):
    +        from pypy.rlib.rarithmetic import r_singlefloat
    +        a = longlong.getrealfloat(a)
    +        a = r_singlefloat(a)
    +        return longlong.singlefloat2int(a)
    +
    +    @arguments("i", returns="f")
    +    def bhimpl_cast_singlefloat_to_float(a):
    +        a = longlong.int2singlefloat(a)
    +        a = float(a)
    +        return longlong.getfloatstorage(a)
    +
         # ----------
         # control flow operations
     
    diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
    --- a/pypy/jit/metainterp/compile.py
    +++ b/pypy/jit/metainterp/compile.py
    @@ -137,6 +137,10 @@
                 jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp(
                     greenkey, loop.preamble.token)
                 record_loop_or_bridge(metainterp_sd, loop.preamble)
    +        elif token.short_preamble:
    +            short = token.short_preamble[-1]
    +            metainterp_sd.logger_ops.log_short_preamble(short.inputargs,
    +                                                        short.operations)
             return token
         else:
             send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop,
    @@ -637,6 +641,7 @@
             debug_print("compile_new_bridge: got an InvalidLoop")
             # XXX I am fairly convinced that optimize_bridge cannot actually raise
             # InvalidLoop
    +        debug_print('InvalidLoop in compile_new_bridge')
             return None
         # Did it work?
         if target_loop_token is not None:
    @@ -668,10 +673,9 @@
         def handle_fail(self, metainterp_sd, jitdriver_sd):
             cpu = metainterp_sd.cpu
             exception = cpu.grab_exc_value()
    +        assert exception, "PropagateExceptionDescr: no exception??"
             raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception)
     
    -propagate_exception_descr = PropagateExceptionDescr()
    -
     def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes,
                              memory_manager=None):
         """Make a LoopToken that corresponds to assembler code that just
    @@ -705,7 +709,7 @@
             finishargs = []
         #
         jd = jitdriver_sd
    -    faildescr = propagate_exception_descr
    +    faildescr = PropagateExceptionDescr()
         operations = [
             ResOperation(rop.CALL, callargs, result, descr=jd.portal_calldescr),
             ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr),
    diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py
    --- a/pypy/jit/metainterp/executor.py
    +++ b/pypy/jit/metainterp/executor.py
    @@ -47,7 +47,7 @@
         func = argboxes[0].getint()
         # do the call using the correct function from the cpu
         rettype = descr.get_return_type()
    -    if rettype == INT:
    +    if rettype == INT or rettype == 'S':       # *S*ingle float
             try:
                 result = cpu.bh_call_i(func, descr, args_i, args_r, args_f)
             except Exception, e:
    @@ -61,7 +61,7 @@
                 metainterp.execute_raised(e)
                 result = NULL
             return BoxPtr(result)
    -    if rettype == FLOAT or rettype == 'L':
    +    if rettype == FLOAT or rettype == 'L':     # *L*ong long
             try:
                 result = cpu.bh_call_f(func, descr, args_i, args_r, args_f)
             except Exception, e:
    diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py
    --- a/pypy/jit/metainterp/history.py
    +++ b/pypy/jit/metainterp/history.py
    @@ -20,12 +20,16 @@
     
     FAILARGS_LIMIT = 1000
     
    -def getkind(TYPE, supports_floats=True, supports_longlong=True):
    +def getkind(TYPE, supports_floats=True,
    +                  supports_longlong=True,
    +                  supports_singlefloats=True):
         if TYPE is lltype.Void:
             return "void"
         elif isinstance(TYPE, lltype.Primitive):
             if TYPE is lltype.Float and supports_floats:
                 return 'float'
    +        if TYPE is lltype.SingleFloat and supports_singlefloats:
    +            return 'int'     # singlefloats are stored in an int
             if TYPE in (lltype.Float, lltype.SingleFloat):
                 raise NotImplementedError("type %s not supported" % TYPE)
             # XXX fix this for oo...
    @@ -145,6 +149,7 @@
             """ Implement in call descr.
             Must return INT, REF, FLOAT, or 'v' for void.
             On 32-bit (hack) it can also be 'L' for longlongs.
    +        Additionally it can be 'S' for singlefloats.
             """
             raise NotImplementedError
     
    diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py
    --- a/pypy/jit/metainterp/optimize.py
    +++ b/pypy/jit/metainterp/optimize.py
    @@ -1,4 +1,4 @@
    -from pypy.rlib.debug import debug_start, debug_stop
    +from pypy.rlib.debug import debug_start, debug_stop, debug_print
     from pypy.jit.metainterp.jitexc import JitException
     
     class InvalidLoop(JitException):
    diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py
    --- a/pypy/jit/metainterp/optimizeopt/__init__.py
    +++ b/pypy/jit/metainterp/optimizeopt/__init__.py
    @@ -33,10 +33,6 @@
             if name in enable_opts:
                 if opt is not None:
                     o = opt()
    -                if unroll and name == 'string':
    -                    o.enabled = False
    -                # FIXME: Workaround to disable string optimisation
    -                # during preamble but to keep it during the loop
                     optimizations.append(o)
                 elif name == 'ffi' and config.translation.jit_ffi:
                     # we cannot put the class directly in the unrolling_iterable,
    diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py
    --- a/pypy/jit/metainterp/optimizeopt/fficall.py
    +++ b/pypy/jit/metainterp/optimizeopt/fficall.py
    @@ -1,12 +1,11 @@
     from pypy.rpython.annlowlevel import cast_base_ptr_to_instance
     from pypy.rlib.objectmodel import we_are_translated
     from pypy.rlib.libffi import Func
    -from pypy.rlib.debug import debug_start, debug_stop, debug_print
    +from pypy.rlib.debug import debug_print
     from pypy.jit.codewriter.effectinfo import EffectInfo
     from pypy.jit.metainterp.resoperation import rop, ResOperation
     from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
     from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
    -from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind
     
     
     class FuncInfo(object):
    @@ -19,28 +18,27 @@
         def __init__(self, funcval, cpu, prepare_op):
             self.funcval = funcval
             self.opargs = []
    -        argtypes, restype = self._get_signature(funcval)
    -        try:
    -            self.descr = cpu.calldescrof_dynamic(argtypes, restype)
    -        except UnsupportedKind:
    -            # e.g., I or U for long longs
    -            self.descr = None
    +        argtypes, restype, flags = self._get_signature(funcval)
    +        self.descr = cpu.calldescrof_dynamic(argtypes, restype,
    +                                             EffectInfo.MOST_GENERAL,
    +                                             ffi_flags=flags)
    +        # ^^^ may be None if unsupported
             self.prepare_op = prepare_op
             self.delayed_ops = []
     
         def _get_signature(self, funcval):
             """
    -        given the funcval, return a tuple (argtypes, restype), where the
    -        actuall types are libffi.types.*
    +        given the funcval, return a tuple (argtypes, restype, flags), where
    +        the actuall types are libffi.types.*
     
             The implementation is tricky because we have three possible cases:
     
             - translated: the easiest case, we can just cast back the pointer to
    -          the original Func instance and read .argtypes and .restype
    +          the original Func instance and read .argtypes, .restype and .flags
     
             - completely untranslated: this is what we get from test_optimizeopt
               tests. funcval contains a FakeLLObject whose _fake_class is Func,
    -          and we can just get .argtypes and .restype
    +          and we can just get .argtypes, .restype and .flags
     
             - partially translated: this happens when running metainterp tests:
               funcval contains the low-level equivalent of a Func, and thus we
    @@ -52,10 +50,10 @@
             llfunc = funcval.box.getref_base()
             if we_are_translated():
                 func = cast_base_ptr_to_instance(Func, llfunc)
    -            return func.argtypes, func.restype
    +            return func.argtypes, func.restype, func.flags
             elif getattr(llfunc, '_fake_class', None) is Func:
                 # untranslated
    -            return llfunc.argtypes, llfunc.restype
    +            return llfunc.argtypes, llfunc.restype, llfunc.flags
             else:
                 # partially translated
                 # llfunc contains an opaque pointer to something like the following:
    @@ -66,7 +64,7 @@
                 # because we don't have the exact TYPE to cast to.  Instead, we
                 # just fish it manually :-(
                 f = llfunc._obj.container
    -            return f.inst_argtypes, f.inst_restype
    +            return f.inst_argtypes, f.inst_restype, f.inst_flags
     
     
     class OptFfiCall(Optimization):
    @@ -78,18 +76,9 @@
             else:
                 self.logops = None
     
    -    def propagate_begin_forward(self):
    -        debug_start('jit-log-ffiopt')
    -        Optimization.propagate_begin_forward(self)
    -
    -    def propagate_end_forward(self):
    -        debug_stop('jit-log-ffiopt')
    -        Optimization.propagate_end_forward(self)
    -
    -    def reconstruct_for_next_iteration(self, optimizer, valuemap):
    +    def new(self):
             return OptFfiCall()
    -        # FIXME: Should any status be saved for next iteration?
    -
    +    
         def begin_optimization(self, funcval, op):
             self.rollback_maybe('begin_optimization', op)
             self.funcinfo = FuncInfo(funcval, self.optimizer.cpu, op)
    @@ -184,7 +173,8 @@
         def do_call(self, op):
             funcval = self._get_funcval(op)
             funcinfo = self.funcinfo
    -        if not funcinfo or funcinfo.funcval is not funcval:
    +        if (not funcinfo or funcinfo.funcval is not funcval or
    +            funcinfo.descr is None):
                 return [op] # cannot optimize
             funcsymval = self.getvalue(op.getarg(2))
             arglist = [funcsymval.force_box()]
    @@ -207,9 +197,7 @@
     
         def _get_oopspec(self, op):
             effectinfo = op.getdescr().get_extra_info()
    -        if effectinfo is not None:
    -            return effectinfo.oopspecindex
    -        return EffectInfo.OS_NONE
    +        return effectinfo.oopspecindex
     
         def _get_funcval(self, op):
             return self.getvalue(op.getarg(1))
    diff --git a/pypy/jit/metainterp/optimizeopt/generalize.py b/pypy/jit/metainterp/optimizeopt/generalize.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/jit/metainterp/optimizeopt/generalize.py
    @@ -0,0 +1,19 @@
    +from pypy.jit.metainterp.optimizeopt.optimizer import MININT, MAXINT
    +
    +class GeneralizationStrategy(object):
    +    def __init__(self, optimizer):
    +        self.optimizer = optimizer
    +
    +    def apply(self):
    +        raise NotImplementedError
    +
    +class KillHugeIntBounds(GeneralizationStrategy):
    +    def apply(self):
    +        for v in self.optimizer.values.values():
    +            if v.is_constant():
    +                continue
    +            if v.intbound.lower < MININT/2:
    +                v.intbound.lower = MININT
    +            if v.intbound.upper > MAXINT/2:
    +                v.intbound.upper = MAXINT
    +          
    diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
    --- a/pypy/jit/metainterp/optimizeopt/heap.py
    +++ b/pypy/jit/metainterp/optimizeopt/heap.py
    @@ -1,9 +1,10 @@
     import os
     
     from pypy.jit.metainterp.jitexc import JitException
    -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
    +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY
    +from pypy.jit.metainterp.history import ConstInt, Const
     from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
    -from pypy.jit.metainterp.resoperation import rop
    +from pypy.jit.metainterp.resoperation import rop, ResOperation
     from pypy.rlib.objectmodel import we_are_translated
     
     
    @@ -24,6 +25,7 @@
             #      'cached_fields'.
             #
             self._cached_fields = {}
    +        self._cached_fields_getfield_op = {}        
             self._lazy_setfield = None
             self._lazy_setfield_registered = False
     
    @@ -70,9 +72,10 @@
             else:
                 return self._cached_fields.get(structvalue, None)
     
    -    def remember_field_value(self, structvalue, fieldvalue):
    +    def remember_field_value(self, structvalue, fieldvalue, getfield_op=None):
             assert self._lazy_setfield is None
             self._cached_fields[structvalue] = fieldvalue
    +        self._cached_fields_getfield_op[structvalue] = getfield_op        
     
         def force_lazy_setfield(self, optheap, can_cache=True):
             op = self._lazy_setfield
    @@ -81,7 +84,7 @@
                 # Now we clear _cached_fields, because actually doing the
                 # setfield might impact any of the stored result (because of
                 # possible aliasing).
    -            self._cached_fields.clear()
    +            self.clear()
                 self._lazy_setfield = None
                 optheap.next_optimization.propagate_forward(op)
                 if not can_cache:
    @@ -91,19 +94,49 @@
                 # field.
                 structvalue = optheap.getvalue(op.getarg(0))
                 fieldvalue  = optheap.getvalue(op.getarglist()[-1])
    -            self.remember_field_value(structvalue, fieldvalue)
    +            self.remember_field_value(structvalue, fieldvalue, op)
             elif not can_cache:
    -            self._cached_fields.clear()
    +            self.clear()
     
    -    def get_reconstructed(self, optimizer, valuemap):
    -        assert self._lazy_setfield is None
    -        cf = CachedField()
    -        for structvalue, fieldvalue in self._cached_fields.iteritems():
    -            structvalue2 = structvalue.get_reconstructed(optimizer, valuemap)
    -            fieldvalue2  = fieldvalue .get_reconstructed(optimizer, valuemap)
    -            cf._cached_fields[structvalue2] = fieldvalue2
    -        return cf
    +    def clear(self):
    +        self._cached_fields.clear()
    +        self._cached_fields_getfield_op.clear()
     
    +    def turned_constant(self, newvalue, value):
    +        if newvalue not in self._cached_fields and value in self._cached_fields:
    +            self._cached_fields[newvalue] = self._cached_fields[value]
    +            op = self._cached_fields_getfield_op[value].clone()
    +            constbox = value.box
    +            assert isinstance(constbox, Const)
    +            op.setarg(0, constbox)
    +            self._cached_fields_getfield_op[newvalue] = op
    +        for structvalue in self._cached_fields.keys():
    +            if self._cached_fields[structvalue] is value:
    +                self._cached_fields[structvalue] = newvalue
    +
    +    def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr):
    +        if self._lazy_setfield is not None:
    +            return
    +        for structvalue in self._cached_fields_getfield_op.keys():
    +            op = self._cached_fields_getfield_op[structvalue]
    +            if not op:
    +                continue
    +            if optimizer.getvalue(op.getarg(0)) in optimizer.opaque_pointers:
    +                continue
    +            if structvalue in self._cached_fields:
    +                if op.getopnum() == rop.SETFIELD_GC:
    +                    result = op.getarg(1)
    +                    if isinstance(result, Const):
    +                        newresult = result.clonebox()
    +                        optimizer.make_constant(newresult, result)
    +                        result = newresult
    +                    getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)],
    +                                         result, op.getdescr())
    +                    getop = shortboxes.add_potential(getop)
    +                    self._cached_fields_getfield_op[structvalue] = getop
    +                    self._cached_fields[structvalue] = optimizer.getvalue(result)
    +                elif op.result is not None:
    +                    shortboxes.add_potential(op)
     
     class BogusPureField(JitException):
         pass
    @@ -122,24 +155,32 @@
             self._remove_guard_not_invalidated = False
             self._seen_guard_not_invalidated = False
     
    -    def reconstruct_for_next_iteration(self, optimizer, valuemap):
    -        new = OptHeap()
    +    def force_at_end_of_preamble(self):
    +        self.force_all_lazy_setfields_and_arrayitems()
     
    -        if True:
    -            self.force_all_lazy_setfields_and_arrayitems()
    -        else:
    -            assert 0   # was: new.lazy_setfields = self.lazy_setfields
    +    def flush(self):
    +        self.force_all_lazy_setfields_and_arrayitems()
     
    -        for descr, d in self.cached_fields.items():
    -            new.cached_fields[descr] = d.get_reconstructed(optimizer, valuemap)
    +    def new(self):
    +        return OptHeap()
    +        
    +    def produce_potential_short_preamble_ops(self, sb):
    +        descrkeys = self.cached_fields.keys()
    +        if not we_are_translated():
    +            # XXX Pure operations on boxes that are cached in several places will
    +            #     only be removed from the peeled loop when read from the first
    +            #     place discovered here. This is far from ideal, as it makes
    +            #     the effectiveness of our optimization a bit random. It should
    +            #     however always generate correct results. For tests we don't
    +            #     want this randomness.
    +            descrkeys.sort(key=str, reverse=True)
    +        for descr in descrkeys:
    +            d = self.cached_fields[descr]
    +            d.produce_potential_short_preamble_ops(self.optimizer, sb, descr)
     
             for descr, submap in self.cached_arrayitems.items():
    -            newdict = {}
                 for index, d in submap.items():
    -                newdict[index] = d.get_reconstructed(optimizer, valuemap)
    -            new.cached_arrayitems[descr] = newdict
    -
    -        return new
    +                d.produce_potential_short_preamble_ops(self.optimizer, sb, descr)
     
         def clean_caches(self):
             del self._lazy_setfields_and_arrayitems[:]
    @@ -194,43 +235,43 @@
                 opnum == rop.CALL_RELEASE_GIL or
                 opnum == rop.CALL_ASSEMBLER):
                 if opnum == rop.CALL_ASSEMBLER:
    -                effectinfo = None
    +                self._seen_guard_not_invalidated = False
                 else:
                     effectinfo = op.getdescr().get_extra_info()
    -            if effectinfo is None or effectinfo.check_can_invalidate():
    -                self._seen_guard_not_invalidated = False
    -            if effectinfo is not None and not effectinfo.has_random_effects():
    -                # XXX we can get the wrong complexity here, if the lists
    -                # XXX stored on effectinfo are large
    -                for fielddescr in effectinfo.readonly_descrs_fields:
    -                    self.force_lazy_setfield(fielddescr)
    -                for arraydescr in effectinfo.readonly_descrs_arrays:
    -                    self.force_lazy_setarrayitem(arraydescr)
    -                for fielddescr in effectinfo.write_descrs_fields:
    -                    self.force_lazy_setfield(fielddescr, can_cache=False)
    -                for arraydescr in effectinfo.write_descrs_arrays:
    -                    self.force_lazy_setarrayitem(arraydescr, can_cache=False)
    -                if effectinfo.check_forces_virtual_or_virtualizable():
    -                    vrefinfo = self.optimizer.metainterp_sd.virtualref_info
    -                    self.force_lazy_setfield(vrefinfo.descr_forced)
    -                    # ^^^ we only need to force this field; the other fields
    -                    # of virtualref_info and virtualizable_info are not gcptrs.
    -                return
    +                if effectinfo.check_can_invalidate():
    +                    self._seen_guard_not_invalidated = False
    +                if not effectinfo.has_random_effects():
    +                    self.force_from_effectinfo(effectinfo)
    +                    return
             self.force_all_lazy_setfields_and_arrayitems()
             self.clean_caches()
     
    +    def force_from_effectinfo(self, effectinfo):
    +        # XXX we can get the wrong complexity here, if the lists
    +        # XXX stored on effectinfo are large
    +        for fielddescr in effectinfo.readonly_descrs_fields:
    +            self.force_lazy_setfield(fielddescr)
    +        for arraydescr in effectinfo.readonly_descrs_arrays:
    +            self.force_lazy_setarrayitem(arraydescr)
    +        for fielddescr in effectinfo.write_descrs_fields:
    +            self.force_lazy_setfield(fielddescr, can_cache=False)
    +        for arraydescr in effectinfo.write_descrs_arrays:
    +            self.force_lazy_setarrayitem(arraydescr, can_cache=False)
    +        if effectinfo.check_forces_virtual_or_virtualizable():
    +            vrefinfo = self.optimizer.metainterp_sd.virtualref_info
    +            self.force_lazy_setfield(vrefinfo.descr_forced)
    +            # ^^^ we only need to force this field; the other fields
    +            # of virtualref_info and virtualizable_info are not gcptrs.
     
         def turned_constant(self, value):
             assert value.is_constant()
             newvalue = self.getvalue(value.box)
             if value is not newvalue:
                 for cf in self.cached_fields.itervalues():
    -                if value in cf._cached_fields:
    -                    cf._cached_fields[newvalue] = cf._cached_fields[value]
    +                cf.turned_constant(newvalue, value)
                 for submap in self.cached_arrayitems.itervalues():
                     for cf in submap.itervalues():
    -                    if value in cf._cached_fields:
    -                        cf._cached_fields[newvalue] = cf._cached_fields[value]
    +                    cf.turned_constant(newvalue, value)
     
         def force_lazy_setfield(self, descr, can_cache=True):
             try:
    @@ -239,13 +280,14 @@
                 return
             cf.force_lazy_setfield(self, can_cache)
     
    -    def force_lazy_setarrayitem(self, arraydescr, can_cache=True):
    +    def force_lazy_setarrayitem(self, arraydescr, indexvalue=None, can_cache=True):
             try:
                 submap = self.cached_arrayitems[arraydescr]
             except KeyError:
                 return
    -        for cf in submap.values():
    -            cf.force_lazy_setfield(self, can_cache)
    +        for idx, cf in submap.iteritems():
    +            if indexvalue is None or indexvalue.intbound.contains(idx):
    +                cf.force_lazy_setfield(self, can_cache)
     
         def fixup_guard_situation(self):
             # hackish: reverse the order of the last two operations if it makes
    @@ -332,7 +374,7 @@
             self.emit_operation(op)
             # then remember the result of reading the field
             fieldvalue = self.getvalue(op.result)
    -        cf.remember_field_value(structvalue, fieldvalue)
    +        cf.remember_field_value(structvalue, fieldvalue, op)
     
         def optimize_SETFIELD_GC(self, op):
             if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)],
    @@ -349,6 +391,7 @@
             indexvalue = self.getvalue(op.getarg(1))
             cf = None
             if indexvalue.is_constant():
    +            arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint())
                 # use the cache on (arraydescr, index), which is a constant
                 cf = self.arrayitem_cache(op.getdescr(), indexvalue.box.getint())
                 fieldvalue = cf.getfield_from_cache(self, arrayvalue)
    @@ -357,14 +400,14 @@
                     return
             else:
                 # variable index, so make sure the lazy setarrayitems are done
    -            self.force_lazy_setarrayitem(op.getdescr())
    +            self.force_lazy_setarrayitem(op.getdescr(), indexvalue=indexvalue)
             # default case: produce the operation
             arrayvalue.ensure_nonnull()
             self.emit_operation(op)
             # the remember the result of reading the array item
             if cf is not None:
                 fieldvalue = self.getvalue(op.result)
    -            cf.remember_field_value(arrayvalue, fieldvalue)
    +            cf.remember_field_value(arrayvalue, fieldvalue, op)
     
         def optimize_SETARRAYITEM_GC(self, op):
             if self.has_pure_result(rop.GETARRAYITEM_GC_PURE, [op.getarg(0),
    @@ -376,12 +419,14 @@
             #
             indexvalue = self.getvalue(op.getarg(1))
             if indexvalue.is_constant():
    +            arrayvalue = self.getvalue(op.getarg(0))
    +            arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint())
                 # use the cache on (arraydescr, index), which is a constant
                 cf = self.arrayitem_cache(op.getdescr(), indexvalue.box.getint())
                 cf.do_setfield(self, op)
             else:
                 # variable index, so make sure the lazy setarrayitems are done
    -            self.force_lazy_setarrayitem(op.getdescr(), can_cache=False)
    +            self.force_lazy_setarrayitem(op.getdescr(), indexvalue=indexvalue, can_cache=False)
                 # and then emit the operation
                 self.emit_operation(op)
     
    diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py
    --- a/pypy/jit/metainterp/optimizeopt/intbounds.py
    +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py
    @@ -1,7 +1,8 @@
    +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0, \
    +                                                  MODE_ARRAY, MODE_STR, MODE_UNICODE
     from pypy.jit.metainterp.history import ConstInt
     from pypy.jit.metainterp.optimizeopt.intutils import (IntBound, IntLowerBound,
         IntUpperBound)
    -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0
     from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
     from pypy.jit.metainterp.resoperation import rop
     
    @@ -14,18 +15,17 @@
             self.posponedop = None
             self.nextop = None
     
    -    def reconstruct_for_next_iteration(self, optimizer, valuemap):
    +    def new(self):
             assert self.posponedop is None
    -        return self
    +        return OptIntBounds()
    +        
    +    def flush(self):
    +        assert self.posponedop is None
     
         def setup(self):
             self.posponedop = None
             self.nextop = None
     
    -    def reconstruct_for_next_iteration(self, optimizer, valuemap):
    -        assert self.posponedop is None
    -        return self
    -
         def propagate_forward(self, op):
             if op.is_ovf():
                 self.posponedop = op
    @@ -125,6 +125,17 @@
             r = self.getvalue(op.result)
             r.intbound.intersect(v1.intbound.div_bound(v2.intbound))
     
    +    def optimize_INT_MOD(self, op):
    +        self.emit_operation(op)
    +        v2 = self.getvalue(op.getarg(1))
    +        if v2.is_constant():
    +            val = v2.box.getint()
    +            r = self.getvalue(op.result)
    +            if val < 0:
    +                val = -val
    +            r.intbound.make_gt(IntBound(-val, -val))
    +            r.intbound.make_lt(IntBound(val, val))
    +
         def optimize_INT_LSHIFT(self, op):
             v1 = self.getvalue(op.getarg(0))
             v2 = self.getvalue(op.getarg(1))
    @@ -275,10 +286,27 @@
     
         def optimize_ARRAYLEN_GC(self, op):
             self.emit_operation(op)
    -        v1 = self.getvalue(op.result)
    -        v1.intbound.make_ge(IntLowerBound(0))
    +        array  = self.getvalue(op.getarg(0))
    +        result = self.getvalue(op.result)
    +        array.make_len_gt(MODE_ARRAY, op.getdescr(), -1)
    +        array.lenbound.bound.intersect(result.intbound)
    +        result.intbound = array.lenbound.bound
     
    -    optimize_STRLEN = optimize_UNICODELEN = optimize_ARRAYLEN_GC
    +    def optimize_STRLEN(self, op):
    +        self.emit_operation(op)
    +        array  = self.getvalue(op.getarg(0))
    +        result = self.getvalue(op.result)
    +        array.make_len_gt(MODE_STR, op.getdescr(), -1)
    +        array.lenbound.bound.intersect(result.intbound)
    +        result.intbound = array.lenbound.bound
    +
    +    def optimize_UNICODELEN(self, op):
    +        self.emit_operation(op)
    +        array  = self.getvalue(op.getarg(0))
    +        result = self.getvalue(op.result)
    +        array.make_len_gt(MODE_UNICODE, op.getdescr(), -1)
    +        array.lenbound.bound.intersect(result.intbound)
    +        result.intbound = array.lenbound.bound
     
         def optimize_STRGETITEM(self, op):
             self.emit_operation(op)
    diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py
    --- a/pypy/jit/metainterp/optimizeopt/intutils.py
    +++ b/pypy/jit/metainterp/optimizeopt/intutils.py
    @@ -1,4 +1,9 @@
     from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift, LONG_BIT
    +from pypy.jit.metainterp.resoperation import rop, ResOperation
    +from pypy.jit.metainterp.history import BoxInt, ConstInt
    +import sys
    +MAXINT = sys.maxint
    +MININT = -sys.maxint - 1
     
     class IntBound(object):
         _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower')
    @@ -210,11 +215,11 @@
             
         def __repr__(self):
             if self.has_lower:
    -            l = '%4d' % self.lower
    +            l = '%d' % self.lower
             else:
                 l = '-Inf'
             if self.has_upper:
    -            u = '%3d' % self.upper
    +            u = '%d' % self.upper
             else:
                 u = 'Inf'
             return '%s <= x <= %s' % (l, u)
    @@ -224,7 +229,24 @@
             res.has_lower = self.has_lower
             res.has_upper = self.has_upper
             return res
    +
    +    def make_guards(self, box, guards):
    +        if self.has_lower and self.lower > MININT:
    +            bound = self.lower
    +            res = BoxInt()
    +            op = ResOperation(rop.INT_GE, [box, ConstInt(bound)], res)
    +            guards.append(op)
    +            op = ResOperation(rop.GUARD_TRUE, [res], None)
    +            guards.append(op)
    +        if self.has_upper and self.upper < MAXINT:
    +            bound = self.upper
    +            res = BoxInt()
    +            op = ResOperation(rop.INT_LE, [box, ConstInt(bound)], res)
    +            guards.append(op)
    +            op = ResOperation(rop.GUARD_TRUE, [res], None)
    +            guards.append(op)
         
    +
     class IntUpperBound(IntBound):
         def __init__(self, upper):
             self.has_upper = True
    @@ -244,7 +266,23 @@
             self.has_upper = False
             self.has_lower = False
             self.upper = 0
    -        self.lower = 0        
    +        self.lower = 0
    +
    +class ImmutableIntUnbounded(IntUnbounded):
    +    def _raise(self):
    +        raise TypeError('ImmutableIntUnbounded is immutable')
    +    def make_le(self, other):
    +        self._raise()
    +    def make_lt(self, other):
    +        self._raise()
    +    def make_ge(self, other):
    +        self._raise()
    +    def make_gt(self, other):
    +        self._raise()
    +    def make_constant(self, value):
    +        self._raise()
    +    def intersect(self, other):        
    +        self._raise()
     
     def min4(t):
         return min(min(t[0], t[1]), min(t[2], t[3]))
    diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
    --- a/pypy/jit/metainterp/optimizeopt/optimizer.py
    +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
    @@ -1,64 +1,105 @@
     from pypy.jit.metainterp import jitprof, resume, compile
     from pypy.jit.metainterp.executor import execute_nonspec
     from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF
    -from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded
    +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \
    +                                                     ImmutableIntUnbounded, \
    +                                                     IntLowerBound, MININT, MAXINT
     from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method,
         args_dict)
     from pypy.jit.metainterp.resoperation import rop, ResOperation
     from pypy.jit.metainterp.typesystem import llhelper, oohelper
     from pypy.tool.pairtype import extendabletype
    +from pypy.rlib.debug import debug_start, debug_stop, debug_print
     
     LEVEL_UNKNOWN    = '\x00'
     LEVEL_NONNULL    = '\x01'
     LEVEL_KNOWNCLASS = '\x02'     # might also mean KNOWNARRAYDESCR, for arrays
     LEVEL_CONSTANT   = '\x03'
     
    -import sys
    -MAXINT = sys.maxint
    -MININT = -sys.maxint - 1
    +MODE_ARRAY   = '\x00'
    +MODE_STR     = '\x01'
    +MODE_UNICODE = '\x02'
    +class LenBound(object):
    +    def __init__(self, mode, descr, bound):
    +        self.mode = mode
    +        self.descr = descr
    +        self.bound = bound
     
     class OptValue(object):
         __metaclass__ = extendabletype
    -    _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound')
    +    _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', 'lenbound')
         last_guard_index = -1
     
         level = LEVEL_UNKNOWN
         known_class = None
    -    intbound = None
    +    intbound = ImmutableIntUnbounded()
    +    lenbound = None
     
    -    def __init__(self, box):
    +    def __init__(self, box, level=None, known_class=None, intbound=None):
             self.box = box
    -        self.intbound = IntBound(MININT, MAXINT) #IntUnbounded()
    +        if level is not None:
    +            self.level = level
    +        self.known_class = known_class
    +        if intbound:
    +            self.intbound = intbound
    +        else:
    +            if isinstance(box, BoxInt):
    +                self.intbound = IntBound(MININT, MAXINT)
    +            else:
    +                self.intbound = IntUnbounded()
    +
             if isinstance(box, Const):
                 self.make_constant(box)
             # invariant: box is a Const if and only if level == LEVEL_CONSTANT
     
    +    def make_len_gt(self, mode, descr, val):
    +        if self.lenbound:
    +            assert self.lenbound.mode == mode
    +            assert self.lenbound.descr == descr
    +            self.lenbound.bound.make_gt(IntBound(val, val))
    +        else:
    +            self.lenbound = LenBound(mode, descr, IntLowerBound(val + 1))
    +
    +    def make_guards(self, box):
    +        guards = []
    +        if self.level == LEVEL_CONSTANT:
    +            op = ResOperation(rop.GUARD_VALUE, [box, self.box], None)
    +            guards.append(op)
    +        elif self.level == LEVEL_KNOWNCLASS:
    +            op = ResOperation(rop.GUARD_NONNULL, [box], None)
    +            guards.append(op)            
    +            op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None)
    +            guards.append(op)
    +        else:
    +            if self.level == LEVEL_NONNULL:
    +                op = ResOperation(rop.GUARD_NONNULL, [box], None)
    +                guards.append(op)
    +            self.intbound.make_guards(box, guards)
    +            if self.lenbound:
    +                lenbox = BoxInt()
    +                if self.lenbound.mode == MODE_ARRAY:
    +                    op = ResOperation(rop.ARRAYLEN_GC, [box], lenbox, self.lenbound.descr)
    +                elif self.lenbound.mode == MODE_STR:
    +                    op = ResOperation(rop.STRLEN, [box], lenbox, self.lenbound.descr)
    +                elif self.lenbound.mode == MODE_UNICODE:
    +                    op = ResOperation(rop.UNICODELEN, [box], lenbox, self.lenbound.descr)
    +                else:
    +                    debug_print("Unknown lenbound mode")
    +                    assert False
    +                guards.append(op)
    +                self.lenbound.bound.make_guards(lenbox, guards)
    +
    +        return guards
    +
         def force_box(self):
             return self.box
     
         def get_key_box(self):
             return self.box
     
    -    def enum_forced_boxes(self, boxes, already_seen):
    -        key = self.get_key_box()
    -        if key not in already_seen:
    -            boxes.append(self.force_box())
    -            already_seen[self.get_key_box()] = None
    -
    -    def get_reconstructed(self, optimizer, valuemap):
    -        if self in valuemap:
    -            return valuemap[self]
    -        new = self.reconstruct_for_next_iteration(optimizer)
    -        valuemap[self] = new
    -        self.reconstruct_childs(new, valuemap)
    -        return new
    -
    -    def reconstruct_for_next_iteration(self, optimizer):
    +    def force_at_end_of_preamble(self, already_forced):
             return self
     
    -    def reconstruct_childs(self, new, valuemap):
    -        pass
    -
         def get_args_for_fail(self, modifier):
             pass
     
    @@ -82,6 +123,7 @@
             assert isinstance(constbox, Const)
             self.box = constbox
             self.level = LEVEL_CONSTANT
    +        
             if isinstance(constbox, ConstInt):
                 val = constbox.getint()
                 self.intbound = IntBound(val, val)
    @@ -222,7 +264,9 @@
     
         def pure(self, opnum, args, result):
             op = ResOperation(opnum, args, result)
    -        self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op
    +        key = self.optimizer.make_args_key(op)
    +        if key not in self.optimizer.pure_operations:
    +            self.optimizer.pure_operations[key] = op
     
         def has_pure_result(self, opnum, args, descr):
             op = ResOperation(opnum, args, None, descr)
    @@ -235,16 +279,22 @@
         def setup(self):
             pass
     
    +    def turned_constant(self, value):
    +        pass
    +
         def force_at_end_of_preamble(self):
             pass
     
    -    def turned_constant(self, value):
    +    # It is too late to force stuff here, it must be done in force_at_end_of_preamble
    +    def new(self):
    +        raise NotImplementedError
    +
    +    # Called after the last operation has been propagated to flush out any postponed ops
    +    def flush(self):
             pass
     
    -    def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None):
    -        #return self.__class__()
    -        raise NotImplementedError
    -
    +    def produce_potential_short_preamble_ops(self, potential_ops):
    +        pass
     
     class Optimizer(Optimization):
     
    @@ -257,14 +307,17 @@
             self.interned_refs = self.cpu.ts.new_ref_dict()
             self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd)
             self.bool_boxes = {}
    -        self.loop_invariant_results = {}
             self.pure_operations = args_dict()
    +        self.emitted_pure_operations = {}
             self.producer = {}
             self.pendingfields = []
             self.posponedop = None
             self.exception_might_have_happened = False
             self.quasi_immutable_deps = None
    +        self.opaque_pointers = {}
             self.newoperations = []
    +        self.emitting_dissabled = False
    +        self.emitted_guards = 0        
             if loop is not None:
                 self.call_pure_results = loop.call_pure_results
     
    @@ -286,39 +339,32 @@
             self.optimizations  = optimizations
     
         def force_at_end_of_preamble(self):
    -        self.resumedata_memo = resume.ResumeDataLoopMemo(self.metainterp_sd)
             for o in self.optimizations:
                 o.force_at_end_of_preamble()
     
    -    def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None):
    -        assert optimizer is None
    -        assert valuemap is None
    -        valuemap = {}
    +    def flush(self):
    +        for o in self.optimizations:
    +            o.flush()
    +        assert self.posponedop is None
    +
    +    def new(self):
    +        assert self.posponedop is None
             new = Optimizer(self.metainterp_sd, self.loop)
    -        optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in
    -                         self.optimizations]
    +        optimizations = [o.new() for o in self.optimizations]
             new.set_optimizations(optimizations)
    -
    -        new.values = {}
    -        for box, value in self.values.items():
    -            new.values[box] = value.get_reconstructed(new, valuemap)
    -        new.interned_refs = self.interned_refs
    -        new.bool_boxes = {}
    -        for value in new.bool_boxes.keys():
    -            new.bool_boxes[value.get_reconstructed(new, valuemap)] = None
    -
    -        # FIXME: Move to rewrite.py
    -        new.loop_invariant_results = {}
    -        for key, value in self.loop_invariant_results.items():
    -            new.loop_invariant_results[key] = \
    -                                 value.get_reconstructed(new, valuemap)
    -
    -        new.pure_operations = self.pure_operations
    -        new.producer = self.producer
    -        assert self.posponedop is None
             new.quasi_immutable_deps = self.quasi_immutable_deps
    -
             return new
    +        
    +    def produce_potential_short_preamble_ops(self, sb):
    +        for op in self.emitted_pure_operations:
    +            if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \
    +               op.getopnum() == rop.STRGETITEM or \
    +               op.getopnum() == rop.UNICODEGETITEM:
    +                if not self.getvalue(op.getarg(1)).is_constant():
    +                    continue
    +            sb.add_potential(op)
    +        for opt in self.optimizations:
    +            opt.produce_potential_short_preamble_ops(sb)
     
         def turned_constant(self, value):
             for o in self.optimizations:
    @@ -433,10 +479,11 @@
             return True
     
         def emit_operation(self, op):
    -        ###self.heap_op_optimizer.emitting_operation(op)
    -        self._emit_operation(op)
    -
    -    def _emit_operation(self, op):
    +        if op.returns_bool_result():
    +            self.bool_boxes[self.getvalue(op.result)] = None
    +        if self.emitting_dissabled:
    +            return
    +        
             for i in range(op.numargs()):
                 arg = op.getarg(i)
                 if arg in self.values:
    @@ -445,11 +492,10 @@
             self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
             if op.is_guard():
                 self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
    +            self.emitted_guards += 1 # FIXME: can we reuse above counter?
                 op = self.store_final_boxes_in_guard(op)
             elif op.can_raise():
                 self.exception_might_have_happened = True
    -        elif op.returns_bool_result():
    -            self.bool_boxes[self.getvalue(op.result)] = None
             self.newoperations.append(op)
     
         def store_final_boxes_in_guard(self, op):
    @@ -533,6 +579,7 @@
                     return
                 else:
                     self.pure_operations[args] = op
    +                self.emitted_pure_operations[op] = True
     
             # otherwise, the operation remains
             self.emit_operation(op)
    @@ -555,6 +602,35 @@
         def optimize_DEBUG_MERGE_POINT(self, op):
             self.emit_operation(op)
     
    +    def optimize_CAST_OPAQUE_PTR(self, op):
    +        value = self.getvalue(op.getarg(0))
    +        self.opaque_pointers[value] = True
    +        self.make_equal_to(op.result, value)
    +
    +    def optimize_GETARRAYITEM_GC_PURE(self, op):
    +        indexvalue = self.getvalue(op.getarg(1))
    +        if indexvalue.is_constant():
    +            arrayvalue = self.getvalue(op.getarg(0))
    +            arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint())
    +        self.optimize_default(op)
    +
    +    def optimize_STRGETITEM(self, op):
    +        indexvalue = self.getvalue(op.getarg(1))
    +        if indexvalue.is_constant():
    +            arrayvalue = self.getvalue(op.getarg(0))
    +            arrayvalue.make_len_gt(MODE_STR, op.getdescr(), indexvalue.box.getint())
    +        self.optimize_default(op)
    +
    +    def optimize_UNICODEGETITEM(self, op):
    +        indexvalue = self.getvalue(op.getarg(1))
    +        if indexvalue.is_constant():
    +            arrayvalue = self.getvalue(op.getarg(0))
    +            arrayvalue.make_len_gt(MODE_UNICODE, op.getdescr(), indexvalue.box.getint())
    +        self.optimize_default(op)
    +        
    +
    +    
    +
     dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_',
             default=Optimizer.optimize_default)
     
    diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py
    --- a/pypy/jit/metainterp/optimizeopt/rewrite.py
    +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py
    @@ -13,9 +13,16 @@
         """Rewrite operations into equivalent, cheaper operations.
            This includes already executed operations and constants.
         """
    +    def __init__(self):
    +        self.loop_invariant_results = {}
    +        self.loop_invariant_producer = {}
     
    -    def reconstruct_for_next_iteration(self, optimizer, valuemap):
    -        return self
    +    def new(self):
    +        return OptRewrite()
    +        
    +    def produce_potential_short_preamble_ops(self, sb):
    +        for op in self.loop_invariant_producer.values():
    +            sb.add_potential(op)
     
         def propagate_forward(self, op):
             args = self.optimizer.make_args_key(op)
    @@ -344,16 +351,18 @@
             # expects a compile-time constant
             assert isinstance(arg, Const)
             key = make_hashable_int(arg.getint())
    -        resvalue = self.optimizer.loop_invariant_results.get(key, None)
    +        
    +        resvalue = self.loop_invariant_results.get(key, None)
             if resvalue is not None:
                 self.make_equal_to(op.result, resvalue)
                 return
             # change the op to be a normal call, from the backend's point of view
             # there is no reason to have a separate operation for this
    +        self.loop_invariant_producer[key] = op
             op = op.copy_and_change(rop.CALL)
             self.emit_operation(op)
             resvalue = self.getvalue(op.result)
    -        self.optimizer.loop_invariant_results[key] = resvalue
    +        self.loop_invariant_results[key] = resvalue
     
         def _optimize_nullness(self, op, box, expect_nonnull):
             value = self.getvalue(box)
    @@ -424,11 +433,10 @@
             # specifically the given oopspec call.  For non-oopspec calls,
             # oopspecindex is just zero.
             effectinfo = op.getdescr().get_extra_info()
    -        if effectinfo is not None:
    -            oopspecindex = effectinfo.oopspecindex
    -            if oopspecindex == EffectInfo.OS_ARRAYCOPY:
    -                if self._optimize_CALL_ARRAYCOPY(op):
    -                    return
    +        oopspecindex = effectinfo.oopspecindex
    +        if oopspecindex == EffectInfo.OS_ARRAYCOPY:
    +            if self._optimize_CALL_ARRAYCOPY(op):
    +                return
             self.emit_operation(op)
     
         def _optimize_CALL_ARRAYCOPY(self, op):
    diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py
    --- a/pypy/jit/metainterp/optimizeopt/simplify.py
    +++ b/pypy/jit/metainterp/optimizeopt/simplify.py
    @@ -25,6 +25,8 @@
             #     but it's a bit hard to implement robustly if heap.py is also run
             pass
     
    +    optimize_CAST_OPAQUE_PTR = optimize_VIRTUAL_REF
    +
     
     dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_',
             default=OptSimplify.emit_operation)
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    @@ -2742,11 +2742,11 @@
         def test_residual_call_invalidate_some_arrays(self):
             ops = """
             [p1, p2, i1]
    -        p3 = getarrayitem_gc(p1, 0, descr=arraydescr2)
    +        p3 = getarrayitem_gc(p2, 0, descr=arraydescr2)
             p4 = getarrayitem_gc(p2, 1, descr=arraydescr2)
             i2 = getarrayitem_gc(p1, 1, descr=arraydescr)
             i3 = call(i1, descr=writearraydescr)
    -        p5 = getarrayitem_gc(p1, 0, descr=arraydescr2)
    +        p5 = getarrayitem_gc(p2, 0, descr=arraydescr2)
             p6 = getarrayitem_gc(p2, 1, descr=arraydescr2)
             i4 = getarrayitem_gc(p1, 1, descr=arraydescr)
             escape(p3)
    @@ -2759,7 +2759,7 @@
             """
             expected = """
             [p1, p2, i1]
    -        p3 = getarrayitem_gc(p1, 0, descr=arraydescr2)
    +        p3 = getarrayitem_gc(p2, 0, descr=arraydescr2)
             p4 = getarrayitem_gc(p2, 1, descr=arraydescr2)
             i2 = getarrayitem_gc(p1, 1, descr=arraydescr)
             i3 = call(i1, descr=writearraydescr)
    @@ -4621,6 +4621,96 @@
             """
             self.optimize_strunicode_loop(ops, expected)
     
    +    def test_intmod_bounds(self):
    +        ops = """
    +        [i0, i1]
    +        i2 = int_mod(i0, 12)
    +        i3 = int_gt(i2, 12)
    +        guard_false(i3) []
    +        i4 = int_lt(i2, -12)
    +        guard_false(i4) []
    +        i5 = int_mod(i1, -12)
    +        i6 = int_lt(i5, -12)
    +        guard_false(i6) []
    +        i7 = int_gt(i5, 12)
    +        guard_false(i7) []
    +        jump(i2, i5)
    +        """
    +        expected = """
    +        [i0, i1]
    +        i2 = int_mod(i0, 12)
    +        i5 = int_mod(i1, -12)
    +        jump(i2, i5)
    +        """
    +        self.optimize_loop(ops, expected)
    +
    +        # This the sequence of resoperations that is generated for a Python
    +        # app-level int % int.  When the modulus is constant and when i0
    +        # is known non-negative it should be optimized to a single int_mod.
    +        ops = """
    +        [i0]
    +        i5 = int_ge(i0, 0)
    +        guard_true(i5) []
    +        i1 = int_mod(i0, 42)
    +        i2 = int_rshift(i1, 63)
    +        i3 = int_and(42, i2)
    +        i4 = int_add(i1, i3)
    +        finish(i4)
    +        """
    +        expected = """
    +        [i0]
    +        i5 = int_ge(i0, 0)
    +        guard_true(i5) []
    +        i1 = int_mod(i0, 42)
    +        finish(i1)
    +        """
    +        py.test.skip("in-progress")
    +        self.optimize_loop(ops, expected)
    +
    +        # Also, 'n % power-of-two' can be turned into int_and(),
    +        # but that's a bit harder to detect here because it turns into
    +        # several operations, and of course it is wrong to just turn
    +        # int_mod(i0, 16) into int_and(i0, 15).
    +        ops = """
    +        [i0]
    +        i1 = int_mod(i0, 16)
    +        i2 = int_rshift(i1, 63)
    +        i3 = int_and(16, i2)
    +        i4 = int_add(i1, i3)
    +        finish(i4)
    +        """
    +        expected = """
    +        [i0]
    +        i4 = int_and(i0, 15)
    +        finish(i4)
    +        """
    +        py.test.skip("harder")
    +        self.optimize_loop(ops, expected)
    +
    +    def test_bounded_lazy_setfield(self):
    +        ops = """
    +        [p0, i0]
    +        i1 = int_gt(i0, 2)
    +        guard_true(i1) []
    +        setarrayitem_gc(p0, 0, 3)
    +        setarrayitem_gc(p0, 2, 4)
    +        setarrayitem_gc(p0, i0, 15)
    +        i2 = getarrayitem_gc(p0, 2)
    +        jump(p0, i2)
    +        """
    +        # Remove the getarrayitem_gc, because we know that p[i0] does not alias
    +        # p0[2]
    +        expected = """
    +        [p0, i0]
    +        i1 = int_gt(i0, 2)
    +        guard_true(i1) []
    +        setarrayitem_gc(p0, i0, 15)
    +        setarrayitem_gc(p0, 0, 3)
    +        setarrayitem_gc(p0, 2, 4)
    +        jump(p0, 4)
    +        """
    +        self.optimize_loop(ops, expected)
    +
     
     class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
         pass
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
    @@ -14,12 +14,15 @@
         can check that the signature of a call is really what you want.
         """
     
    -    def __init__(self, arg_types, typeinfo):
    +    def __init__(self, arg_types, typeinfo, flags):
             self.arg_types = arg_types
             self.typeinfo = typeinfo   # return type
    +        self.flags = flags
     
         def __eq__(self, other):
    -        return self.arg_types == other.arg_types and self.typeinfo == other.typeinfo
    +        return (self.arg_types == other.arg_types and
    +                self.typeinfo == other.typeinfo and
    +                self.flags == other.get_ffi_flags())
     
     class FakeLLObject(object):
     
    @@ -41,24 +44,31 @@
             vable_token_descr = LLtypeMixin.valuedescr
             valuedescr = LLtypeMixin.valuedescr
     
    -        int_float__int = MyCallDescr('if', 'i')
    +        int_float__int_42 = MyCallDescr('if', 'i', 42)
    +        int_float__int_43 = MyCallDescr('if', 'i', 43)
             funcptr = FakeLLObject()
             func = FakeLLObject(_fake_class=Func,
                                 argtypes=[types.sint, types.double],
    -                            restype=types.sint)
    +                            restype=types.sint,
    +                            flags=42)
             func2 = FakeLLObject(_fake_class=Func,
                                  argtypes=[types.sint, types.double],
    -                             restype=types.sint)
    +                             restype=types.sint,
    +                             flags=43)
             #
             def calldescr(cpu, FUNC, oopspecindex, extraeffect=None):
    -            einfo = EffectInfo([], [], [], [], oopspecindex=oopspecindex,
    +            if extraeffect == EffectInfo.EF_RANDOM_EFFECTS:
    +                f = None   # means "can force all" really
    +            else:
    +                f = []
    +            einfo = EffectInfo(f, f, f, f, oopspecindex=oopspecindex,
                                    extraeffect=extraeffect)
                 return cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, einfo)
             #
             libffi_prepare =  calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PREPARE)
             libffi_push_arg = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PUSH_ARG)
             libffi_call =     calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_CALL,
    -                                 EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE)
    +                                    EffectInfo.EF_RANDOM_EFFECTS)
         
         namespace = namespace.__dict__
     
    @@ -79,7 +89,7 @@
             """
             expected = """
             [i0, f1]
    -        i3 = call_release_gil(12345, i0, f1, descr=int_float__int)
    +        i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
             guard_not_forced() []
             guard_no_exception() []
             jump(i3, f1)
    @@ -119,7 +129,7 @@
             [i0, f1, p2]
             i4 = force_token()
             setfield_gc(p2, i4, descr=vable_token_descr)
    -        i3 = call_release_gil(12345, i0, f1, descr=int_float__int)
    +        i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
             guard_not_forced() [p2]
             guard_no_exception() [p2]
             jump(i3, f1, p2)
    @@ -216,7 +226,7 @@
             call(0, ConstPtr(func),                        descr=libffi_prepare)
             #
             # this "nested" call is nicely optimized
    -        i4 = call_release_gil(67890, i0, f1, descr=int_float__int)
    +        i4 = call_release_gil(67890, i0, f1, descr=int_float__int_43)
             guard_not_forced() []
             guard_no_exception() []
             #
    @@ -261,7 +271,7 @@
             expected = """
             [i0, f1, p2]
             setfield_gc(p2, i0, descr=valuedescr)
    -        i3 = call_release_gil(12345, i0, f1, descr=int_float__int)
    +        i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
             guard_not_forced() []
             guard_no_exception() []
             jump(i3, f1, p2)
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
    @@ -61,7 +61,9 @@
                 boxes = []
             boxes = []
         def clone_if_mutable(self):
    -        return self
    +        return FakeDescr()
    +    def __eq__(self, other):
    +        return isinstance(other, Storage) or isinstance(other, FakeDescr)
     
     
     class BaseTestWithUnroll(BaseTest):
    @@ -69,13 +71,14 @@
         enable_opts = "intbounds:rewrite:virtualize:string:heap:unroll"
     
         def optimize_loop(self, ops, expected, expected_preamble=None,
    -                      call_pure_results=None):
    +                      call_pure_results=None, expected_short=None):
             loop = self.parse(ops)
             if expected != "crash!":
                 expected = self.parse(expected)
             if expected_preamble:
                 expected_preamble = self.parse(expected_preamble)
    -
    +        if expected_short:
    +            expected_short = self.parse(expected_short)
             loop.preamble = TreeLoop('preamble')
             loop.preamble.inputargs = loop.inputargs
             loop.preamble.token = LoopToken()
    @@ -84,17 +87,33 @@
             self._do_optimize_loop(loop, call_pure_results)
             #
             print
    +        print "Preamble:"
             print loop.preamble.inputargs
    -        print '\n'.join([str(o) for o in loop.preamble.operations])
    +        if loop.preamble.operations:
    +            print '\n'.join([str(o) for o in loop.preamble.operations])
    +        else:
    +            print 'Failed!'
             print
    +        print "Loop:"
             print loop.inputargs
             print '\n'.join([str(o) for o in loop.operations])
             print
    +        if expected_short:
    +            print "Short Preamble:"
    +            short = loop.preamble.token.short_preamble[0]
    +            print short.inputargs
    +            print '\n'.join([str(o) for o in short.operations])        
    +            print
    +        
             assert expected != "crash!", "should have raised an exception"
             self.assert_equal(loop, expected)
             if expected_preamble:
                 self.assert_equal(loop.preamble, expected_preamble,
                                   text_right='expected preamble')
    +        if expected_short:
    +            self.assert_equal(short, expected_short,
    +                              text_right='expected short preamble')
    +            
             return loop
     
     class OptimizeOptTest(BaseTestWithUnroll):
    @@ -840,7 +859,13 @@
             p3sub = new_with_vtable(ConstClass(node_vtable2))
             setfield_gc(p3sub, i1, descr=valuedescr)
             setfield_gc(p1, p3sub, descr=nextdescr)
    -        jump(i1, p1, p3sub)
    +        # XXX: We get two extra operations here because the setfield
    +        #      above is the result of forcing p1 and thus not 
    +        #      registered with the heap optimizer. I've marked tests
    +        #      below with VIRTUALHEAP if they suffer from this issue
    +        p3sub2 = getfield_gc(p1, descr=nextdescr) 
    +        guard_nonnull_class(p3sub2, ConstClass(node_vtable2)) []
    +        jump(i1, p1, p3sub2)
             """
             self.optimize_loop(ops, expected, preamble)
     
    @@ -871,7 +896,9 @@
             guard_true(i2b) []
             p3 = new_with_vtable(ConstClass(node_vtable))
             setfield_gc(p3, i2, descr=nextdescr)
    -        jump(p3, i2)
    +        # XXX: VIRTUALHEAP (see above)
    +        i3 = getfield_gc(p3, descr=nextdescr)
    +        jump(p3, i3)
             """
             self.optimize_loop(ops, expected, preamble)
     
    @@ -1166,6 +1193,29 @@
             """
             self.optimize_loop(ops, expected)
     
    +    def test_virtual_field_forced_by_lazy_setfield(self):
    +        ops = """
    +        [i0, p1, p3]
    +        i28 = int_add(i0, 1)
    +        p30 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p30, i28, descr=nextdescr)
    +        setfield_gc(p3, p30, descr=valuedescr)
    +        p45 = getfield_gc(p3, descr=valuedescr)
    +        i29 = int_add(i28, 1)
    +        jump(i29, p45, p3)
    +        """
    +        preamble = """
    +        [i0, p1, p3]
    +        i28 = int_add(i0, 1)
    +        i29 = int_add(i28, 1)
    +        p30 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p30, i28, descr=nextdescr)
    +        setfield_gc(p3, p30, descr=valuedescr)
    +        jump(i29, p30, p3)
    +        """
    +        expected = preamble
    +        self.optimize_loop(ops, expected, preamble)
    +
         def test_nonvirtual_1(self):
             ops = """
             [i]
    @@ -1308,15 +1358,78 @@
             ops = """
             [i]
             i1 = getfield_gc(ConstPtr(myptr), descr=valuedescr)
    -        jump(i1)
    -        """
    -        preamble = ops
    -        expected = """
    +        call(i1, descr=nonwritedescr)
    +        jump(i)
    +        """
    +        preamble = """
             [i]
    -        jump(i)
    +        i1 = getfield_gc(ConstPtr(myptr), descr=valuedescr)
    +        call(i1, descr=nonwritedescr)
    +        jump(i, i1)
    +        """
    +        expected = """
    +        [i, i1]
    +        call(i1, descr=nonwritedescr)
    +        jump(i, i1)
             """
             self.optimize_loop(ops, expected, preamble)
     
    +    def test_varray_boxed1(self):
    +        ops = """
    +        [p0, p8]
    +        p11 = getfield_gc(p0, descr=otherdescr)
    +        guard_nonnull(p11) [p0, p8]
    +        guard_class(p11, ConstClass(node_vtable2)) [p0, p8]
    +        p14 = getfield_gc(p11, descr=otherdescr)
    +        guard_isnull(p14) [p0, p8]
    +        p18 = getfield_gc(ConstPtr(myptr), descr=otherdescr)
    +        guard_isnull(p18) [p0, p8]
    +        p31 = new(descr=ssize)
    +        setfield_gc(p31, 0, descr=adescr)
    +        p33 = new_array(0, descr=arraydescr)
    +        setfield_gc(p31, p33, descr=bdescr)
    +        p35 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p35, p31, descr=valuedescr)
    +        jump(p0, p35)
    +        """
    +        expected = """
    +        [p0]
    +        jump(p0)
    +        """
    +        self.optimize_loop(ops, expected)
    +
    +    def test_varray_boxed_simplified(self):
    +        ops = """
    +        [p0, p8]
    +        p18 = getfield_gc(ConstPtr(myptr), descr=otherdescr)
    +        guard_isnull(p18) [p0, p8]
    +        p31 = new(descr=ssize)
    +        p35 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p35, p31, descr=valuedescr)        
    +        jump(p0, p35)
    +        """
    +        expected = """
    +        [p0]
    +        jump(p0)
    +        """
    +        self.optimize_loop(ops, expected)
    +
    +    def test_varray_boxed_noconst(self):
    +        ops = """
    +        [p0, p8, p18, p19]
    +        guard_isnull(p18) [p0, p8]
    +        p31 = new(descr=ssize)
    +        p35 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p35, p31, descr=valuedescr)        
    +        jump(p0, p35, p19, p18)
    +        """
    +        expected = """
    +        [p0, p19]
    +        guard_isnull(p19) [p0]
    +        jump(p0, NULL)
    +        """
    +        self.optimize_loop(ops, expected)
    +        
         def test_varray_1(self):
             ops = """
             [i1]
    @@ -1552,6 +1665,24 @@
             """
             self.optimize_loop(ops, expected)
     
    +    def test_duplicate_getfield_2(self):
    +        ops = """
    +        [p1, p2, i0]
    +        i1 = getfield_gc(p1, descr=valuedescr)
    +        i2 = getfield_gc(p2, descr=valuedescr)
    +        i3 = getfield_gc(p1, descr=valuedescr)
    +        i4 = getfield_gc(p2, descr=valuedescr)
    +        i5 = int_add(i3, i4)
    +        i6 = int_add(i0, i5)
    +        jump(p1, p2, i6)
    +        """
    +        expected = """
    +        [p1, p2, i0, i5]
    +        i6 = int_add(i0, i5)
    +        jump(p1, p2, i6, i5)
    +        """
    +        self.optimize_loop(ops, expected)
    +
         def test_getfield_after_setfield(self):
             ops = """
             [p1, i1]
    @@ -1728,6 +1859,7 @@
             """
             expected = """
             [p1, i1, i2]
    +        setfield_gc(p1, i2, descr=valuedescr)
             jump(p1, i1, i2)
             """
             # in this case, all setfields are removed, because we can prove
    @@ -1872,14 +2004,14 @@
             guard_true(i3) []
             i4 = int_neg(i2)
             setfield_gc(p1, i2, descr=valuedescr)
    -        jump(p1, i1, i2, i4)
    -        """
    -        expected = """
    -        [p1, i1, i2, i4]
    +        jump(p1, i1, i2, i4, i4)
    +        """
    +        expected = """
    +        [p1, i1, i2, i4, i5]
             setfield_gc(p1, i1, descr=valuedescr)
             guard_true(i4) []
             setfield_gc(p1, i2, descr=valuedescr)
    -        jump(p1, i1, i2, 1)
    +        jump(p1, i1, i2, i5, i5)
             """
             self.optimize_loop(ops, expected, preamble)
     
    @@ -1902,14 +2034,14 @@
             i4 = int_neg(i2)
             setfield_gc(p1, NULL, descr=nextdescr)
             escape()
    -        jump(p1, i2, i4)
    -        """
    -        expected = """
    -        [p1, i2, i4]
    +        jump(p1, i2, i4, i4)
    +        """
    +        expected = """
    +        [p1, i2, i4, i5]
             guard_true(i4) [p1]
             setfield_gc(p1, NULL, descr=nextdescr)
             escape()
    -        jump(p1, i2, 1)
    +        jump(p1, i2, i5, i5)
             """
             self.optimize_loop(ops, expected, preamble)
     
    @@ -1931,14 +2063,14 @@
             i4 = int_neg(i2)
             setfield_gc(p1, NULL, descr=nextdescr)
             escape()
    -        jump(p1, i2, i4)
    -        """
    -        expected = """
    -        [p1, i2, i4]
    +        jump(p1, i2, i4, i4)
    +        """
    +        expected = """
    +        [p1, i2, i4, i5]
             guard_true(i4) [i2, p1]
             setfield_gc(p1, NULL, descr=nextdescr)
             escape()
    -        jump(p1, i2, 1)
    +        jump(p1, i2, i5, i5)
             """
             self.optimize_loop(ops, expected)
     
    @@ -1954,14 +2086,22 @@
             setfield_gc(p1, i2, descr=valuedescr)
             jump(p1, i1, i2, i4)
             """
    -        preamble = ops
    -        expected = """
    -        [p1, i1, i2, i4]
    +        preamble = """
    +        [p1, i1, i2, i3]
    +        setfield_gc(p1, i1, descr=valuedescr)
    +        i5 = int_eq(i3, 5)
    +        guard_true(i5) []
    +        i4 = int_neg(i2)
    +        setfield_gc(p1, i2, descr=valuedescr)
    +        jump(p1, i1, i2, i4, i4)
    +        """
    +        expected = """
    +        [p1, i1, i2, i4, i7]
             setfield_gc(p1, i1, descr=valuedescr)
             i5 = int_eq(i4, 5)
             guard_true(i5) []
             setfield_gc(p1, i2, descr=valuedescr)
    -        jump(p1, i1, i2, 5)
    +        jump(p1, i1, i2, i7, i7)
             """
             self.optimize_loop(ops, expected, preamble)
     
    @@ -2035,7 +2175,25 @@
             jump(p1)
             """
             self.optimize_loop(ops, expected)
    -
    +        
    +    def test_duplicate_getarrayitem_2(self):
    +        ops = """
    +        [p1, i0]
    +        i2 = getarrayitem_gc(p1, 0, descr=arraydescr2)
    +        i3 = getarrayitem_gc(p1, 1, descr=arraydescr2)
    +        i4 = getarrayitem_gc(p1, 0, descr=arraydescr2)
    +        i5 = getarrayitem_gc(p1, 1, descr=arraydescr2)
    +        i6 = int_add(i3, i4)
    +        i7 = int_add(i0, i6)
    +        jump(p1, i7)
    +        """
    +        expected = """
    +        [p1, i0, i6]
    +        i7 = int_add(i0, i6)
    +        jump(p1, i7, i6)
    +        """
    +        self.optimize_loop(ops, expected)
    +        
         def test_duplicate_getarrayitem_after_setarrayitem_1(self):
             ops = """
             [p1, p2]
    @@ -2161,15 +2319,15 @@
             p2 = new_with_vtable(ConstClass(node_vtable))
             setfield_gc(p2, p4, descr=nextdescr)
             setfield_gc(p1, p2, descr=nextdescr)
    -        jump(p1, i2, i4, p4)
    -        """
    -        expected = """
    -        [p1, i2, i4, p4]
    +        jump(p1, i2, i4, p4, i4)
    +        """
    +        expected = """
    +        [p1, i2, i4, p4, i5]
             guard_true(i4) [p1, p4]
             p2 = new_with_vtable(ConstClass(node_vtable))
             setfield_gc(p2, p4, descr=nextdescr)
             setfield_gc(p1, p2, descr=nextdescr)
    -        jump(p1, i2, 1, p4)
    +        jump(p1, i2, i5, p4, i5)
             """
             self.optimize_loop(ops, expected, preamble)
     
    @@ -2625,6 +2783,101 @@
             """
             self.optimize_loop(ops, expected, preamble)
     
    +    def test_remove_duplicate_pure_op_ovf_with_lazy_setfield(self):
    +        py.test.skip('this optimization is not yet supprted')
    +        ops = """
    +        [i1, p1]
    +        i3 = int_add_ovf(i1, 1)
    +        guard_no_overflow() []
    +        i3b = int_is_true(i3)
    +        guard_true(i3b) []
    +        setfield_gc(p1, i1, descr=valuedescr)
    +        i4 = int_add_ovf(i1, 1)
    +        guard_no_overflow() []
    +        i4b = int_is_true(i4)
    +        guard_true(i4b) []
    +        escape(i3)
    +        escape(i4)
    +        jump(i1, p1)
    +        """
    +        preamble = """
    +        [i1, p1]
    +        i3 = int_add_ovf(i1, 1)
    +        guard_no_overflow() []
    +        i3b = int_is_true(i3)
    +        guard_true(i3b) []
    +        setfield_gc(p1, i1, descr=valuedescr)        
    +        escape(i3)
    +        escape(i3)
    +        jump(i1, p1, i3)
    +        """
    +        expected = """
    +        [i1, p1, i3]
    +        setfield_gc(p1, i1, descr=valuedescr)        
    +        escape(i3)
    +        escape(i3)
    +        jump(i1, p1, i3)
    +        """
    +        self.optimize_loop(ops, expected, preamble)
    +
    +    def test_ovf_guard_in_short_preamble1(self):
    +        ops = """
    +        [p8, p11, i24]
    +        p26 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p26, i24, descr=adescr)        
    +        i34 = getfield_gc_pure(p11, descr=valuedescr)
    +        i35 = getfield_gc_pure(p26, descr=adescr)
    +        i36 = int_add_ovf(i34, i35)
    +        guard_no_overflow() []
    +        jump(p8, p11, i35)
    +        """
    +        expected = """
    +        [p8, p11, i26]
    +        jump(p8, p11, i26)        
    +        """
    +        self.optimize_loop(ops, expected)
    +        
    +    def test_ovf_guard_in_short_preamble2(self):
    +        ops = """
    +        [p8, p11, p12]
    +        p16 = getfield_gc(p8, descr=valuedescr)
    +        i17 = getfield_gc(p8, descr=nextdescr)
    +        i19 = getfield_gc(p16, descr=valuedescr)
    +        i20 = int_ge(i17, i19)
    +        guard_false(i20) []
    +        i21 = getfield_gc(p16, descr=otherdescr)
    +        i22 = getfield_gc(p16, descr=nextdescr)
    +        i23 = int_mul(i17, i22)
    +        i24 = int_add(i21, i23)
    +        p26 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p26, i24, descr=adescr)
    +        i28 = int_add(i17, 1)
    +        setfield_gc(p8, i28, descr=nextdescr)
    +        i34 = getfield_gc_pure(p11, descr=valuedescr)
    +        i35 = getfield_gc_pure(p26, descr=adescr)
    +        guard_nonnull(p12) []
    +        i36 = int_add_ovf(i34, i35)
    +        guard_no_overflow() []
    +        p38 = new_with_vtable(ConstClass(node_vtable))
    +        setfield_gc(p38, i36, descr=adescr)
    +        jump(p8, p11, p26)
    +        """
    +        expected = """
    +        [p8, p11, i24, i39, i19, p16, i21, i34]
    +        i40 = int_ge(i39, i19)
    +        guard_false(i40) []
    +        i41 = getfield_gc(p16, descr=nextdescr)
    +        i42 = int_mul(i39, i41)
    +        i43 = int_add(i21, i42)
    +        i44 = int_add(i39, 1)
    +        setfield_gc(p8, i44, descr=nextdescr)
    +        i45 = int_add_ovf(i34, i43)
    +        guard_no_overflow() []
    +        jump(p8, p11, i43, i44, i19, p16, i21, i34)
    +        """
    +        self.optimize_loop(ops, expected)
    +
    +
         def test_int_and_or_with_zero(self):
             ops = """
             [i0, i1]
    @@ -3899,11 +4152,13 @@
             jump(p4364)
             """
             expected = """
    -        [i0, i1]
    +        [i0]
    +        i1 = int_sub_ovf(i0, 1)
    +        guard_no_overflow() []
             escape(i1)
             i2 = int_add_ovf(i0, 1)
             guard_no_overflow() []
    -        jump(i2, i0)
    +        jump(i2)
             """
             self.optimize_loop(ops, expected)
     
    @@ -4867,32 +5122,38 @@
     
         def test_invariant_ovf(self):
             ops = """
    -        [i0, i1, i10, i11, i12]
    +        [i0, i1, i10, i11, i20, i21]
             i2 = int_add_ovf(i0, i1)
             guard_no_overflow() []
             i3 = int_sub_ovf(i0, i1)
             guard_no_overflow() []
             i4 = int_mul_ovf(i0, i1)
             guard_no_overflow() []
    +        escape(i2)
    +        escape(i3)
    +        escape(i4)
             i24 = int_mul_ovf(i10, i11)
             guard_no_overflow() []
             i23 = int_sub_ovf(i10, i11)
             guard_no_overflow() []
             i22 = int_add_ovf(i10, i11)
             guard_no_overflow() []
    -        jump(i0, i1, i2, i3, i4)
    -        """
    -        expected = """
    -        [i0, i1, i10, i11, i12]
    +        jump(i0, i1, i20, i21, i20, i21)
    +        """
    +        expected = """
    +        [i0, i1, i10, i11, i2, i3, i4]
    +        escape(i2)
    +        escape(i3)
    +        escape(i4)        
             i24 = int_mul_ovf(i10, i11)
             guard_no_overflow() []
             i23 = int_sub_ovf(i10, i11)
             guard_no_overflow() []
             i22 = int_add_ovf(i10, i11)
             guard_no_overflow() []
    -        jump(i0, i1, i10, i11, i12)
    -        """
    -        self.optimize_loop(ops, expected, ops)
    +        jump(i0, i1, i10, i11, i2, i3, i4) 
    +        """
    +        self.optimize_loop(ops, expected)
     
         def test_value_proven_to_be_constant_after_two_iterations(self):
             class FakeDescr(AbstractDescr):
    @@ -4908,8 +5169,8 @@
             ops = """
             [p0, p1, p2, p3, i4, p5, i6, p7, p8, p9, p14]
             guard_value(i4, 3) []
    -        guard_class(p9, 17278984) []
    -        guard_class(p9, 17278984) []
    +        guard_class(p9, ConstClass(node_vtable)) []
    +        guard_class(p9, ConstClass(node_vtable)) []
             p22 = getfield_gc(p9, descr=inst_w_seq)
             guard_nonnull(p22) []
             i23 = getfield_gc(p9, descr=inst_index)
    @@ -4924,11 +5185,11 @@
             guard_class(p14, 17273920) []
             guard_class(p14, 17273920) []
     
    -        p75 = new_with_vtable(17278984)
    +        p75 = new_with_vtable(ConstClass(node_vtable))
             setfield_gc(p75, p14, descr=inst_w_seq)
             setfield_gc(p75, 0, descr=inst_index)
    -        guard_class(p75, 17278984) []
    -        guard_class(p75, 17278984) []
    +        guard_class(p75, ConstClass(node_vtable)) []
    +        guard_class(p75, ConstClass(node_vtable)) []
             p79 = getfield_gc(p75, descr=inst_w_seq)
             guard_nonnull(p79) []
             i80 = getfield_gc(p75, descr=inst_index)
    @@ -4974,6 +5235,7 @@
             """
             expected = """
             [p0]
    +        setfield_gc(p0, p0, descr=valuedescr)
             jump(p0)
             """
             self.optimize_loop(ops, expected, preamble)
    @@ -5060,9 +5322,7 @@
             self.optimize_loop(ops, expected)
     
         # ----------
    -    def optimize_strunicode_loop(self, ops, optops, preamble=None):
    -        if not preamble:
    -            preamble = ops # FIXME: Force proper testing of preamble
    +    def optimize_strunicode_loop(self, ops, optops, preamble):
             # check with the arguments passed in
             self.optimize_loop(ops, optops, preamble)
             # check with replacing 'str' with 'unicode' everywhere
    @@ -5082,7 +5342,7 @@
             [i0]
             jump(i0)
             """
    -        self.optimize_strunicode_loop(ops, expected)
    +        self.optimize_strunicode_loop(ops, expected, expected)
     
         def test_newstr_2(self):
             ops = """
    @@ -5098,7 +5358,7 @@
             [i0, i1]
             jump(i1, i0)
             """
    -        self.optimize_strunicode_loop(ops, expected)
    +        self.optimize_strunicode_loop(ops, expected, expected)
     
         def test_str_concat_1(self):
             ops = """
    @@ -5106,7 +5366,7 @@
             p3 = call(0, p1, p2, descr=strconcatdescr)
             jump(p2, p3)
             """
    -        expected = """
    +        preamble = """
             [p1, p2]
             i1 = strlen(p1)
             i2 = strlen(p2)
    @@ -5114,9 +5374,18 @@
             p3 = newstr(i3)
             copystrcontent(p1, p3, 0, 0, i1)
             copystrcontent(p2, p3, 0, i1, i2)
    -        jump(p2, p3)
    -        """
    -        self.optimize_strunicode_loop(ops, expected)
    +        jump(p2, p3, i2)
    +        """
    +        expected = """
    +        [p1, p2, i1]
    +        i2 = strlen(p2)
    +        i3 = int_add(i1, i2)
    +        p3 = newstr(i3)
    +        copystrcontent(p1, p3, 0, 0, i1)
    +        copystrcontent(p2, p3, 0, i1, i2)
    +        jump(p2, p3, i2)
    +        """
    +        self.optimize_strunicode_loop(ops, expected, preamble)
     
         def test_str_concat_vstr2_str(self):
             ops = """
    @@ -5137,7 +5406,7 @@
             copystrcontent(p2, p3, 0, 2, i2)
             jump(i1, i0, p3)
             """
    -        self.optimize_strunicode_loop(ops, expected)
    +        self.optimize_strunicode_loop(ops, expected, expected)
     
         def test_str_concat_str_vstr2(self):
             ops = """
    @@ -5160,7 +5429,7 @@
             i6 = int_add(i5, 1)      # will be killed by the backend
             jump(i1, i0, p3)
             """
    -        self.optimize_strunicode_loop(ops, expected)
    +        self.optimize_strunicode_loop(ops, expected, expected)
     
         def test_str_concat_str_str_str(self):
             ops = """
    @@ -5169,7 +5438,7 @@
             p5 = call(0, p4, p3, descr=strconcatdescr)
             jump(p2, p3, p5)
             """
    -        expected = """
    +        preamble = """
             [p1, p2, p3]
             i1 = strlen(p1)
             i2 = strlen(p2)
    @@ -5180,9 +5449,20 @@
             copystrcontent(p1, p5, 0, 0, i1)
             copystrcontent(p2, p5, 0, i1, i2)
             copystrcontent(p3, p5, 0, i12, i3)
    -        jump(p2, p3, p5)
    -        """
    -        self.optimize_strunicode_loop(ops, expected)
    +        jump(p2, p3, p5, i2, i3)
    +        """
    +        expected = """
    +        [p1, p2, p3, i1, i2]
    +        i12 = int_add(i1, i2)
    +        i3 = strlen(p3)
    +        i123 = int_add(i12, i3)
    +        p5 = newstr(i123)
    +        copystrcontent(p1, p5, 0, 0, i1)
    +        copystrcontent(p2, p5, 0, i1, i2)
    +        copystrcontent(p3, p5, 0, i12, i3)
    +        jump(p2, p3, p5, i2, i3)
    +        """
    +        self.optimize_strunicode_loop(ops, expected, preamble)
     
         def test_str_concat_str_cstr1(self):
             ops = """
    @@ -5199,7 +5479,7 @@
             strsetitem(p3, i2, 120)     # == ord('x')
             jump(p3)
             """
    -        self.optimize_strunicode_loop(ops, expected)
    +        self.optimize_strunicode_loop(ops, expected, expected)
     
         def test_str_concat_consts(self):
             ops = """
    @@ -5210,17 +5490,58 @@
             escape(p3)
             jump()
             """
    -        preamble = """
    -        []
    -        p3 = call(0, s"ab", s"cde", descr=strconcatdescr)
    -        escape(p3)
    -        jump()
    -        """
             expected = """
             []
             escape(s"abcde")
             jump()
             """
    +        self.optimize_strunicode_loop(ops, expected, expected)
    +
    +    def test_str_slice_len_surviving1(self):
    +        ops = """
    +        [p1, i1, i2, i3]
    +        escape(i3)
    +        p2 = call(0, p1, i1, i2, descr=strslicedescr)
    +        i4 = strlen(p2)
    +        jump(p1, i1, i2, i4)
    +        """
    +        preamble = """
    +        [p1, i1, i2, i3]
    +        escape(i3)
    +        i4 = int_sub(i2, i1)
    +        jump(p1, i1, i2, i4, i4)
    +        """
    +        expected = """
    +        [p1, i1, i2, i3, i4]
    +        escape(i3)
    +        jump(p1, i1, i2, i4, i4)
    +        """
    +        self.optimize_strunicode_loop(ops, expected, preamble)
    +
    +    def test_str_slice_len_surviving2(self):
    +        ops = """
    +        [p1, i1, i2, p2]
    +        i5 = getfield_gc(p2, descr=valuedescr)
    +        escape(i5)
    +        p3 = call(0, p1, i1, i2, descr=strslicedescr)
    +        i4 = strlen(p3)
    +        setfield_gc(p2, i4, descr=valuedescr)
    +        jump(p1, i1, i2, p2)
    +        """
    +        preamble = """
    +        [p1, i1, i2, p2]
    +        i5 = getfield_gc(p2, descr=valuedescr)
    +        escape(i5)
    +        i4 = int_sub(i2, i1)
    +        setfield_gc(p2, i4, descr=valuedescr)
    +        jump(p1, i1, i2, p2, i4, i4)
    +        """
    +        expected = """
    +        [p1, i1, i2, p2, i5, i6]
    +        escape(i5)
    +        setfield_gc(p2, i6, descr=valuedescr)
    +        jump(p1, i1, i2, p2, i6, i6)
    +        """
             self.optimize_strunicode_loop(ops, expected, preamble)
     
         def test_str_slice_1(self):
    @@ -5229,14 +5550,20 @@
             p2 = call(0, p1, i1, i2, descr=strslicedescr)
             jump(p2, i1, i2)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2]
             i3 = int_sub(i2, i1)
             p2 = newstr(i3)
             copystrcontent(p1, p2, i1, 0, i3)
    -        jump(p2, i1, i2)
    -        """
    -        self.optimize_strunicode_loop(ops, expected)
    +        jump(p2, i1, i2, i3)
    +        """
    +        expected = """
    +        [p1, i1, i2, i3]
    +        p2 = newstr(i3)
    +        copystrcontent(p1, p2, i1, 0, i3)
    +        jump(p2, i1, i2, i3)
    +        """
    +        self.optimize_strunicode_loop(ops, expected, preamble)
     
         def test_str_slice_2(self):
             ops = """
    @@ -5250,7 +5577,7 @@
             copystrcontent(p1, p2, 0, 0, i2)
             jump(p2, i2)
             """
    -        self.optimize_strunicode_loop(ops, expected)
    +        self.optimize_strunicode_loop(ops, expected, expected)
     
         def test_str_slice_3(self):
             ops = """
    @@ -5259,16 +5586,22 @@
             p3 = call(0, p2, i3, i4, descr=strslicedescr)
             jump(p3, i1, i2, i3, i4)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2, i3, i4]
             i0 = int_sub(i2, i1)     # killed by the backend
             i5 = int_sub(i4, i3)
             i6 = int_add(i1, i3)
             p3 = newstr(i5)
             copystrcontent(p1, p3, i6, 0, i5)
    -        jump(p3, i1, i2, i3, i4)
    -        """
    -        self.optimize_strunicode_loop(ops, expected)
    +        jump(p3, i1, i2, i3, i4, i5, i6)
    +        """
    +        expected = """
    +        [p1, i1, i2, i3, i4, i5, i6]
    +        p3 = newstr(i5)
    +        copystrcontent(p1, p3, i6, 0, i5)
    +        jump(p3, i1, i2, i3, i4, i5, i6)
    +        """
    +        self.optimize_strunicode_loop(ops, expected, preamble)
     
         def test_str_slice_getitem1(self):
             ops = """
    @@ -5278,15 +5611,21 @@
             escape(i4)
             jump(p1, i1, i2, i3)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2, i3]
             i6 = int_sub(i2, i1)      # killed by the backend
             i5 = int_add(i1, i3)
             i4 = strgetitem(p1, i5)
             escape(i4)
    -        jump(p1, i1, i2, i3)
    -        """
    -        self.optimize_strunicode_loop(ops, expected)
    +        jump(p1, i1, i2, i3, i5)
    +        """
    +        expected = """
    +        [p1, i1, i2, i3, i5]
    +        i4 = strgetitem(p1, i5)
    +        escape(i4)
    +        jump(p1, i1, i2, i3, i5)
    +        """
    +        self.optimize_strunicode_loop(ops, expected, preamble)
     
         def test_str_slice_plain(self):
             ops = """
    @@ -5304,7 +5643,7 @@
             escape(i4)
             jump(i3, i4)
             """
    -        self.optimize_strunicode_loop(ops, expected)
    +        self.optimize_strunicode_loop(ops, expected, expected)
     
         def test_str_slice_concat(self):
             ops = """
    @@ -5313,7 +5652,7 @@
             p4 = call(0, p3, p2, descr=strconcatdescr)
             jump(p4, i1, i2, p2)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2, p2]
             i3 = int_sub(i2, i1)     # length of p3
             i4 = strlen(p2)
    @@ -5321,14 +5660,22 @@
             p4 = newstr(i5)
             copystrcontent(p1, p4, i1, 0, i3)
             copystrcontent(p2, p4, 0, i3, i4)
    -        jump(p4, i1, i2, p2)
    -        """
    -        self.optimize_strunicode_loop(ops, expected)
    +        jump(p4, i1, i2, p2, i5, i3, i4)
    +        """
    +        expected = """
    +        [p1, i1, i2, p2, i5, i3, i4]
    +        p4 = newstr(i5)
    +        copystrcontent(p1, p4, i1, 0, i3)
    +        copystrcontent(p2, p4, 0, i3, i4)
    +        jump(p4, i1, i2, p2, i5, i3, i4)
    +        """
    +        self.optimize_strunicode_loop(ops, expected, preamble)
     
         def test_strgetitem_bounds(self):
             ops = """
             [p0, i0]
             i1 = strgetitem(p0, i0)
    +        i10 = strgetitem(p0, i0)
             i2 = int_lt(i1, 256)
             guard_true(i2) []
             i3 = int_ge(i1, 0)
    @@ -5337,6 +5684,7 @@
             """
             expected = """
             [p0, i0]
    +        i1 = strgetitem(p0, i0)
             jump(p0, i0)
             """
             self.optimize_loop(ops, expected)
    @@ -5345,12 +5693,14 @@
             ops = """
             [p0, i0]
             i1 = unicodegetitem(p0, i0)
    +        i10 = unicodegetitem(p0, i0)        
             i2 = int_lt(i1, 0)
             guard_false(i2) []
             jump(p0, i0)
             """
             expected = """
             [p0, i0]
    +        i1 = unicodegetitem(p0, i0)        
             jump(p0, i0)
             """
             self.optimize_loop(ops, expected)
    @@ -5387,7 +5737,7 @@
             self.optimize_loop(ops, expected)
     
         # ----------
    -    def optimize_strunicode_loop_extradescrs(self, ops, optops, preamble=None):
    +    def optimize_strunicode_loop_extradescrs(self, ops, optops, preamble):
             class FakeCallInfoCollection:
                 def callinfo_for_oopspec(self, oopspecindex):
                     calldescrtype = type(LLtypeMixin.strequaldescr)
    @@ -5410,7 +5760,7 @@
             escape(i0)
             jump(p1, p2)
             """
    -        self.optimize_strunicode_loop_extradescrs(ops, ops)
    +        self.optimize_strunicode_loop_extradescrs(ops, ops, ops)
     
         def test_str_equal_noop2(self):
             ops = """
    @@ -5420,7 +5770,7 @@
             escape(i0)
             jump(p1, p2, p3)
             """
    -        expected = """
    +        preamble = """
             [p1, p2, p3]
             i1 = strlen(p1)
             i2 = strlen(p2)
    @@ -5430,10 +5780,19 @@
             copystrcontent(p2, p4, 0, i1, i2)
             i0 = call(0, p3, p4, descr=strequaldescr)
             escape(i0)
    -        jump(p1, p2, p3)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops,
    -                                                  expected)
    +        jump(p1, p2, p3, i3, i1, i2)
    +        """
    +        expected = """
    +        [p1, p2, p3, i3, i1, i2]
    +        p4 = newstr(i3)
    +        copystrcontent(p1, p4, 0, 0, i1)
    +        copystrcontent(p2, p4, 0, i1, i2)
    +        i0 = call(0, p3, p4, descr=strequaldescr)
    +        escape(i0)
    +        jump(p1, p2, p3, i3, i1, i2)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected,
    +                                                  preamble)
     
         def test_str_equal_slice1(self):
             ops = """
    @@ -5443,15 +5802,21 @@
             escape(i0)
             jump(p1, i1, i2, p3)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2, p3]
             i3 = int_sub(i2, i1)
             i0 = call(0, p1, i1, i3, p3, descr=streq_slice_checknull_descr)
             escape(i0)
    -        jump(p1, i1, i2, p3)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops,
    -                                                  expected)
    +        jump(p1, i1, i2, p3, i3)
    +        """
    +        expected = """
    +        [p1, i1, i2, p3, i3]
    +        i0 = call(0, p1, i1, i3, p3, descr=streq_slice_checknull_descr)
    +        escape(i0)
    +        jump(p1, i1, i2, p3, i3)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected,
    +                                                  preamble)
     
         def test_str_equal_slice2(self):
             ops = """
    @@ -5461,15 +5826,21 @@
             escape(i0)
             jump(p1, i1, i2, p3)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2, p3]
             i4 = int_sub(i2, i1)
             i0 = call(0, p1, i1, i4, p3, descr=streq_slice_checknull_descr)
             escape(i0)
    -        jump(p1, i1, i2, p3)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops,
    -                                                  expected)
    +        jump(p1, i1, i2, p3, i4)
    +        """
    +        expected = """
    +        [p1, i1, i2, p3, i4]
    +        i0 = call(0, p1, i1, i4, p3, descr=streq_slice_checknull_descr)
    +        escape(i0)
    +        jump(p1, i1, i2, p3, i4)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected,
    +                                                  preamble)
     
         def test_str_equal_slice3(self):
             ops = """
    @@ -5481,14 +5852,21 @@
             jump(p1, i1, i2, p3)
             """
             expected = """
    +        [p1, i1, i2, p3, i4]
    +        i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr)
    +        escape(i0)
    +        jump(p1, i1, i2, p3, i4)
    +        """
    +        preamble = """
             [p1, i1, i2, p3]
    +        guard_nonnull(p3) []        
             i4 = int_sub(i2, i1)
             i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr)
             escape(i0)
    -        jump(p1, i1, i2, p3)
    +        jump(p1, i1, i2, p3, i4)
             """
             self.optimize_strunicode_loop_extradescrs(ops,
    -                                                  expected, ops)
    +                                                  expected, preamble)
     
         def test_str_equal_slice4(self):
             ops = """
    @@ -5498,15 +5876,21 @@
             escape(i0)
             jump(p1, i1, i2)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2]
             i3 = int_sub(i2, i1)
             i0 = call(0, p1, i1, i3, 120, descr=streq_slice_char_descr)
             escape(i0)
    -        jump(p1, i1, i2)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops,
    -                                                  expected)
    +        jump(p1, i1, i2, i3)
    +        """
    +        expected = """
    +        [p1, i1, i2, i3]
    +        i0 = call(0, p1, i1, i3, 120, descr=streq_slice_char_descr)
    +        escape(i0)
    +        jump(p1, i1, i2, i3)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected,
    +                                                  preamble)
     
         def test_str_equal_slice5(self):
             ops = """
    @@ -5518,15 +5902,21 @@
             escape(i0)
             jump(p1, i1, i2, i3)
             """
    -        expected = """
    +        preamble = """
             [p1, i1, i2, i3]
             i4 = int_sub(i2, i1)
             i0 = call(0, p1, i1, i4, i3, descr=streq_slice_char_descr)
             escape(i0)
    -        jump(p1, i1, i2, i3)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops,
    -                                                  expected)
    +        jump(p1, i1, i2, i3, i4)
    +        """
    +        expected = """
    +        [p1, i1, i2, i3, i4]
    +        i0 = call(0, p1, i1, i4, i3, descr=streq_slice_char_descr)
    +        escape(i0)
    +        jump(p1, i1, i2, i3, i4)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected,
    +                                                  preamble)
     
         def test_str_equal_none1(self):
             ops = """
    @@ -5541,7 +5931,7 @@
             escape(i0)
             jump(p1)
             """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, expected)
     
         def test_str_equal_none2(self):
             ops = """
    @@ -5556,7 +5946,7 @@
             escape(i0)
             jump(p1)
             """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, expected)
     
         def test_str_equal_nonnull1(self):
             ops = """
    @@ -5572,7 +5962,14 @@
             escape(i0)
             jump(p1)
             """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        preamble = """
    +        [p1]
    +        guard_nonnull(p1) []
    +        i0 = call(0, p1, s"hello world", descr=streq_nonnull_descr)
    +        escape(i0)
    +        jump(p1)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
     
         def test_str_equal_nonnull2(self):
             ops = """
    @@ -5583,13 +5980,19 @@
             jump(p1)
             """
             expected = """
    +        [p1, i0]
    +        escape(i0)
    +        jump(p1, i0)
    +        """
    +        preamble = """
             [p1]
    +        guard_nonnull(p1) []
             i1 = strlen(p1)
             i0 = int_eq(i1, 0)
             escape(i0)
    -        jump(p1)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        jump(p1, i0)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
     
         def test_str_equal_nonnull3(self):
             ops = """
    @@ -5605,7 +6008,14 @@
             escape(i0)
             jump(p1)
             """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        preamble = """
    +        [p1]
    +        guard_nonnull(p1) []
    +        i0 = call(0, p1, 120, descr=streq_nonnull_char_descr)
    +        escape(i0)
    +        jump(p1)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
     
         def test_str_equal_nonnull4(self):
             ops = """
    @@ -5615,7 +6025,7 @@
             escape(i0)
             jump(p1, p2)
             """
    -        expected = """
    +        preamble = """
             [p1, p2]
             i1 = strlen(p1)
             i2 = strlen(p2)
    @@ -5625,9 +6035,18 @@
             copystrcontent(p2, p4, 0, i1, i2)
             i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr)
             escape(i0)
    -        jump(p1, p2)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        jump(p1, p2, i3, i1, i2)
    +        """
    +        expected = """
    +        [p1, p2, i3, i1, i2]
    +        p4 = newstr(i3)
    +        copystrcontent(p1, p4, 0, 0, i1)
    +        copystrcontent(p2, p4, 0, i1, i2)
    +        i0 = call(0, s"hello world", p4, descr=streq_nonnull_descr)
    +        escape(i0)
    +        jump(p1, p2, i3, i1, i2)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
     
         def test_str_equal_chars0(self):
             ops = """
    @@ -5642,7 +6061,7 @@
             escape(1)
             jump(i1)
             """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, expected)
     
         def test_str_equal_chars1(self):
             ops = """
    @@ -5653,13 +6072,42 @@
             escape(i0)
             jump(i1)
             """
    -        expected = """
    +        preamble = """
             [i1]
             i0 = int_eq(i1, 120)     # ord('x')
             escape(i0)
    -        jump(i1)
    -        """
    -        self.optimize_strunicode_loop_extradescrs(ops, expected)
    +        jump(i1, i0)
    +        """
    +        expected = """
    +        [i1, i0]
    +        escape(i0)
    +        jump(i1, i0)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
    +
    +    def test_str_equal_nonconst(self):
    +        ops = """
    +        [i1, i2]
    +        p1 = newstr(1)
    +        strsetitem(p1, 0, i1)
    +        p2 = newstr(1)
    +        strsetitem(p2, 0, i2)
    +        i0 = call(0, p1, p2, descr=strequaldescr)
    +        escape(i0)
    +        jump(i1, i2)
    +        """
    +        preamble = """
    +        [i1, i2]
    +        i0 = int_eq(i1, i2)
    +        escape(i0)
    +        jump(i1, i2, i0)
    +        """
    +        expected = """
    +        [i1, i2, i0]
    +        escape(i0)
    +        jump(i1, i2, i0)
    +        """
    +        self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
     
         def test_str_equal_chars2(self):
             ops = """
    @@ -5680,7 +6128,7 @@
             escape(i0)
             jump(i1, i2)
             """
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:33:23 2011
    From: noreply at buildbot.pypy.org (edelsohn)
    Date: Tue,  6 Sep 2011 19:33:23 +0200 (CEST)
    Subject: [pypy-commit] pypy ppc-jit-backend: PPC64 updates
    Message-ID: <20110906173323.A0F088203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: edelsohn
    Branch: ppc-jit-backend
    Changeset: r47116:c0df6acc9e45
    Date: 2011-09-06 13:26 -0400
    http://bitbucket.org/pypy/pypy/changeset/c0df6acc9e45/
    
    Log:	PPC64 updates
    
    diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py
    --- a/pypy/jit/backend/ppc/runner.py
    +++ b/pypy/jit/backend/ppc/runner.py
    @@ -12,7 +12,7 @@
     from pypy.jit.backend.x86 import regloc
     from pypy.jit.backend.x86.support import values_array
     from pypy.jit.backend.ppc.ppcgen.ppc_assembler import PPCBuilder
    -from pypy.jit.backend.ppc.ppcgen.arch import NONVOLATILES
    +from pypy.jit.backend.ppc.ppcgen.arch import IS_PPC_32, NONVOLATILES
     import sys
     
     from pypy.tool.ansi_print import ansi_log
    @@ -111,10 +111,15 @@
             return reg
     
         def _make_prologue(self, codebuilder):
    -        framesize = 64 + 80
    -        codebuilder.stwu(1, 1, -framesize)
    -        codebuilder.mflr(0)
    -        codebuilder.stw(0, 1, framesize + 4)
    +        framesize = 16 * WORD + 20 * WORD
    +        if IS_PPC_32:
    +            codebuilder.stwu(1, 1, -framesize)
    +            codebuilder.mflr(0)
    +            codebuilder.stw(0, 1, framesize + WORD)
    +        else:
    +            codebuilder.stdu(1, 1, -framesize)
    +            codebuilder.mflr(0)
    +            codebuilder.std(0, 1, framesize + WORD)
             codebuilder.save_nonvolatiles(framesize)
     
         def _make_epilogue(self, codebuilder):
    @@ -142,10 +147,13 @@
                 descr.patch_pos = patch_pos
                 descr.used_mem_indices = used_mem_indices
     
    -            framesize = 64 + 80
    +            framesize = 16 * WORD + 20 * WORD
                 codebuilder.restore_nonvolatiles(framesize)
     
    -            codebuilder.lwz(0, 1, framesize + 4) # 36
    +            if IS_PPC_32:
    +                codebuilder.lwz(0, 1, framesize + WORD) # 36
    +            else:
    +                codebuilder.ld(0, 1, framesize + WORD) # 36
                 codebuilder.mtlr(0)
                 codebuilder.addi(1, 1, framesize)
     
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:33:24 2011
    From: noreply at buildbot.pypy.org (edelsohn)
    Date: Tue,  6 Sep 2011 19:33:24 +0200 (CEST)
    Subject: [pypy-commit] pypy ppc-jit-backend: PPC64 updates
    Message-ID: <20110906173324.D6BF48203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: edelsohn
    Branch: ppc-jit-backend
    Changeset: r47117:6e91461f4d96
    Date: 2011-09-06 13:26 -0400
    http://bitbucket.org/pypy/pypy/changeset/6e91461f4d96/
    
    Log:	PPC64 updates
    
    diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
    --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
    +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
    @@ -936,16 +936,21 @@
             if IS_PPC_32:
                 self.stwx(source_reg, 0, 0)
             else:
    -            # ? 
    -            self.std(source_reg, 0, 10)
    +            self.stdx(source_reg, 0, 0)
     
         def save_nonvolatiles(self, framesize):
             for i, reg in enumerate(NONVOLATILES):
    -            self.stw(reg, 1, framesize - 4 * i)
    +            if IS_PPC_32:
    +                self.stw(reg, 1, framesize - WORD * i)
    +            else:
    +                self.std(reg, 1, framesize - WORD * i)
     
         def restore_nonvolatiles(self, framesize):
             for i, reg in enumerate(NONVOLATILES):
    -            self.lwz(reg, 1, framesize - i * 4)
    +            if IS_PPC_32:
    +                self.lwz(reg, 1, framesize - WORD * i)
    +            else:
    +                self.ld(reg, 1, framesize - WORD * i)
             
     
         # translate a trace operation to corresponding machine code
    @@ -1430,10 +1435,16 @@
                 for i, arg in enumerate(remaining_args):
                     if isinstance(arg, Box):
                         #self.mr(0, cpu.reg_map[arg])
    -                    self.stw(cpu.reg_map[arg], 1, 8 + WORD * i)
    +                    if IS_PPC_32:
    +                        self.stw(cpu.reg_map[arg], 1, 8 + WORD * i)
    +                    else:
    +                        self.std(cpu.reg_map[arg], 1, 8 + WORD * i)
                     elif isinstance(arg, Const):
                         self.load_word(0, arg.value)
    -                    self.stw(0, 1, 8 + WORD * i)
    +                    if IS_PPC_32:
    +                        self.stw(0, 1, 8 + WORD * i)
    +                    else:
    +                        self.std(0, 1, 8 + WORD * i)
                     else:
                         assert 0, "%s not supported yet" % arg
     
    @@ -1590,11 +1601,14 @@
                 else:
                     assert 0, "arg type not suported"
     
    -        framesize = 64 + 80
    +        framesize = 16 * WORD + 20 * WORD
     
             self.restore_nonvolatiles(framesize)
     
    -        self.lwz(0, 1, framesize + 4) # 36
    +        if IS_PPC_32:
    +            self.lwz(0, 1, framesize + WORD) # 36
    +        else:
    +            self.ld(0, 1, framesize + WORD) # 36
             self.mtlr(0)
             self.addi(1, 1, framesize)
             self.load_word(3, identifier)
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:35:00 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 19:35:00 +0200 (CEST)
    Subject: [pypy-commit] pypy default: 'Python 2.7 -A' raises TypeError in
    	this corner case.
    Message-ID: <20110906173500.82BAB8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47118:ebe7ce81d5a1
    Date: 2011-09-06 19:08 +0200
    http://bitbucket.org/pypy/pypy/changeset/ebe7ce81d5a1/
    
    Log:	'Python 2.7 -A' raises TypeError in this corner case.
    
    diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py
    --- a/pypy/objspace/std/test/test_stringformat.py
    +++ b/pypy/objspace/std/test/test_stringformat.py
    @@ -168,7 +168,7 @@
     
         def test_incomplete_format(self):
             raises(ValueError, '%'.__mod__, ((23,),))
    -        raises(ValueError, '%('.__mod__, ({},))
    +        raises((ValueError, TypeError), '%('.__mod__, ({},))
     
         def test_format_char(self):
             import sys
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:35:01 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 19:35:01 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Tests and fix:
    Message-ID: <20110906173501.B99AB8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47119:09816858a87a
    Date: 2011-09-06 19:33 +0200
    http://bitbucket.org/pypy/pypy/changeset/09816858a87a/
    
    Log:	Tests and fix:
    
    	object.__str__() was implemented by calling space.repr(), instead of
    	directly calling the __repr__() method. The difference is that the
    	first does type checking on the result, while the latter does not.
    	It's not the job of object.__str__() to do type checking, but of its
    	callers.
    
    diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py
    --- a/pypy/objspace/std/objecttype.py
    +++ b/pypy/objspace/std/objecttype.py
    @@ -24,7 +24,12 @@
         return w_obj.getrepr(space, '%s object' % (classname,))
     
     def descr__str__(space, w_obj):
    -    return space.repr(w_obj)
    +    w_type = space.type(w_obj)
    +    w_impl = w_type.lookup("__repr__")
    +    if w_impl is None:
    +        raise OperationError(space.w_TypeError,      # can it really occur?
    +                             space.wrap("operand does not support unary str"))
    +    return space.get_and_call_function(w_impl, w_obj)
     
     def descr__class__(space, w_obj):
         return space.type(w_obj)
    diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py
    --- a/pypy/objspace/std/test/test_obj.py
    +++ b/pypy/objspace/std/test/test_obj.py
    @@ -94,3 +94,10 @@
             #assert len(log) == 1
             #assert log[0].message.args == ("object.__init__() takes no parameters",)
             #assert type(log[0].message) is DeprecationWarning
    +
    +    def test_object_str(self):
    +        # obscure case
    +        class A(object):
    +            def __repr__(self):
    +                return 123456
    +        assert A().__str__() == 123456
    diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
    --- a/pypy/objspace/std/test/test_unicodeobject.py
    +++ b/pypy/objspace/std/test/test_unicodeobject.py
    @@ -780,8 +780,22 @@
             assert type(s) is unicode
             assert s == u'\u1234'
     
    +        # now the same with a new-style class...
    +        class A(object):
    +            def __init__(self, num):
    +                self.num = num
    +            def __str__(self):
    +                return unichr(self.num)
    +
    +        s = '%s' % A(111)    # this is ASCII
    +        assert type(s) is unicode
    +        assert s == chr(111)
    +
    +        s = '%s' % A(0x1234)    # this is not ASCII
    +        assert type(s) is unicode
    +        assert s == u'\u1234'
    +
         def test_formatting_unicode__str__2(self):
    -        skip("this is completely insane")
             class A:
                 def __str__(self):
                     return u'baz'
    @@ -798,9 +812,22 @@
             s = '%s %s' % (a, b)
             assert s == u'baz bar'
     
    +        skip("but this case here is completely insane")
             s = '%s %s' % (b, a)
             assert s == u'foo baz'
     
    +    def test_formatting_unicode__str__3(self):
    +        # "bah" is all I can say
    +        class X(object):
    +            def __repr__(self):
    +                return u'\u1234'
    +        '%s' % X()
    +        #
    +        class X(object):
    +            def __str__(self):
    +                return u'\u1234'
    +        '%s' % X()
    +
         def test_str_subclass(self):
             class Foo9(str):
                 def __unicode__(self):
    
    From noreply at buildbot.pypy.org  Tue Sep  6 19:35:03 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue,  6 Sep 2011 19:35:03 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Add comment.
    Message-ID: <20110906173503.2DAA98203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47120:ddff981df9d5
    Date: 2011-09-06 19:34 +0200
    http://bitbucket.org/pypy/pypy/changeset/ddff981df9d5/
    
    Log:	Add comment.
    
    diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py
    --- a/pypy/objspace/std/test/test_obj.py
    +++ b/pypy/objspace/std/test/test_obj.py
    @@ -96,7 +96,8 @@
             #assert type(log[0].message) is DeprecationWarning
     
         def test_object_str(self):
    -        # obscure case
    +        # obscure case: __str__() must delegate to __repr__() without adding
    +        # type checking on its own
             class A(object):
                 def __repr__(self):
                     return 123456
    
    From noreply at buildbot.pypy.org  Tue Sep  6 22:46:53 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Tue,  6 Sep 2011 22:46:53 +0200 (CEST)
    Subject: [pypy-commit] pypy inline-dict-ops: fix tests
    Message-ID: <20110906204653.E4E738203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: inline-dict-ops
    Changeset: r47121:2227e3fb86ed
    Date: 2011-09-06 22:46 +0200
    http://bitbucket.org/pypy/pypy/changeset/2227e3fb86ed/
    
    Log:	fix tests
    
    diff --git a/pypy/jit/backend/llsupport/test/test_asmmemmgr.py b/pypy/jit/backend/llsupport/test/test_asmmemmgr.py
    --- a/pypy/jit/backend/llsupport/test/test_asmmemmgr.py
    +++ b/pypy/jit/backend/llsupport/test/test_asmmemmgr.py
    @@ -211,14 +211,14 @@
         debug._log = debug.DebugLog()
         try:
             mc._dump(addr, 'test-logname-section')
    -        log = list(debug._log)
    +        log = list(debug._log) 
         finally:
             debug._log = None
         encoded = ''.join(writtencode).encode('hex').upper()
         ataddr = '@%x' % addr
         assert log == [('test-logname-section',
                         [('debug_print', 'CODE_DUMP', ataddr, '+0 ', encoded)])]
    -    #
    +    
         lltype.free(p, flavor='raw')
     
     def test_blockbuildermixin2():
    diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py
    --- a/pypy/jit/backend/llsupport/test/test_descr.py
    +++ b/pypy/jit/backend/llsupport/test/test_descr.py
    @@ -159,7 +159,7 @@
         clsf = getArrayDescrClass(A4)
         assert clsf != cls
         assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float))
    -    clss = getArrayDescrClass(A5)
    +    clss = getArrayDescrClass(A6)
         assert clss not in (clsf, cls)
         assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT))
         #
    @@ -174,7 +174,7 @@
         assert descr2.__class__ is GcPtrArrayDescr
         assert descr3.__class__ is NonGcPtrArrayDescr
         assert descr4.__class__ is clsf
    -    assert descr5.__class__ is clss
    +    assert descr6.__class__ is clss
         assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char))
         assert not descr1.is_array_of_pointers()
         assert     descr2.is_array_of_pointers()
    
    From noreply at buildbot.pypy.org  Tue Sep  6 22:59:00 2011
    From: noreply at buildbot.pypy.org (boemmels)
    Date: Tue,  6 Sep 2011 22:59:00 +0200 (CEST)
    Subject: [pypy-commit] lang-scheme default: Split tests for symbols &
     strings out of test_simple
    Message-ID: <20110906205900.C57DF8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Juergen Boemmels 
    Branch: 
    Changeset: r5:324223d56732
    Date: 2011-09-05 23:02 +0200
    http://bitbucket.org/pypy/lang-scheme/changeset/324223d56732/
    
    Log:	Split tests for symbols & strings out of test_simple
    
    diff --git a/scheme/test/test_parser.py b/scheme/test/test_parser.py
    --- a/scheme/test/test_parser.py
    +++ b/scheme/test/test_parser.py
    @@ -39,8 +39,13 @@
         w_fixnum = parse_sexpr('1123')
         assert unwrap(w_fixnum) == 1123
         assert isinstance(w_fixnum, W_Integer)
    -    w_fixnum = parse_sexpr('abfa__')
    -    assert isinstance(w_fixnum, W_Symbol)
    +
    +def test_symbol():
    +    w_sym = parse_sexpr('abfa__')
    +    assert isinstance(w_sym, W_Symbol)
    +    assert w_sym.to_string() == 'abfa__'
    +
    +def test_string():
         t = parse_sexpr(r'''"don't believe \"them\""''')
         assert isinstance(t, W_String)
         assert unwrap(t) == 'don\'t believe "them"'
    
    From noreply at buildbot.pypy.org  Tue Sep  6 22:59:01 2011
    From: noreply at buildbot.pypy.org (boemmels)
    Date: Tue,  6 Sep 2011 22:59:01 +0200 (CEST)
    Subject: [pypy-commit] lang-scheme default: More symbol test
    Message-ID: <20110906205901.D885D8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Juergen Boemmels 
    Branch: 
    Changeset: r6:7702fffadbc2
    Date: 2011-09-05 23:19 +0200
    http://bitbucket.org/pypy/lang-scheme/changeset/7702fffadbc2/
    
    Log:	More symbol test
    
    diff --git a/scheme/test/test_parser.py b/scheme/test/test_parser.py
    --- a/scheme/test/test_parser.py
    +++ b/scheme/test/test_parser.py
    @@ -44,6 +44,18 @@
         w_sym = parse_sexpr('abfa__')
         assert isinstance(w_sym, W_Symbol)
         assert w_sym.to_string() == 'abfa__'
    +    
    +    more_syms = ['abc',
    +                 'call/cc',
    +                 '+',
    +                 '-',
    +                 'set!',
    +                 'eqv?',
    +                ]
    +    for s in more_syms:
    +        w_sym = parse_sexpr(s)
    +        assert isinstance(w_sym, W_Symbol)
    +        assert w_sym.to_string() == s
     
     def test_string():
         t = parse_sexpr(r'''"don't believe \"them\""''')
    
    From noreply at buildbot.pypy.org  Tue Sep  6 22:59:02 2011
    From: noreply at buildbot.pypy.org (boemmels)
    Date: Tue,  6 Sep 2011 22:59:02 +0200 (CEST)
    Subject: [pypy-commit] lang-scheme default: Fix strings: Allow escaped
    	backslash
    Message-ID: <20110906205902.EC3D58203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Juergen Boemmels 
    Branch: 
    Changeset: r7:0e79d2ada637
    Date: 2011-09-06 22:58 +0200
    http://bitbucket.org/pypy/lang-scheme/changeset/0e79d2ada637/
    
    Log:	Fix strings: Allow escaped backslash
    
    diff --git a/scheme/ssparser.py b/scheme/ssparser.py
    --- a/scheme/ssparser.py
    +++ b/scheme/ssparser.py
    @@ -2,18 +2,25 @@
     from pypy.rlib.parsing.makepackrat import BacktrackException, Status
     from scheme.object import W_Pair, W_Integer, W_String, symbol, \
             w_nil, W_Boolean, W_Real, quote, qq, unquote, unquote_splicing, \
    -        w_ellipsis
    +        w_ellipsis, SchemeSyntaxError
     
     def str_unquote(s):
         str_lst = []
    -    last_ch = ''
    -    for c in s[1:]:
    -        if last_ch == '\\' and c == '"':
    -            pass
    +    pos = 1
    +    last = len(s)-1
    +    while pos < last:
    +        ch = s[pos]
    +        if ch == '\\':
    +            pos += 1
    +            ch = s[pos]
    +            if ch == '\\' or ch == '\"':
    +                str_lst.append(ch)
    +            else:
    +                raise SchemeSyntaxError
             else:
    -            str_lst.append(last_ch)
    +            str_lst.append(ch)
     
    -        last_ch = c
    +        pos += 1
     
         return ''.join(str_lst)
     
    diff --git a/scheme/test/test_parser.py b/scheme/test/test_parser.py
    --- a/scheme/test/test_parser.py
    +++ b/scheme/test/test_parser.py
    @@ -62,6 +62,16 @@
         assert isinstance(t, W_String)
         assert unwrap(t) == 'don\'t believe "them"'
     
    +    more_strings = [(r'''"simple string"''', r'''simple string'''),
    +                    (r'''"\\ backslash"''', r'''\ backslash'''),
    +                    (r'''"\\\\"''',r'''\\'''),
    +                    (r'''"with \"quotes\""''', r'''with "quotes"'''),
    +                   ]
    +    for code, contents in more_strings:
    +        w_string = parse_sexpr(code)
    +        assert isinstance(w_string, W_String)
    +        assert unwrap(w_string) == contents
    +
     def test_objects():
         w_fixnum = parse_sexpr('-12345')
         assert isinstance(w_fixnum, W_Integer)
    
    From noreply at buildbot.pypy.org  Wed Sep  7 11:02:02 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 11:02:02 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: cache the length
    	of arrays
    Message-ID: <20110907090202.292B182213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47122:e5b582b1252b
    Date: 2011-09-06 14:03 +0200
    http://bitbucket.org/pypy/pypy/changeset/e5b582b1252b/
    
    Log:	cache the length of arrays
    
    diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py
    --- a/pypy/jit/metainterp/heapcache.py
    +++ b/pypy/jit/metainterp/heapcache.py
    @@ -18,6 +18,8 @@
             # heap array cache
             # maps descrs to {index: {from_box: to_box}} dicts
             self.heap_array_cache = {}
    +        # cache the length of arrays
    +        self.length_cache = {}
     
         def invalidate_caches(self, opnum, descr):
             if opnum == rop.SETFIELD_GC:
    @@ -57,6 +59,10 @@
         def new(self, box):
             self.new_boxes[box] = None
     
    +    def new_array(self, box, lengthbox):
    +        self.new(box)
    +        self.arraylen_now_known(box, lengthbox)
    +
         def getfield(self, box, descr):
             d = self.heap_cache.get(descr, None)
             if d:
    @@ -112,23 +118,26 @@
             indexcache = cache.get(index, None)
             cache[index] = self._do_write_with_aliasing(indexcache, box, valuebox)
     
    +    def arraylen(self, box):
    +        return self.length_cache.get(box, None)
    +
    +    def arraylen_now_known(self, box, lengthbox):
    +        self.length_cache[box] = lengthbox
    +
    +    def _replace_box(self, d, oldbox, newbox):
    +        new_d = {}
    +        for frombox, tobox in d.iteritems():
    +            if frombox is oldbox:
    +                frombox = newbox
    +            if tobox is oldbox:
    +                tobox = newbox
    +            new_d[frombox] = tobox
    +        return new_d
    +
         def replace_box(self, oldbox, newbox):
             for descr, d in self.heap_cache.iteritems():
    -            new_d = {}
    -            for frombox, tobox in d.iteritems():
    -                if frombox is oldbox:
    -                    frombox = newbox
    -                if tobox is oldbox:
    -                    tobox = newbox
    -                new_d[frombox] = tobox
    -            self.heap_cache[descr] = new_d
    +            self.heap_cache[descr] = self._replace_box(d, oldbox, newbox)
             for descr, d in self.heap_array_cache.iteritems():
                 for index, cache in d.iteritems():
    -                new_cache = {}
    -                for frombox, tobox in cache.iteritems():
    -                    if frombox is oldbox:
    -                        frombox = newbox
    -                    if tobox is oldbox:
    -                        tobox = newbox
    -                    new_cache[frombox] = tobox
    -                d[index] = new_cache
    +                d[index] = self._replace_box(cache, oldbox, newbox)
    +        self.length_cache = self._replace_box(self.length_cache, oldbox, newbox)
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -394,9 +394,9 @@
     ##        self.execute(rop.SUBCLASSOF, box1, box2)
     
         @arguments("descr", "box")
    -    def opimpl_new_array(self, itemsizedescr, countbox):
    -        resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, countbox)
    -        self.metainterp.heapcache.new(resbox)
    +    def opimpl_new_array(self, itemsizedescr, lengthbox):
    +        resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox)
    +        self.metainterp.heapcache.new_array(resbox, lengthbox)
             return resbox
     
         @arguments("box", "descr", "box")
    @@ -456,7 +456,12 @@
     
         @arguments("box", "descr")
         def opimpl_arraylen_gc(self, arraybox, arraydescr):
    -        return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox)
    +        lengthbox = self.metainterp.heapcache.arraylen(arraybox)
    +        if lengthbox is None:
    +            lengthbox = self.execute_with_descr(
    +                    rop.ARRAYLEN_GC, arraydescr, arraybox)
    +            self.metainterp.heapcache.arraylen_now_known(arraybox, lengthbox)
    +        return lengthbox
     
         @arguments("orgpc", "box", "descr", "box")
         def opimpl_check_neg_index(self, orgpc, arraybox, arraydescr, indexbox):
    @@ -465,10 +470,9 @@
             negbox = self.implement_guard_value(orgpc, negbox)
             if negbox.getint():
                 # the index is < 0; add the array length to it
    -            lenbox = self.metainterp.execute_and_record(
    -                rop.ARRAYLEN_GC, arraydescr, arraybox)
    +            lengthbox = self.opimpl_arraylen_gc(arraybox, arraydescr)
                 indexbox = self.metainterp.execute_and_record(
    -                rop.INT_ADD, None, indexbox, lenbox)
    +                rop.INT_ADD, None, indexbox, lengthbox)
             return indexbox
     
         @arguments("descr", "descr", "descr", "descr", "box")
    @@ -721,7 +725,7 @@
         def opimpl_arraylen_vable(self, pc, box, fdescr, adescr):
             if self._nonstandard_virtualizable(pc, box):
                 arraybox = self._opimpl_getfield_gc_any(box, fdescr)
    -            return self.execute_with_descr(rop.ARRAYLEN_GC, adescr, arraybox)
    +            return self.opimpl_arraylen_gc(arraybox, adescr)
             vinfo = self.metainterp.jitdriver_sd.virtualizable_info
             virtualizable_box = self.metainterp.virtualizable_boxes[-1]
             virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box)
    diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py
    --- a/pypy/jit/metainterp/test/test_heapcache.py
    +++ b/pypy/jit/metainterp/test/test_heapcache.py
    @@ -6,6 +6,8 @@
     box2 = object()
     box3 = object()
     box4 = object()
    +lengthbox1 = object()
    +lengthbox2 = object()
     descr1 = object()
     descr2 = object()
     descr3 = object()
    @@ -206,6 +208,15 @@
             assert h.getarrayitem(box3, descr1, index1) is box4
             assert h.getarrayitem(box1, descr1, index1) is box3 # box1 and box3 cannot alias
     
    +    def test_length_cache(self):
    +        h = HeapCache()
    +        h.new_array(box1, lengthbox1)
    +        assert h.arraylen(box1) is lengthbox1
    +
    +        assert h.arraylen(box2) is None
    +        h.arraylen_now_known(box2, lengthbox2)
    +        assert h.arraylen(box2) is lengthbox2
    +
     
         def test_invalidate_cache(self):
             h = HeapCache()
    @@ -252,14 +263,20 @@
             h = HeapCache()
             h.setarrayitem(box1, descr1, index1, box2)
             h.setarrayitem(box1, descr2, index1, box3)
    +        h.arraylen_now_known(box1, lengthbox1)
             h.setarrayitem(box2, descr1, index2, box1)
             h.setarrayitem(box3, descr2, index2, box1)
             h.setarrayitem(box2, descr3, index2, box3)
             h.replace_box(box1, box4)
             assert h.getarrayitem(box1, descr1, index1) is None
             assert h.getarrayitem(box1, descr2, index1) is None
    +        assert h.arraylen(box1) is None
    +        assert h.arraylen(box4) is lengthbox1
             assert h.getarrayitem(box4, descr1, index1) is box2
             assert h.getarrayitem(box4, descr2, index1) is box3
             assert h.getarrayitem(box2, descr1, index2) is box4
             assert h.getarrayitem(box3, descr2, index2) is box4
             assert h.getarrayitem(box2, descr3, index2) is box3
    +
    +        h.replace_box(lengthbox1, lengthbox2)
    +        assert h.arraylen(box4) is lengthbox2
    diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
    --- a/pypy/jit/metainterp/test/test_tracingopts.py
    +++ b/pypy/jit/metainterp/test/test_tracingopts.py
    @@ -469,3 +469,17 @@
             assert res == 2 * -7 + 2 * -8
             self.check_operations_history(getarrayitem_gc=0)
     
    +    def test_length_caching(self):
    +        class Gbl(object):
    +            pass
    +        g = Gbl()
    +        g.a = [0] * 7
    +        def fn(n):
    +            a = g.a
    +            res = len(a) + len(a)
    +            a1 = [0] * n
    +            g.a = a1
    +            return len(a1) + res
    +        res = self.interp_operations(fn, [7])
    +        assert res == 7 * 3
    +        self.check_operations_history(arraylen_gc=1)
    
    From noreply at buildbot.pypy.org  Wed Sep  7 11:02:03 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 11:02:03 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: typo
    Message-ID: <20110907090203.6A01B822AB@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47123:055211ce9263
    Date: 2011-09-07 10:38 +0200
    http://bitbucket.org/pypy/pypy/changeset/055211ce9263/
    
    Log:	typo
    
    diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py
    --- a/pypy/jit/metainterp/heapcache.py
    +++ b/pypy/jit/metainterp/heapcache.py
    @@ -47,7 +47,7 @@
         def is_class_known(self, box):
             return box in self.known_class_boxes
     
    -    def class_now_know(self, box):
    +    def class_now_known(self, box):
             self.known_class_boxes[box] = None
     
         def is_nonstandard_virtualizable(self, box):
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -375,7 +375,7 @@
             cls = heaptracker.descr2vtable(cpu, sizedescr)
             resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls))
             self.metainterp.heapcache.new(resbox)
    -        self.metainterp.heapcache.class_now_know(resbox)
    +        self.metainterp.heapcache.class_now_known(resbox)
             return resbox
     
     ##    @FixME  #arguments("box")
    @@ -884,7 +884,7 @@
             clsbox = self.cls_of_box(box)
             if not self.metainterp.heapcache.is_class_known(box):
                 self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc)
    -            self.metainterp.heapcache.class_now_know(box)
    +            self.metainterp.heapcache.class_now_known(box)
             return clsbox
     
         @arguments("int", "orgpc")
    diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py
    --- a/pypy/jit/metainterp/test/test_heapcache.py
    +++ b/pypy/jit/metainterp/test/test_heapcache.py
    @@ -40,7 +40,7 @@
             h = HeapCache()
             assert not h.is_class_known(1)
             assert not h.is_class_known(2)
    -        h.class_now_know(1)
    +        h.class_now_known(1)
             assert h.is_class_known(1)
             assert not h.is_class_known(2)
     
    
    From noreply at buildbot.pypy.org  Wed Sep  7 11:02:04 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 11:02:04 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: some XXXs
    Message-ID: <20110907090204.A47FF82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47124:c6cae1ac723f
    Date: 2011-09-07 10:39 +0200
    http://bitbucket.org/pypy/pypy/changeset/c6cae1ac723f/
    
    Log:	some XXXs
    
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -367,6 +367,7 @@
     
         @arguments("descr")
         def opimpl_new(self, sizedescr):
    +        # XXX heapcache.new
             return self.execute_with_descr(rop.NEW, sizedescr)
     
         @arguments("descr")
    @@ -478,6 +479,7 @@
         @arguments("descr", "descr", "descr", "descr", "box")
         def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
                            sizebox):
    +        # XXX use heapcache
             sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
             self._opimpl_setfield_gc_any(sbox, lengthdescr, sizebox)
             abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
    
    From noreply at buildbot.pypy.org  Wed Sep  7 11:02:05 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 11:02:05 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: be more precise
    	about getarrayitem
    Message-ID: <20110907090205.E21A682213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47125:77217d39842f
    Date: 2011-09-07 11:01 +0200
    http://bitbucket.org/pypy/pypy/changeset/77217d39842f/
    
    Log:	be more precise about getarrayitem
    
    diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py
    --- a/pypy/jit/metainterp/heapcache.py
    +++ b/pypy/jit/metainterp/heapcache.py
    @@ -107,6 +107,17 @@
                 if indexcache is not None:
                     return indexcache.get(box, None)
     
    +    def getarrayitem_now_known(self, box, descr, indexbox, valuebox):
    +        if not isinstance(indexbox, ConstInt):
    +            return
    +        index = indexbox.getint()
    +        cache = self.heap_array_cache.setdefault(descr, {})
    +        indexcache = cache.get(index, None)
    +        if indexcache is not None:
    +            indexcache[box] = valuebox
    +        else:
    +            cache[index] = {box: valuebox}
    +
         def setarrayitem(self, box, descr, indexbox, valuebox):
             if not isinstance(indexbox, ConstInt):
                 cache = self.heap_array_cache.get(descr, None)
    @@ -118,6 +129,7 @@
             indexcache = cache.get(index, None)
             cache[index] = self._do_write_with_aliasing(indexcache, box, valuebox)
     
    +
         def arraylen(self, box):
             return self.length_cache.get(box, None)
     
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -408,7 +408,7 @@
                 return tobox
             resbox = self.execute_with_descr(rop.GETARRAYITEM_GC,
                                              arraydescr, arraybox, indexbox)
    -        self.metainterp.heapcache.setarrayitem(
    +        self.metainterp.heapcache.getarrayitem_now_known(
                     arraybox, arraydescr, indexbox, resbox)
             return resbox
     
    diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py
    --- a/pypy/jit/metainterp/test/test_heapcache.py
    +++ b/pypy/jit/metainterp/test/test_heapcache.py
    @@ -179,6 +179,20 @@
             assert h.getarrayitem(box1, descr1, index1) is None
             assert h.getarrayitem(box1, descr1, index2) is None
     
    +    def test_heapcache_read_fields_multiple_array(self):
    +        h = HeapCache()
    +        h.getarrayitem_now_known(box1, descr1, index1, box2)
    +        h.getarrayitem_now_known(box3, descr1, index1, box4)
    +        assert h.getarrayitem(box1, descr1, index1) is box2
    +        assert h.getarrayitem(box1, descr2, index1) is None
    +        assert h.getarrayitem(box3, descr1, index1) is box4
    +        assert h.getarrayitem(box3, descr2, index1) is None
    +
    +        h.reset()
    +        assert h.getarrayitem(box1, descr1, index1) is None
    +        assert h.getarrayitem(box1, descr2, index1) is None
    +        assert h.getarrayitem(box3, descr1, index1) is None
    +        assert h.getarrayitem(box3, descr2, index1) is None
     
         def test_heapcache_write_fields_multiple_array(self):
             h = HeapCache()
    diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
    --- a/pypy/jit/metainterp/test/test_tracingopts.py
    +++ b/pypy/jit/metainterp/test/test_tracingopts.py
    @@ -469,6 +469,27 @@
             assert res == 2 * -7 + 2 * -8
             self.check_operations_history(getarrayitem_gc=0)
     
    +    def test_heap_caching_multiple_arrays_getarrayitem(self):
    +        class Gbl(object):
    +            pass
    +        g = Gbl()
    +        g.a1 = [7, 8, 9]
    +        g.a2 = [8, 9, 10, 11]
    +
    +        def fn(i):
    +            if i < 0:
    +                g.a1 = [7, 8, 9]
    +                g.a2 = [7, 8, 9, 10]
    +            jit.promote(i)
    +            a1 = g.a1
    +            a1[i + 1] = 15 # make lists mutable
    +            a2 = g.a2
    +            a2[i + 1] = 19
    +            return a1[i] + a2[i] + a1[i] + a2[i]
    +        res = self.interp_operations(fn, [0])
    +        assert res == 2 * 7 + 2 * 8
    +        self.check_operations_history(getarrayitem_gc=2)
    +
         def test_length_caching(self):
             class Gbl(object):
                 pass
    
    From noreply at buildbot.pypy.org  Wed Sep  7 11:04:04 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Wed,  7 Sep 2011 11:04:04 +0200 (CEST)
    Subject: [pypy-commit] pypy space-iterator-improvements: kill dead old
    	commented out code
    Message-ID: <20110907090404.7057382213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: space-iterator-improvements
    Changeset: r47126:dc2ecf85cd66
    Date: 2011-09-07 11:02 +0200
    http://bitbucket.org/pypy/pypy/changeset/dc2ecf85cd66/
    
    Log:	kill dead old commented out code
    
    diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py
    --- a/pypy/objspace/std/iterobject.py
    +++ b/pypy/objspace/std/iterobject.py
    @@ -72,10 +72,6 @@
         w_seqiter.index += 1 
         return w_item
     
    -# XXX __length_hint__()
    -##def len__SeqIter(space,  w_seqiter):
    -##    return w_seqiter.getlength(space)
    -
     
     def iter__FastTupleIter(space, w_seqiter):
         return w_seqiter
    @@ -93,10 +89,6 @@
         w_seqiter.index = index + 1
         return w_item
     
    -# XXX __length_hint__()
    -##def len__FastTupleIter(space, w_seqiter):
    -##    return w_seqiter.getlength(space)
    -
     
     def iter__FastListIter(space, w_seqiter):
         return w_seqiter
    @@ -114,10 +106,6 @@
         w_seqiter.index = index + 1
         return w_item
     
    -# XXX __length_hint__()
    -##def len__FastListIter(space, w_seqiter):
    -##    return w_seqiter.getlength(space)
    -
     
     def iter__ReverseSeqIter(space, w_seqiter):
         return w_seqiter
    @@ -135,20 +123,5 @@
             raise OperationError(space.w_StopIteration, space.w_None) 
         return w_item
     
    -# XXX __length_hint__()
    -##def len__ReverseSeqIter(space, w_seqiter):
    -##    if w_seqiter.w_seq is None:
    -##        return space.wrap(0)
    -##    index = w_seqiter.index+1
    -##    w_length = space.len(w_seqiter.w_seq)
    -##    # if length of sequence is less than index :exhaust iterator
    -##    if space.is_true(space.gt(space.wrap(w_seqiter.index), w_length)):
    -##        w_len = space.wrap(0)
    -##        w_seqiter.w_seq = None
    -##    else:
    -##        w_len =space.wrap(index)
    -##    if space.is_true(space.lt(w_len,space.wrap(0))):
    -##        w_len = space.wrap(0)
    -##    return w_len
     
     register_all(vars())
    
    From noreply at buildbot.pypy.org  Wed Sep  7 12:18:13 2011
    From: noreply at buildbot.pypy.org (hakanardo)
    Date: Wed,  7 Sep 2011 12:18:13 +0200 (CEST)
    Subject: [pypy-commit] pypy jit-duplicated_short_boxes: no point in
     importing extra info from preamble about values at these points
    Message-ID: <20110907101813.9643182213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Hakan Ardo 
    Branch: jit-duplicated_short_boxes
    Changeset: r47127:88fb2b9f8a28
    Date: 2011-09-07 09:57 +0200
    http://bitbucket.org/pypy/pypy/changeset/88fb2b9f8a28/
    
    Log:	no point in importing extra info from preamble about values at these
    	points
    
    diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
    --- a/pypy/jit/metainterp/optimizeopt/optimizer.py
    +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
    @@ -561,7 +561,7 @@
             for i in range(n):
                 arg = op.getarg(i)
                 try:
    -                value = self.getvalue(arg)
    +                value = self.values[arg]
                 except KeyError:
                     pass
                 else:
    diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
    --- a/pypy/jit/metainterp/optimizeopt/unroll.py
    +++ b/pypy/jit/metainterp/optimizeopt/unroll.py
    @@ -467,7 +467,7 @@
             inputargs.append(box)
             box = newresult
             if box in self.optimizer.values:
    -            box = self.optimizer.getvalue(box).force_box()
    +            box = self.optimizer.values[box].force_box()
             jumpargs.append(box)
             
     
    
    From noreply at buildbot.pypy.org  Wed Sep  7 12:18:16 2011
    From: noreply at buildbot.pypy.org (hakanardo)
    Date: Wed,  7 Sep 2011 12:18:16 +0200 (CEST)
    Subject: [pypy-commit] pypy jit-duplicated_short_boxes: hg merge default
    Message-ID: <20110907101816.550DA82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Hakan Ardo 
    Branch: jit-duplicated_short_boxes
    Changeset: r47128:95355f75d469
    Date: 2011-09-07 10:00 +0200
    http://bitbucket.org/pypy/pypy/changeset/95355f75d469/
    
    Log:	hg merge default
    
    diff --git a/lib-python/conftest.py b/lib-python/conftest.py
    --- a/lib-python/conftest.py
    +++ b/lib-python/conftest.py
    @@ -359,7 +359,7 @@
         RegrTest('test_property.py', core=True),
         RegrTest('test_pstats.py'),
         RegrTest('test_pty.py', skip="unsupported extension module"),
    -    RegrTest('test_pwd.py', skip=skip_win32),
    +    RegrTest('test_pwd.py', usemodules="pwd", skip=skip_win32),
         RegrTest('test_py3kwarn.py'),
         RegrTest('test_pyclbr.py'),
         RegrTest('test_pydoc.py'),
    diff --git a/lib_pypy/_elementtree.py b/lib_pypy/_elementtree.py
    new file mode 100644
    --- /dev/null
    +++ b/lib_pypy/_elementtree.py
    @@ -0,0 +1,6 @@
    +# Just use ElementTree.
    +
    +from xml.etree import ElementTree
    +
    +globals().update(ElementTree.__dict__)
    +del __all__
    diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py
    --- a/lib_pypy/_pypy_interact.py
    +++ b/lib_pypy/_pypy_interact.py
    @@ -56,6 +56,10 @@
                     prompt = getattr(sys, 'ps1', '>>> ')
                 try:
                     line = raw_input(prompt)
    +                # Can be None if sys.stdin was redefined
    +                encoding = getattr(sys.stdin, 'encoding', None)
    +                if encoding and not isinstance(line, unicode):
    +                    line = line.decode(encoding)
                 except EOFError:
                     console.write("\n")
                     break
    diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py
    --- a/lib_pypy/distributed/test/test_distributed.py
    +++ b/lib_pypy/distributed/test/test_distributed.py
    @@ -9,7 +9,7 @@
     class AppTestDistributed(object):
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -            "usemodules":("_stackless",)})
    +            "usemodules":("_continuation",)})
     
         def test_init(self):
             import distributed
    @@ -91,10 +91,8 @@
     
     class AppTestDistributedTasklets(object):
         spaceconfig = {"objspace.std.withtproxy": True,
    -                   "objspace.usemodules._stackless": True}
    +                   "objspace.usemodules._continuation": True}
         def setup_class(cls):
    -        #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -        #    "usemodules":("_stackless",)})
             cls.w_test_env = cls.space.appexec([], """():
             from distributed import test_env
             return test_env
    diff --git a/lib_pypy/distributed/test/test_greensock.py b/lib_pypy/distributed/test/test_greensock.py
    --- a/lib_pypy/distributed/test/test_greensock.py
    +++ b/lib_pypy/distributed/test/test_greensock.py
    @@ -10,7 +10,7 @@
             if not option.runappdirect:
                 py.test.skip("Cannot run this on top of py.py because of PopenGateway")
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless",)})
    +                                       "usemodules":("_continuation",)})
             cls.w_remote_side_code = cls.space.appexec([], """():
             import sys
             sys.path.insert(0, '%s')
    diff --git a/lib_pypy/distributed/test/test_socklayer.py b/lib_pypy/distributed/test/test_socklayer.py
    --- a/lib_pypy/distributed/test/test_socklayer.py
    +++ b/lib_pypy/distributed/test/test_socklayer.py
    @@ -9,7 +9,8 @@
     class AppTestSocklayer:
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless","_socket", "select")})
    +                                       "usemodules":("_continuation",
    +                                                     "_socket", "select")})
         
         def test_socklayer(self):
             class X(object):
    diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py
    --- a/lib_pypy/pypy_test/test_coroutine.py
    +++ b/lib_pypy/pypy_test/test_coroutine.py
    @@ -2,7 +2,7 @@
     from py.test import skip, raises
     
     try:
    -    from lib_pypy.stackless import coroutine, CoroutineExit
    +    from stackless import coroutine, CoroutineExit
     except ImportError, e:
         skip('cannot import stackless: %s' % (e,))
     
    @@ -20,10 +20,6 @@
             assert not co.is_zombie
     
         def test_is_zombie_del_without_frame(self):
    -        try:
    -            import _stackless # are we on pypy with a stackless build?
    -        except ImportError:
    -            skip("only works on pypy-c-stackless")
             import gc
             res = []
             class MyCoroutine(coroutine):
    @@ -45,10 +41,6 @@
             assert res[0], "is_zombie was False in __del__"
     
         def test_is_zombie_del_with_frame(self):
    -        try:
    -            import _stackless # are we on pypy with a stackless build?
    -        except ImportError:
    -            skip("only works on pypy-c-stackless")
             import gc
             res = []
             class MyCoroutine(coroutine):
    diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py
    --- a/lib_pypy/pyrepl/reader.py
    +++ b/lib_pypy/pyrepl/reader.py
    @@ -576,7 +576,7 @@
             self.console.push_char(char)
             self.handle1(0)
         
    -    def readline(self):
    +    def readline(self, returns_unicode=False):
             """Read a line.  The implementation of this method also shows
             how to drive Reader if you want more control over the event
             loop."""
    @@ -585,6 +585,8 @@
                 self.refresh()
                 while not self.finished:
                     self.handle1()
    +            if returns_unicode:
    +                return self.get_unicode()
                 return self.get_buffer()
             finally:
                 self.restore()
    diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py
    --- a/lib_pypy/pyrepl/readline.py
    +++ b/lib_pypy/pyrepl/readline.py
    @@ -198,7 +198,7 @@
             reader.ps1 = prompt
             return reader.readline()
     
    -    def multiline_input(self, more_lines, ps1, ps2):
    +    def multiline_input(self, more_lines, ps1, ps2, returns_unicode=False):
             """Read an input on possibly multiple lines, asking for more
             lines as long as 'more_lines(unicodetext)' returns an object whose
             boolean value is true.
    @@ -209,7 +209,7 @@
                 reader.more_lines = more_lines
                 reader.ps1 = reader.ps2 = ps1
                 reader.ps3 = reader.ps4 = ps2
    -            return reader.readline()
    +            return reader.readline(returns_unicode=returns_unicode)
             finally:
                 reader.more_lines = saved
     
    diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py
    --- a/lib_pypy/pyrepl/simple_interact.py
    +++ b/lib_pypy/pyrepl/simple_interact.py
    @@ -54,7 +54,8 @@
                 ps1 = getattr(sys, 'ps1', '>>> ')
                 ps2 = getattr(sys, 'ps2', '... ')
                 try:
    -                statement = multiline_input(more_lines, ps1, ps2)
    +                statement = multiline_input(more_lines, ps1, ps2,
    +                                            returns_unicode=True)
                 except EOFError:
                     break
                 more = console.push(statement)
    diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py
    --- a/lib_pypy/stackless.py
    +++ b/lib_pypy/stackless.py
    @@ -4,121 +4,124 @@
     Please refer to their documentation.
     """
     
    -DEBUG = True
    -
    -def dprint(*args):
    -    for arg in args:
    -        print arg,
    -    print
     
     import traceback
    -import sys
    +import _continuation
    +from functools import partial
    +
    +class TaskletExit(Exception):
    +    pass
    +
    +CoroutineExit = TaskletExit
    +
    +class GWrap(_continuation.continulet):
    +    """This is just a wrapper around continulet to allow
    +       to stick additional attributes to a continulet.
    +       To be more concrete, we need a backreference to
    +       the coroutine object"""
    +
    +
    +class coroutine(object):
    +    "we can't have continulet as a base, because continulets can't be rebound"
    +
    +    def __init__(self):
    +        self._frame = None
    +        self.is_zombie = False
    +
    +    def __getattr__(self, attr):
    +        return getattr(self._frame, attr)
    +
    +    def __del__(self):
    +        self.is_zombie = True
    +        del self._frame
    +        self._frame = None
    +
    +    def bind(self, func, *argl, **argd):
    +        """coro.bind(f, *argl, **argd) -> None.
    +           binds function f to coro. f will be called with
    +           arguments *argl, **argd
    +        """
    +        if self._frame is None or not self._frame.is_pending():
    +
    +            def _func(c, *args, **kwargs):
    +                return func(*args, **kwargs)
    +            
    +            run = partial(_func, *argl, **argd)
    +            self._frame = frame = GWrap(run)
    +        else:
    +            raise ValueError("cannot bind a bound coroutine")
    +
    +    def switch(self):
    +        """coro.switch() -> returnvalue
    +           switches to coroutine coro. If the bound function
    +           f finishes, the returnvalue is that of f, otherwise
    +           None is returned
    +        """
    +        current = _getcurrent()
    +        current._jump_to(self)
    +
    +    def _jump_to(self, coroutine):
    +        _tls.current_coroutine = coroutine
    +        self._frame.switch(to=coroutine._frame)
    +
    +    def kill(self):
    +        """coro.kill() : kill coroutine coro"""
    +        _tls.current_coroutine = self
    +        self._frame.throw(CoroutineExit)
    +
    +    def _is_alive(self):
    +        if self._frame is None:
    +            return False
    +        return not self._frame.is_pending()
    +    is_alive = property(_is_alive)
    +    del _is_alive
    +
    +    def getcurrent():
    +        """coroutine.getcurrent() -> the currently running coroutine"""
    +        try:
    +            return _getcurrent()
    +        except AttributeError:
    +            return _maincoro
    +    getcurrent = staticmethod(getcurrent)
    +
    +    def __reduce__(self):
    +        raise TypeError, 'pickling is not possible based upon continulets'
    +
    +
    +def _getcurrent():
    +    "Returns the current coroutine (i.e. the one which called this function)."
    +    try:
    +        return _tls.current_coroutine
    +    except AttributeError:
    +        # first call in this thread: current == main
    +        _coroutine_create_main()
    +        return _tls.current_coroutine
    +
     try:
    -    # If _stackless can be imported then TaskletExit and CoroutineExit are 
    -    # automatically added to the builtins.
    -    from _stackless import coroutine, greenlet
    -except ImportError: # we are running from CPython
    -    from greenlet import greenlet, GreenletExit
    -    TaskletExit = CoroutineExit = GreenletExit
    -    del GreenletExit
    -    try:
    -        from functools import partial
    -    except ImportError: # we are not running python 2.5
    -        class partial(object):
    -            # just enough of 'partial' to be usefull
    -            def __init__(self, func, *argl, **argd):
    -                self.func = func
    -                self.argl = argl
    -                self.argd = argd
    +    from thread import _local
    +except ImportError:
    +    class _local(object):    # assume no threads
    +        pass
     
    -            def __call__(self):
    -                return self.func(*self.argl, **self.argd)
    +_tls = _local()
     
    -    class GWrap(greenlet):
    -        """This is just a wrapper around greenlets to allow
    -           to stick additional attributes to a greenlet.
    -           To be more concrete, we need a backreference to
    -           the coroutine object"""
    +def _coroutine_create_main():
    +    # create the main coroutine for this thread
    +    _tls.current_coroutine = None
    +    main_coroutine = coroutine()
    +    main_coroutine.bind(lambda x:x)
    +    _tls.main_coroutine = main_coroutine
    +    _tls.current_coroutine = main_coroutine
    +    return main_coroutine
     
    -    class MWrap(object):
    -        def __init__(self,something):
    -            self.something = something
     
    -        def __getattr__(self, attr):
    -            return getattr(self.something, attr)
    +_maincoro = _coroutine_create_main()
     
    -    class coroutine(object):
    -        "we can't have greenlet as a base, because greenlets can't be rebound"
    -
    -        def __init__(self):
    -            self._frame = None
    -            self.is_zombie = False
    -
    -        def __getattr__(self, attr):
    -            return getattr(self._frame, attr)
    -
    -        def __del__(self):
    -            self.is_zombie = True
    -            del self._frame
    -            self._frame = None
    -
    -        def bind(self, func, *argl, **argd):
    -            """coro.bind(f, *argl, **argd) -> None.
    -               binds function f to coro. f will be called with
    -               arguments *argl, **argd
    -            """
    -            if self._frame is None or self._frame.dead:
    -                self._frame = frame = GWrap()
    -                frame.coro = self
    -            if hasattr(self._frame, 'run') and self._frame.run:
    -                raise ValueError("cannot bind a bound coroutine")
    -            self._frame.run = partial(func, *argl, **argd)
    -
    -        def switch(self):
    -            """coro.switch() -> returnvalue
    -               switches to coroutine coro. If the bound function
    -               f finishes, the returnvalue is that of f, otherwise
    -               None is returned
    -            """
    -            try:
    -                return greenlet.switch(self._frame)
    -            except TypeError, exp: # self._frame is the main coroutine
    -                return greenlet.switch(self._frame.something)
    -
    -        def kill(self):
    -            """coro.kill() : kill coroutine coro"""
    -            self._frame.throw()
    -
    -        def _is_alive(self):
    -            if self._frame is None:
    -                return False
    -            return not self._frame.dead
    -        is_alive = property(_is_alive)
    -        del _is_alive
    -
    -        def getcurrent():
    -            """coroutine.getcurrent() -> the currently running coroutine"""
    -            try:
    -                return greenlet.getcurrent().coro
    -            except AttributeError:
    -                return _maincoro
    -        getcurrent = staticmethod(getcurrent)
    -
    -        def __reduce__(self):
    -            raise TypeError, 'pickling is not possible based upon greenlets'
    -
    -    _maincoro = coroutine()
    -    maingreenlet = greenlet.getcurrent()
    -    _maincoro._frame = frame = MWrap(maingreenlet)
    -    frame.coro = _maincoro
    -    del frame
    -    del maingreenlet
     
     from collections import deque
     
     import operator
    -__all__ = 'run getcurrent getmain schedule tasklet channel coroutine \
    -                greenlet'.split()
    +__all__ = 'run getcurrent getmain schedule tasklet channel coroutine'.split()
     
     _global_task_id = 0
     _squeue = None
    @@ -131,7 +134,8 @@
     def _scheduler_remove(value):
         try:
             del _squeue[operator.indexOf(_squeue, value)]
    -    except ValueError:pass
    +    except ValueError:
    +        pass
     
     def _scheduler_append(value, normal=True):
         if normal:
    diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
    --- a/pypy/config/pypyoption.py
    +++ b/pypy/config/pypyoption.py
    @@ -27,7 +27,7 @@
     # --allworkingmodules
     working_modules = default_modules.copy()
     working_modules.update(dict.fromkeys(
    -    ["_socket", "unicodedata", "mmap", "fcntl", "_locale",
    +    ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd",
          "rctime" , "select", "zipimport", "_lsprof",
          "crypt", "signal", "_rawffi", "termios", "zlib", "bz2",
          "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO",
    @@ -58,6 +58,7 @@
         # unix only modules
         del working_modules["crypt"]
         del working_modules["fcntl"]
    +    del working_modules["pwd"]
         del working_modules["termios"]
         del working_modules["_minimal_curses"]
     
    diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py
    --- a/pypy/config/test/test_config.py
    +++ b/pypy/config/test/test_config.py
    @@ -281,11 +281,11 @@
     
     def test_underscore_in_option_name():
         descr = OptionDescription("opt", "", [
    -        BoolOption("_stackless", "", default=False),
    +        BoolOption("_foobar", "", default=False),
         ])
         config = Config(descr)
         parser = to_optparse(config)
    -    assert parser.has_option("--_stackless")
    +    assert parser.has_option("--_foobar")
     
     def test_none():
         dummy1 = BoolOption('dummy1', 'doc dummy', default=False, cmdline=None)
    diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._stackless.txt
    deleted file mode 100644
    --- a/pypy/doc/config/objspace.usemodules._stackless.txt
    +++ /dev/null
    @@ -1,1 +0,0 @@
    -Deprecated.
    diff --git a/pypy/doc/config/objspace.usemodules.pwd.txt b/pypy/doc/config/objspace.usemodules.pwd.txt
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/config/objspace.usemodules.pwd.txt
    @@ -0,0 +1,2 @@
    +Use the 'pwd' module. 
    +This module is expected to be fully working.
    diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
    --- a/pypy/interpreter/miscutils.py
    +++ b/pypy/interpreter/miscutils.py
    @@ -167,3 +167,7 @@
     
         def getmainthreadvalue(self):
             return self._value
    +
    +    def getallvalues(self):
    +        return {0: self._value}
    +
    diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
    --- a/pypy/interpreter/pyopcode.py
    +++ b/pypy/interpreter/pyopcode.py
    @@ -1523,10 +1523,8 @@
     
             if not isinstance(prog, codetype):
                 filename = ''
    -            if not isinstance(prog, str):
    -                if isinstance(prog, basestring):
    -                    prog = str(prog)
    -                elif isinstance(prog, file):
    +            if not isinstance(prog, basestring):
    +                if isinstance(prog, file):
                         filename = prog.name
                         prog = prog.read()
                     else:
    diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
    --- a/pypy/interpreter/test/test_exec.py
    +++ b/pypy/interpreter/test/test_exec.py
    @@ -219,3 +219,30 @@
                 raise e
     
             assert res == 1
    +
    +    def test_exec_unicode(self):
    +        # 's' is a string
    +        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
    +        # 'u' is a unicode
    +        u = s.decode('utf-8')
    +        exec u
    +        assert len(x) == 6
    +        assert ord(x[0]) == 0x0439
    +        assert ord(x[1]) == 0x0446
    +        assert ord(x[2]) == 0x0443
    +        assert ord(x[3]) == 0x043a
    +        assert ord(x[4]) == 0x0435
    +        assert ord(x[5]) == 0x043d
    +
    +    def test_eval_unicode(self):
    +        u = "u'%s'" % unichr(0x1234)
    +        v = eval(u)
    +        assert v == unichr(0x1234)
    +
    +    def test_compile_unicode(self):
    +        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
    +        u = s.decode('utf-8')
    +        c = compile(u, '', 'exec')
    +        exec c
    +        assert len(x) == 6
    +        assert ord(x[0]) == 0x0439
    diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
    --- a/pypy/jit/backend/llsupport/regalloc.py
    +++ b/pypy/jit/backend/llsupport/regalloc.py
    @@ -57,11 +57,13 @@
         all_regs              = []
         no_lower_byte_regs    = []
         save_around_call_regs = []
    -    
    +    frame_reg             = None
    +
         def __init__(self, longevity, frame_manager=None, assembler=None):
             self.free_regs = self.all_regs[:]
             self.longevity = longevity
             self.reg_bindings = {}
    +        self.bindings_to_frame_reg = {}
             self.position = -1
             self.frame_manager = frame_manager
             self.assembler = assembler
    @@ -218,6 +220,10 @@
             self.reg_bindings[v] = loc
             return loc
     
    +    def force_allocate_frame_reg(self, v):
    +        """ Allocate the new variable v in the frame register."""
    +        self.bindings_to_frame_reg[v] = None
    +
         def force_spill_var(self, var):
             self._sync_var(var)
             try:
    @@ -236,6 +242,8 @@
             try:
                 return self.reg_bindings[box]
             except KeyError:
    +            if box in self.bindings_to_frame_reg:
    +                return self.frame_reg
                 return self.frame_manager.loc(box)
     
         def return_constant(self, v, forbidden_vars=[], selected_reg=None):
    @@ -264,8 +272,9 @@
             self._check_type(v)
             if isinstance(v, Const):
                 return self.return_constant(v, forbidden_vars, selected_reg)
    -        
             prev_loc = self.loc(v)
    +        if prev_loc is self.frame_reg and selected_reg is None:
    +            return prev_loc
             loc = self.force_allocate_reg(v, forbidden_vars, selected_reg,
                                           need_lower_byte=need_lower_byte)
             if prev_loc is not loc:
    diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
    --- a/pypy/jit/backend/x86/assembler.py
    +++ b/pypy/jit/backend/x86/assembler.py
    @@ -957,6 +957,7 @@
             if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm):
                 self.mc.MOVSD(to_loc, from_loc)
             else:
    +            assert to_loc is not ebp
                 self.mc.MOV(to_loc, from_loc)
     
         regalloc_mov = mov # legacy interface
    @@ -2510,11 +2511,6 @@
     
         genop_discard_cond_call_gc_wb_array = genop_discard_cond_call_gc_wb
     
    -    def genop_force_token(self, op, arglocs, resloc):
    -        # RegAlloc.consider_force_token ensures this:
    -        assert isinstance(resloc, RegLoc)
    -        self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS)
    -
         def not_implemented_op_discard(self, op, arglocs):
             not_implemented("not implemented operation: %s" % op.getopname())
     
    diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
    --- a/pypy/jit/backend/x86/regalloc.py
    +++ b/pypy/jit/backend/x86/regalloc.py
    @@ -29,6 +29,7 @@
         all_regs = [eax, ecx, edx, ebx, esi, edi]
         no_lower_byte_regs = [esi, edi]
         save_around_call_regs = [eax, edx, ecx]
    +    frame_reg = ebp
     
         REGLOC_TO_GCROOTMAP_REG_INDEX = {
             ebx: 1,
    @@ -312,8 +313,11 @@
                         self.fm.frame_bindings[arg] = loc
                 else:
                     if isinstance(loc, RegLoc):
    -                    self.rm.reg_bindings[arg] = loc
    -                    used[loc] = None
    +                    if loc is ebp:
    +                        self.rm.bindings_to_frame_reg[arg] = None
    +                    else:
    +                        self.rm.reg_bindings[arg] = loc
    +                        used[loc] = None
                     else:
                         self.fm.frame_bindings[arg] = loc
             self.rm.free_regs = []
    @@ -1358,8 +1362,8 @@
                                                 self.assembler.datablockwrapper)
     
         def consider_force_token(self, op):
    -        loc = self.rm.force_allocate_reg(op.result)
    -        self.Perform(op, [], loc)
    +        # the FORCE_TOKEN operation returns directly 'ebp'
    +        self.rm.force_allocate_frame_reg(op.result)
     
         def not_implemented_op(self, op):
             not_implemented("not implemented operation: %s" % op.getopname())
    diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py
    --- a/pypy/jit/backend/x86/runner.py
    +++ b/pypy/jit/backend/x86/runner.py
    @@ -119,7 +119,8 @@
                 setitem(index, null)
     
         def get_latest_force_token(self):
    -        return self.assembler.fail_ebp + FORCE_INDEX_OFS
    +        # the FORCE_TOKEN operation and this helper both return 'ebp'.
    +        return self.assembler.fail_ebp
     
         def execute_token(self, executable_token):
             addr = executable_token._x86_bootstrap_code
    @@ -153,8 +154,9 @@
                                            flavor='raw', zero=True,
                                            immortal=True)
     
    -    def force(self, addr_of_force_index):
    +    def force(self, addr_of_force_token):
             TP = rffi.CArrayPtr(lltype.Signed)
    +        addr_of_force_index = addr_of_force_token + FORCE_INDEX_OFS
             fail_index = rffi.cast(TP, addr_of_force_index)[0]
             assert fail_index >= 0, "already forced!"
             faildescr = self.get_fail_descr_from_number(fail_index)
    @@ -164,7 +166,7 @@
             # start of "no gc operation!" block
             fail_index_2 = self.assembler.grab_frame_values(
                 bytecode,
    -            addr_of_force_index - FORCE_INDEX_OFS,
    +            addr_of_force_token,
                 self.all_null_registers)
             self.assembler.leave_jitted_hook()
             # end of "no gc operation!" block
    diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
    --- a/pypy/jit/metainterp/optimizeopt/heap.py
    +++ b/pypy/jit/metainterp/optimizeopt/heap.py
    @@ -25,7 +25,7 @@
             #      'cached_fields'.
             #
             self._cached_fields = {}
    -        self._cached_fields_getfield_op = {}        
    +        self._cached_fields_getfield_op = {}
             self._lazy_setfield = None
             self._lazy_setfield_registered = False
     
    @@ -81,7 +81,7 @@
         def remember_field_value(self, structvalue, fieldvalue, getfield_op=None):
             assert self._lazy_setfield is None
             self._cached_fields[structvalue] = fieldvalue
    -        self._cached_fields_getfield_op[structvalue] = getfield_op        
    +        self._cached_fields_getfield_op[structvalue] = getfield_op
     
         def force_lazy_setfield(self, optheap, can_cache=True):
             op = self._lazy_setfield
    @@ -167,7 +167,7 @@
     
         def new(self):
             return OptHeap()
    -        
    +
         def produce_potential_short_preamble_ops(self, sb):
             descrkeys = self.cached_fields.keys()
             if not we_are_translated():
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    @@ -4711,6 +4711,33 @@
             """
             self.optimize_loop(ops, expected)
     
    +    def test_forced_virtuals_aliasing(self):
    +        ops = """
    +        [i0, i1]
    +        p0 = new(descr=ssize)
    +        p1 = new(descr=ssize)
    +        escape(p0)
    +        escape(p1)
    +        setfield_gc(p0, i0, descr=adescr)
    +        setfield_gc(p1, i1, descr=adescr)
    +        i2 = getfield_gc(p0, descr=adescr)
    +        jump(i2, i2)
    +        """
    +        expected = """
    +        [i0, i1]
    +        p0 = new(descr=ssize)
    +        escape(p0)
    +        p1 = new(descr=ssize)
    +        escape(p1)
    +        setfield_gc(p0, i0, descr=adescr)
    +        setfield_gc(p1, i1, descr=adescr)
    +        jump(i0, i0)
    +        """
    +        py.test.skip("not implemented")
    +        # setfields on things that used to be virtual still can't alias each
    +        # other
    +        self.optimize_loop(ops, expected)
    +
     
     class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
         pass
    diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py
    --- a/pypy/module/_rawffi/test/test__rawffi.py
    +++ b/pypy/module/_rawffi/test/test__rawffi.py
    @@ -639,33 +639,6 @@
             a1.free()
             cb.free()
     
    -    def test_another_callback_in_stackless(self):
    -        try:
    -            import _stackless
    -        except ImportError:
    -            skip("only valid in a stackless pypy-c")
    -
    -        import _rawffi
    -        lib = _rawffi.CDLL(self.lib_name)
    -        runcallback = lib.ptr('runcallback', ['P'], 'q')
    -        def callback():
    -            co = _stackless.coroutine()
    -            def f():
    -                pass
    -            try:
    -                co.bind(f)
    -                co.switch()
    -            except RuntimeError:
    -                return 1<<42
    -            return -5
    -
    -        cb = _rawffi.CallbackPtr(callback, [], 'q')
    -        a1 = cb.byptr()
    -        res = runcallback(a1)
    -        assert res[0] == 1<<42
    -        a1.free()
    -        cb.free()
    -
         def test_raising_callback(self):
             import _rawffi, sys
             import StringIO
    diff --git a/pypy/module/_stackless/__init__.py b/pypy/module/_stackless/__init__.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/__init__.py
    +++ /dev/null
    @@ -1,36 +0,0 @@
    -# Package initialisation
    -from pypy.interpreter.mixedmodule import MixedModule
    -
    -class Module(MixedModule):
    -    """
    -    This module implements Stackless for applications.
    -    """
    -
    -    appleveldefs = {
    -        'GreenletExit' : 'app_greenlet.GreenletExit',
    -        'GreenletError' : 'app_greenlet.GreenletError',
    -    }
    -
    -    interpleveldefs = {
    -        'tasklet'    : 'interp_stackless.tasklet',
    -        'coroutine'  : 'interp_coroutine.AppCoroutine',
    -        'greenlet'   : 'interp_greenlet.AppGreenlet',
    -        'usercostate': 'interp_composable_coroutine.W_UserCoState',
    -        '_return_main' : 'interp_coroutine.return_main',
    -        'get_stack_depth_limit': 'interp_coroutine.get_stack_depth_limit',
    -        'set_stack_depth_limit': 'interp_coroutine.set_stack_depth_limit',
    -    }
    -
    -    def setup_after_space_initialization(self):
    -        # post-installing classmethods/staticmethods which
    -        # are not yet directly supported
    -        from pypy.module._stackless.interp_coroutine import post_install as post_install_coro
    -        post_install_coro(self)
    -        from pypy.module._stackless.interp_greenlet import post_install as post_install_greenlet
    -        post_install_greenlet(self)
    -
    -        if self.space.config.translation.gc == 'marksweep':
    -            from pypy.module._stackless.interp_clonable import post_install as post_install_clonable
    -            self.extra_interpdef('clonable', 'interp_clonable.AppClonableCoroutine')
    -            self.extra_interpdef('fork',     'interp_clonable.fork')
    -            post_install_clonable(self)
    diff --git a/pypy/module/_stackless/app_greenlet.py b/pypy/module/_stackless/app_greenlet.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/app_greenlet.py
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -class GreenletExit(Exception):
    -    pass
    -
    -class GreenletError(Exception):
    -    pass
    diff --git a/pypy/module/_stackless/interp_clonable.py b/pypy/module/_stackless/interp_clonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_clonable.py
    +++ /dev/null
    @@ -1,106 +0,0 @@
    -from pypy.interpreter.error import OperationError
    -from pypy.interpreter.typedef import TypeDef
    -from pypy.interpreter.gateway import interp2app
    -from pypy.module._stackless.interp_coroutine import AppCoroutine, AppCoState
    -from pypy.module._stackless.interp_coroutine import makeStaticMethod
    -from pypy.module._stackless.rcoroutine import AbstractThunk
    -from pypy.module._stackless.rclonable import InterpClonableMixin
    -
    -
    -class AppClonableCoroutine(AppCoroutine, InterpClonableMixin):
    -
    -    def newsubctx(self):
    -        self.hello_local_pool()
    -        AppCoroutine.newsubctx(self)
    -        self.goodbye_local_pool()
    -
    -    def hello(self):
    -        self.hello_local_pool()
    -        AppCoroutine.hello(self)
    -
    -    def goodbye(self):
    -        AppCoroutine.goodbye(self)
    -        self.goodbye_local_pool()
    -
    -    def descr_method__new__(space, w_subtype):
    -        co = space.allocate_instance(AppClonableCoroutine, w_subtype)
    -        costate = AppClonableCoroutine._get_state(space)
    -        AppClonableCoroutine.__init__(co, space, state=costate)
    -        return space.wrap(co)
    -
    -    def _get_state(space):
    -        return space.fromcache(AppClonableCoState)
    -    _get_state = staticmethod(_get_state)
    -
    -    def w_getcurrent(space):
    -        return space.wrap(AppClonableCoroutine._get_state(space).current)
    -    w_getcurrent = staticmethod(w_getcurrent)
    -
    -    def w_clone(self):
    -        space = self.space
    -        costate = self.costate
    -        if costate.current is self:
    -            raise OperationError(space.w_RuntimeError,
    -                                 space.wrap("clone() cannot clone the "
    -                                            "current coroutine"
    -                                            "; use fork() instead"))
    -        copy = AppClonableCoroutine(space, state=costate)
    -        copy.subctx = self.clone_into(copy, self.subctx)
    -        return space.wrap(copy)
    -
    -    def descr__reduce__(self, space):
    -        raise OperationError(space.w_TypeError,
    -                             space.wrap("_stackless.clonable instances are "
    -                                        "not picklable"))
    -
    -
    -AppClonableCoroutine.typedef = TypeDef("clonable", AppCoroutine.typedef,
    -    __new__    = interp2app(AppClonableCoroutine.descr_method__new__.im_func),
    -    getcurrent = interp2app(AppClonableCoroutine.w_getcurrent),
    -    clone      = interp2app(AppClonableCoroutine.w_clone),
    -    __reduce__ = interp2app(AppClonableCoroutine.descr__reduce__),
    -)
    -
    -class AppClonableCoState(AppCoState):
    -    def post_install(self):
    -        self.current = self.main = AppClonableCoroutine(self.space, state=self)
    -        self.main.subctx.clear_framestack()      # wack
    -
    -def post_install(module):
    -    makeStaticMethod(module, 'clonable', 'getcurrent')
    -    space = module.space
    -    AppClonableCoroutine._get_state(space).post_install()
    -
    -# ____________________________________________________________
    -
    -class ForkThunk(AbstractThunk):
    -    def __init__(self, coroutine):
    -        self.coroutine = coroutine
    -        self.newcoroutine = None
    -    def call(self):
    -        oldcoro = self.coroutine
    -        self.coroutine = None
    -        newcoro = AppClonableCoroutine(oldcoro.space, state=oldcoro.costate)
    -        newcoro.subctx = oldcoro.clone_into(newcoro, oldcoro.subctx)
    -        newcoro.parent = oldcoro
    -        self.newcoroutine = newcoro
    -
    -def fork(space):
    -    """Fork, as in the Unix fork(): the call returns twice, and the return
    -    value of the call is either the new 'child' coroutine object (if returning
    -    into the parent), or None (if returning into the child).  This returns
    -    into the parent first, which can switch to the child later.
    -    """
    -    costate = AppClonableCoroutine._get_state(space)
    -    current = costate.current
    -    if current is costate.main:
    -        raise OperationError(space.w_RuntimeError,
    -                             space.wrap("cannot fork() in the main "
    -                                        "clonable coroutine"))
    -    thunk = ForkThunk(current)
    -    coro_fork = AppClonableCoroutine(space, state=costate)
    -    coro_fork.bind(thunk)
    -    coro_fork.switch()
    -    # we resume here twice.  The following would need explanations about
    -    # why it returns the correct thing in both the parent and the child...
    -    return space.wrap(thunk.newcoroutine)
    diff --git a/pypy/module/_stackless/interp_composable_coroutine b/pypy/module/_stackless/interp_composable_coroutine
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_composable_coroutine
    +++ /dev/null
    @@ -1,33 +0,0 @@
    -from pypy.interpreter.baseobjspace import Wrappable
    -from pypy.interpreter.typedef import TypeDef, interp2app
    -from pypy.module._stackless.coroutine import AppCoState, AppCoroutine
    -
    -
    -class W_UserCoState(Wrappable):
    -    def __init__(self, space):
    -        self.costate = AppCoState(space)
    -        self.costate.post_install()
    -
    -    def descr_method__new__(space, w_subtype):
    -        costate = space.allocate_instance(W_UserCoState, w_subtype)
    -        W_UserCoState.__init__(costate, space)
    -        return space.wrap(costate)
    -
    -    def w_getcurrent(self):
    -        space = self.costate.space
    -        return space.wrap(self.costate.current)
    -
    -    def w_spawn(self, w_subtype=None):
    -        space = self.costate.space
    -        if space.is_w(w_subtype, space.w_None):
    -            w_subtype = space.gettypeobject(AppCoroutine.typedef)
    -        co = space.allocate_instance(AppCoroutine, w_subtype)
    -        AppCoroutine.__init__(co, space, state=self.costate)
    -        return space.wrap(co)
    -
    -W_UserCoState.typedef = TypeDef("usercostate",
    -    __new__ = interp2app(W_UserCoState.descr_method__new__.im_func),
    -    __module__ = '_stackless',
    -    getcurrent = interp2app(W_UserCoState.w_getcurrent),
    -    spawn      = interp2app(W_UserCoState.w_spawn),
    -)
    diff --git a/pypy/module/_stackless/interp_composable_coroutine.py b/pypy/module/_stackless/interp_composable_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_composable_coroutine.py
    +++ /dev/null
    @@ -1,34 +0,0 @@
    -from pypy.interpreter.baseobjspace import Wrappable
    -from pypy.interpreter.typedef import TypeDef, interp2app
    -from pypy.module._stackless.interp_coroutine import AppCoState, AppCoroutine
    -
    -
    -class W_UserCoState(Wrappable):
    -    def __init__(self, space):
    -        self.costate = AppCoState(space)
    -        self.costate.post_install()
    -
    -    def descr_method__new__(space, w_subtype):
    -        costate = space.allocate_instance(W_UserCoState, w_subtype)
    -        W_UserCoState.__init__(costate, space)
    -        return space.wrap(costate)
    -
    -    def w_getcurrent(self):
    -        space = self.costate.space
    -        return space.wrap(self.costate.current)
    -
    -    def w_spawn(self, w_subtype=None):
    -        space = self.costate.space
    -        if space.is_w(w_subtype, space.w_None):
    -            w_subtype = space.gettypeobject(AppCoroutine.typedef)
    -        co = space.allocate_instance(AppCoroutine, w_subtype)
    -        AppCoroutine.__init__(co, space, state=self.costate)
    -        return space.wrap(co)
    -
    -W_UserCoState.typedef = TypeDef("usercostate",
    -    __new__ = interp2app(W_UserCoState.descr_method__new__.im_func),
    -    __module__ = '_stackless',
    -    getcurrent = interp2app(W_UserCoState.w_getcurrent),
    -    spawn      = interp2app(W_UserCoState.w_spawn),
    -)
    -W_UserCoState.acceptable_as_base_class = False
    diff --git a/pypy/module/_stackless/interp_coroutine.py b/pypy/module/_stackless/interp_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_coroutine.py
    +++ /dev/null
    @@ -1,403 +0,0 @@
    -"""
    -Coroutine implementation for application level on top
    -of the internal coroutines.
    -This is an extensible concept. Multiple implementations
    -of concurrency can exist together, if they follow the
    -basic concept of maintaining their own costate.
    -
    -There is also some diversification possible by using
    -multiple costates for the same type. This leads to
    -disjoint switchable sets within the same type.
    -
    -I'm not so sure to what extent the opposite is possible, too.
    -I.e., merging the costate of tasklets and greenlets would
    -allow them to be parents of each other. Needs a bit more
    -experience to decide where to set the limits.
    -"""
    -
    -from pypy.interpreter.argument import Arguments
    -from pypy.interpreter.typedef import GetSetProperty, TypeDef
    -from pypy.interpreter.gateway import interp2app, unwrap_spec
    -from pypy.interpreter.error import OperationError, operationerrfmt
    -
    -from pypy.module._stackless.stackless_flags import StacklessFlags
    -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState, AbstractThunk, CoroutineExit
    -
    -from pypy.module.exceptions.interp_exceptions import W_SystemExit, _new_exception
    -
    -from pypy.rlib import rstack, jit # for resume points
    -from pypy.tool import stdlib_opcode as pythonopcode
    -
    -class _AppThunk(AbstractThunk):
    -
    -    def __init__(self, space, costate, w_obj, args):
    -        self.space = space
    -        self.costate = costate
    -        if not space.is_true(space.callable(w_obj)):
    -            raise operationerrfmt(
    -                space.w_TypeError,
    -                "'%s' object is not callable",
    -                space.type(w_obj).getname(space))
    -        self.w_func = w_obj
    -        self.args = args
    -
    -    def call(self):
    -        costate = self.costate
    -        w_result = self.space.call_args(self.w_func, self.args)
    -        costate.w_tempval = w_result
    -
    -class _ResumeThunk(AbstractThunk):
    -    def __init__(self, space, costate, w_frame):
    -        self.space = space
    -        self.costate = costate
    -        self.w_frame = w_frame
    -
    -    def call(self):
    -        w_result = resume_frame(self.space, self.w_frame)
    -        # costate.w_tempval = w_result #XXX?
    -
    -
    -W_CoroutineExit = _new_exception('CoroutineExit', W_SystemExit,
    -                        """Coroutine killed manually.""")
    -
    -# Should be moved to interp_stackless.py if it's ever implemented... Currently
    -# used by pypy/lib/stackless.py.
    -W_TaskletExit = _new_exception('TaskletExit', W_SystemExit,
    -            """Tasklet killed manually.""")
    -
    -class AppCoroutine(Coroutine): # XXX, StacklessFlags):
    -
    -    def __init__(self, space, state=None):
    -        self.space = space
    -        if state is None:
    -            state = AppCoroutine._get_state(space)
    -        Coroutine.__init__(self, state)
    -        self.flags = 0
    -        self.newsubctx()
    -
    -    def newsubctx(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx = ec.Subcontext()
    -
    -    def descr_method__new__(space, w_subtype):
    -        co = space.allocate_instance(AppCoroutine, w_subtype)
    -        AppCoroutine.__init__(co, space)
    -        return space.wrap(co)
    -
    -    def _get_state(space):
    -        return space.fromcache(AppCoState)
    -    _get_state = staticmethod(_get_state)
    -
    -    def w_bind(self, w_func, __args__):
    -        space = self.space
    -        if self.frame is not None:
    -            raise OperationError(space.w_ValueError, space.wrap(
    -                "cannot bind a bound Coroutine"))
    -        state = self.costate
    -        thunk = _AppThunk(space, state, w_func, __args__)
    -        self.bind(thunk)
    -
    -    def w_switch(self):
    -        space = self.space
    -        if self.frame is None:
    -            raise OperationError(space.w_ValueError, space.wrap(
    -                "cannot switch to an unbound Coroutine"))
    -        state = self.costate
    -        self.switch()
    -        w_ret, state.w_tempval = state.w_tempval, space.w_None
    -        return w_ret
    -
    -    def switch(self):
    -        space = self.space
    -        try:
    -            Coroutine.switch(self)
    -        except CoroutineExit:
    -            raise OperationError(self.costate.w_CoroutineExit, space.w_None)
    -
    -    def w_finished(self, w_excinfo):
    -        pass
    -
    -    def finish(self, operror=None):
    -        space = self.space
    -        if isinstance(operror, OperationError):
    -            w_exctype = operror.w_type
    -            w_excvalue = operror.get_w_value(space)
    -            w_exctraceback = operror.get_traceback()
    -            w_excinfo = space.newtuple([w_exctype, w_excvalue, w_exctraceback])
    -
    -            if w_exctype is self.costate.w_CoroutineExit:
    -                self.coroutine_exit = True
    -        else:
    -            w_N = space.w_None
    -            w_excinfo = space.newtuple([w_N, w_N, w_N])
    -
    -        return space.call_method(space.wrap(self),'finished', w_excinfo)
    -
    -    def hello(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.enter(ec)
    -
    -    def goodbye(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.leave(ec)
    -
    -    def w_kill(self):
    -        self.kill()
    -
    -    def w_throw(self, w_type, w_value=None, w_traceback=None):
    -        space = self.space
    -
    -        operror = OperationError(w_type, w_value)
    -        operror.normalize_exception(space)
    -
    -        if not space.is_w(w_traceback, space.w_None):
    -            from pypy.interpreter import pytraceback
    -            tb = space.interpclass_w(w_traceback)
    -            if tb is None or not space.is_true(space.isinstance(tb,
    -                space.gettypeobject(pytraceback.PyTraceback.typedef))):
    -                raise OperationError(space.w_TypeError,
    -                      space.wrap("throw: arg 3 must be a traceback or None"))
    -            operror.set_traceback(tb)
    -
    -        self._kill(operror)
    -
    -    def _userdel(self):
    -        if self.get_is_zombie():
    -            return
    -        self.set_is_zombie(True)
    -        self.space.userdel(self.space.wrap(self))
    -
    -    def w_getcurrent(space):
    -        return space.wrap(AppCoroutine._get_state(space).current)
    -    w_getcurrent = staticmethod(w_getcurrent)
    -
    -    def w_getmain(space):
    -        return space.wrap(AppCoroutine._get_state(space).main)
    -    w_getmain = staticmethod(w_getmain)
    -
    -    # pickling interface
    -    def descr__reduce__(self, space):
    -        # this is trying to be simplistic at the moment.
    -        # we neither allow to pickle main (which can become a mess
    -        # since it has some deep anchestor frames)
    -        # nor we allow to pickle the current coroutine.
    -        # rule: switch before pickling.
    -        # you cannot construct the tree that you are climbing.
    -        from pypy.interpreter.mixedmodule import MixedModule
    -        w_mod    = space.getbuiltinmodule('_stackless')
    -        mod      = space.interp_w(MixedModule, w_mod)
    -        w_mod2    = space.getbuiltinmodule('_pickle_support')
    -        mod2      = space.interp_w(MixedModule, w_mod2)
    -        w_new_inst = mod.get('coroutine')
    -        w        = space.wrap
    -        nt = space.newtuple
    -        ec = self.space.getexecutioncontext()
    -
    -        if self is self.costate.main:
    -            return nt([mod.get('_return_main'), nt([])])
    -
    -        thunk = self.thunk
    -        if isinstance(thunk, _AppThunk):
    -            w_args, w_kwds = thunk.args.topacked()
    -            w_thunk = nt([thunk.w_func, w_args, w_kwds])
    -        else:
    -            w_thunk = space.w_None
    -
    -        tup_base = [
    -            ]
    -        tup_state = [
    -            w(self.flags),
    -            self.subctx.getstate(space),
    -            w_thunk,
    -            w(self.parent),
    -            ]
    -
    -        return nt([w_new_inst, nt(tup_base), nt(tup_state)])
    -
    -    def descr__setstate__(self, space, w_args):
    -        w_flags, w_state, w_thunk, w_parent = space.unpackiterable(w_args,
    -                                                        expected_length=4)
    -        self.flags = space.int_w(w_flags)
    -        if space.is_w(w_parent, space.w_None):
    -            w_parent = self.w_getmain(space)
    -        self.parent = space.interp_w(AppCoroutine, w_parent)
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.setstate(space, w_state)
    -        if space.is_w(w_thunk, space.w_None):
    -            if space.is_w(w_state, space.w_None):
    -                self.thunk = None
    -            else:
    -                self.bind(_ResumeThunk(space, self.costate, self.subctx.topframe))
    -        else:
    -            w_func, w_args, w_kwds = space.unpackiterable(w_thunk,
    -                                                          expected_length=3)
    -            args = Arguments.frompacked(space, w_args, w_kwds)
    -            self.bind(_AppThunk(space, self.costate, w_func, args))
    -
    -
    -# _mixin_ did not work
    -for methname in StacklessFlags.__dict__:
    -    meth = getattr(StacklessFlags, methname)
    -    if hasattr(meth, 'im_func'):
    -        setattr(AppCoroutine, meth.__name__, meth.im_func)
    -del meth, methname
    -
    -def w_get_is_zombie(self, space):
    -    return space.wrap(self.get_is_zombie())
    -AppCoroutine.w_get_is_zombie = w_get_is_zombie
    -
    -def w_get_is_alive(self, space):
    -    return space.wrap(self.is_alive())
    -AppCoroutine.w_get_is_alive = w_get_is_alive
    -
    -def w_descr__framestack(self, space):
    -    assert isinstance(self, AppCoroutine)
    -    counter = 0
    -    f = self.subctx.topframe
    -    while f is not None:
    -        counter += 1
    -        f = f.f_backref()
    -    items = [None] * counter
    -    f = self.subctx.topframe
    -    while f is not None:
    -        counter -= 1
    -        assert counter >= 0
    -        items[counter] = space.wrap(f)
    -        f = f.f_backref()
    -    assert counter == 0
    -    return space.newtuple(items)
    -
    -def makeStaticMethod(module, classname, funcname):
    -    "NOT_RPYTHON"
    -    space = module.space
    -    w_klass = space.getattr(space.wrap(module), space.wrap(classname))
    -    # HACK HACK HACK
    -    # make the typeobject mutable for a while
    -    from pypy.objspace.std.typeobject import W_TypeObject
    -    assert isinstance(w_klass, W_TypeObject)
    -    old_flag = w_klass.flag_heaptype
    -    w_klass.flag_heaptype = True
    -
    -    space.appexec([w_klass, space.wrap(funcname)], """
    -        (klass, funcname):
    -            func = getattr(klass, funcname)
    -            setattr(klass, funcname, staticmethod(func.im_func))
    -    """)
    -    w_klass.flag_heaptype = old_flag
    -
    -def post_install(module):
    -    makeStaticMethod(module, 'coroutine', 'getcurrent')
    -    makeStaticMethod(module, 'coroutine', 'getmain')
    -    space = module.space
    -    AppCoroutine._get_state(space).post_install()
    -
    -# space.appexec("""() :
    -
    -# maybe use __spacebind__ for postprocessing
    -
    -AppCoroutine.typedef = TypeDef("coroutine",
    -    __new__ = interp2app(AppCoroutine.descr_method__new__.im_func),
    -    bind = interp2app(AppCoroutine.w_bind),
    -    switch = interp2app(AppCoroutine.w_switch),
    -    kill = interp2app(AppCoroutine.w_kill),
    -    throw = interp2app(AppCoroutine.w_throw),
    -    finished = interp2app(AppCoroutine.w_finished),
    -    is_alive = GetSetProperty(AppCoroutine.w_get_is_alive),
    -    is_zombie = GetSetProperty(AppCoroutine.w_get_is_zombie,
    -      doc=AppCoroutine.get_is_zombie.__doc__), #--- this flag is a bit obscure
    -      # and not useful (it's totally different from Coroutine.is_zombie(), too)
    -      # but lib/stackless.py uses it
    -    _framestack = GetSetProperty(w_descr__framestack),
    -    getcurrent = interp2app(AppCoroutine.w_getcurrent),
    -    getmain = interp2app(AppCoroutine.w_getmain),
    -    __reduce__   = interp2app(AppCoroutine.descr__reduce__),
    -    __setstate__ = interp2app(AppCoroutine.descr__setstate__),
    -    __module__ = '_stackless',
    -)
    -
    -class AppCoState(BaseCoState):
    -    def __init__(self, space):
    -        BaseCoState.__init__(self)
    -        self.w_tempval = space.w_None
    -        self.space = space
    -
    -        # XXX Workaround: for now we need to instantiate these classes
    -        # explicitly for translation to work
    -        W_CoroutineExit(space)
    -        W_TaskletExit(space)
    -
    -        # Exporting new exception to space
    -        self.w_CoroutineExit = space.gettypefor(W_CoroutineExit)
    -        space.setitem(
    -                      space.exceptions_module.w_dict,
    -                      space.new_interned_str('CoroutineExit'),
    -                      self.w_CoroutineExit)
    -        space.setitem(space.builtin.w_dict,
    -                      space.new_interned_str('CoroutineExit'),
    -                      self.w_CoroutineExit)
    -
    -        # Should be moved to interp_stackless.py if it's ever implemented...
    -        self.w_TaskletExit = space.gettypefor(W_TaskletExit)
    -        space.setitem(
    -                      space.exceptions_module.w_dict,
    -                      space.new_interned_str('TaskletExit'),
    -                      self.w_TaskletExit)
    -        space.setitem(space.builtin.w_dict,
    -                      space.new_interned_str('TaskletExit'),
    -                      self.w_TaskletExit)
    -
    -    def post_install(self):
    -        self.current = self.main = AppCoroutine(self.space, state=self)
    -        self.main.subctx.clear_framestack()      # wack
    -
    -def return_main(space):
    -    return AppCoroutine._get_state(space).main
    -
    -def get_stack_depth_limit(space):
    -    return space.wrap(rstack.get_stack_depth_limit())
    -
    - at unwrap_spec(limit=int)
    -def set_stack_depth_limit(space, limit):
    -    rstack.set_stack_depth_limit(limit)
    -
    -
    -# ___________________________________________________________________
    -# unpickling trampoline
    -
    -def resume_frame(space, w_frame):
    -    from pypy.interpreter.pyframe import PyFrame
    -    frame = space.interp_w(PyFrame, w_frame, can_be_None=True)
    -    w_result = space.w_None
    -    operr = None
    -    executioncontext = frame.space.getexecutioncontext()
    -    while frame is not None:
    -        code = frame.pycode.co_code
    -        instr = frame.last_instr
    -        opcode = ord(code[instr])
    -        map = pythonopcode.opmap
    -        call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], map['CALL_FUNCTION_VAR'],
    -                    map['CALL_FUNCTION_VAR_KW'], map['CALL_METHOD']]
    -        assert opcode in call_ops
    -        instr += 1
    -        oparg = ord(code[instr]) | ord(code[instr + 1]) << 8
    -        nargs = oparg & 0xff
    -        nkwds = (oparg >> 8) & 0xff
    -        if nkwds == 0:     # only positional arguments
    -            # fast paths leaves things on the stack, pop them
    -            if space.config.objspace.opcodes.CALL_METHOD and opcode == map['CALL_METHOD']:
    -                frame.dropvalues(nargs + 2)
    -            elif opcode == map['CALL_FUNCTION']:
    -                frame.dropvalues(nargs + 1)
    -
    -        # small hack: unlink frame out of the execution context, because
    -        # execute_frame will add it there again
    -        executioncontext.topframeref = jit.non_virtual_ref(frame.f_backref())
    -        frame.last_instr = instr + 1 # continue after the call
    -        try:
    -            w_result = frame.execute_frame(w_result, operr)
    -        except OperationError, operr:
    -            pass
    -        frame = frame.f_backref()
    -    if operr:
    -        raise operr
    -    return w_result
    diff --git a/pypy/module/_stackless/interp_greenlet.py b/pypy/module/_stackless/interp_greenlet.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_greenlet.py
    +++ /dev/null
    @@ -1,238 +0,0 @@
    -from pypy.interpreter.argument import Arguments
    -from pypy.interpreter.typedef import GetSetProperty, TypeDef
    -from pypy.interpreter.gateway import interp2app
    -from pypy.interpreter.gateway import NoneNotWrapped
    -from pypy.interpreter.error import OperationError
    -
    -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState
    -from pypy.module._stackless.rcoroutine import AbstractThunk, syncstate
    -from pypy.module._stackless.interp_coroutine import makeStaticMethod
    -
    -
    -class GreenletThunk(AbstractThunk):
    -
    -    def __init__(self, greenlet):
    -        self.greenlet = greenlet
    -
    -    def call(self):
    -        greenlet = self.greenlet
    -        greenlet.active = True
    -        try:
    -            space = greenlet.space
    -            args_w = greenlet.costate.args_w
    -            __args__ = Arguments(space, args_w)
    -            try:
    -                w_run = space.getattr(space.wrap(greenlet), space.wrap('run'))
    -                greenlet.w_callable = None
    -                w_result = space.call_args(w_run, __args__)
    -            except OperationError, operror:
    -                if not operror.match(space, greenlet.costate.w_GreenletExit):
    -                    raise
    -                w_result = operror.get_w_value(space)
    -        finally:
    -            greenlet.active = False
    -        greenlet.costate.args_w = [w_result]
    -
    -class AppGreenletCoState(BaseCoState):
    -    def __init__(self, space):
    -        BaseCoState.__init__(self)
    -        self.args_w = None
    -        self.space = space
    -        self.w_GreenletExit  = get(space, "GreenletExit")
    -        self.w_GreenletError = get(space, "GreenletError")
    -
    -    def post_install(self):
    -        self.current = self.main = AppGreenlet(self.space, is_main=True)
    -
    -class AppGreenlet(Coroutine):
    -    def __init__(self, space, w_callable=None, is_main=False):
    -        Coroutine.__init__(self, self._get_state(space))
    -        self.space = space
    -        self.w_callable = w_callable
    -        self.active = is_main
    -        self.subctx = space.getexecutioncontext().Subcontext()
    -        if is_main:
    -            self.subctx.clear_framestack()      # wack
    -        else:
    -            self.bind(GreenletThunk(self))
    -
    -    def descr_method__new__(space, w_subtype, __args__):
    -        co = space.allocate_instance(AppGreenlet, w_subtype)
    -        AppGreenlet.__init__(co, space)
    -        return space.wrap(co)
    -
    -    def descr_method__init__(self, w_run=NoneNotWrapped,
    -                                   w_parent=NoneNotWrapped):
    -        if w_run is not None:
    -            self.set_run(w_run)
    -        if w_parent is not None:
    -            self.set_parent(w_parent)
    -
    -    def _get_state(space):
    -        return space.fromcache(AppGreenletCoState)
    -    _get_state = staticmethod(_get_state)
    -
    -    def hello(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.enter(ec)
    -
    -    def goodbye(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.leave(ec)
    -
    -    def w_getcurrent(space):
    -        return space.wrap(AppGreenlet._get_state(space).current)
    -    w_getcurrent = staticmethod(w_getcurrent)
    -
    -    def w_switch(self, args_w):
    -        # Find the switch target - it might be a parent greenlet
    -        space = self.space
    -        costate = self.costate
    -        target = self
    -        while target.isdead():
    -            target = target.parent
    -            assert isinstance(target, AppGreenlet)
    -        # Switch to it
    -        costate.args_w = args_w
    -        if target is not costate.current:
    -            target.switch()
    -        else:
    -            # case not handled in Coroutine.switch()
    -            syncstate._do_things_to_do()
    -        result_w = costate.args_w
    -        costate.args_w = None
    -        # costate.args_w can be set to None above for throw(), but then
    -        # switch() should have raised.  At this point cosstate.args_w != None.
    -        assert result_w is not None
    -        # Return the result of a switch, packaging it in a tuple if
    -        # there is more than one value.
    -        if len(result_w) == 1:
    -            return result_w[0]
    -        return space.newtuple(result_w)
    -
    -    def w_throw(self, w_type=None, w_value=None, w_traceback=None):
    -        space = self.space
    -        if space.is_w(w_type, space.w_None):
    -            w_type = self.costate.w_GreenletExit
    -        # Code copied from RAISE_VARARGS but slightly modified.  Not too nice.
    -        operror = OperationError(w_type, w_value)
    -        operror.normalize_exception(space)
    -        if not space.is_w(w_traceback, space.w_None):
    -            from pypy.interpreter import pytraceback
    -            tb = space.interpclass_w(w_traceback)
    -            if tb is None or not space.is_true(space.isinstance(tb, 
    -                space.gettypeobject(pytraceback.PyTraceback.typedef))):
    -                raise OperationError(space.w_TypeError,
    -                      space.wrap("throw: arg 3 must be a traceback or None"))
    -            operror.set_traceback(tb)
    -        # Dead greenlet: turn GreenletExit into a regular return
    -        if self.isdead() and operror.match(space, self.costate.w_GreenletExit):
    -            args_w = [operror.get_w_value(space)]
    -        else:
    -            syncstate.push_exception(operror)
    -            args_w = None
    -        return self.w_switch(args_w)
    -
    -    def _userdel(self):
    -        self.space.userdel(self.space.wrap(self))
    -
    -    def isdead(self):
    -        return self.thunk is None and not self.active
    -
    -    def w_get_is_dead(self, space):
    -        return space.newbool(self.isdead())
    -
    -    def descr__nonzero__(self):
    -        return self.space.newbool(self.active)
    -
    -    def w_get_run(self, space):
    -        w_run = self.w_callable
    -        if w_run is None:
    -            raise OperationError(space.w_AttributeError, space.wrap("run"))
    -        return w_run
    -
    -    def set_run(self, w_run):
    -        space = self.space
    -        if self.thunk is None:
    -            raise OperationError(space.w_AttributeError,
    -                                 space.wrap("run cannot be set "
    -                                            "after the start of the greenlet"))
    -        self.w_callable = w_run
    -
    -    def w_set_run(self, space, w_run):
    -        self.set_run(w_run)
    -
    -    def w_del_run(self, space):
    -        if self.w_callable is None:
    -            raise OperationError(space.w_AttributeError, space.wrap("run"))
    -        self.w_callable = None
    -
    -    def w_get_parent(self, space):
    -        return space.wrap(self.parent)
    -
    -    def set_parent(self, w_parent):
    -        space = self.space
    -        newparent = space.interp_w(AppGreenlet, w_parent)
    -        if newparent.costate is not self.costate:
    -            raise OperationError(self.costate.w_GreenletError,
    -                                 space.wrap("invalid foreign parent"))
    -        curr = newparent
    -        while curr:
    -            if curr is self:
    -                raise OperationError(space.w_ValueError,
    -                                     space.wrap("cyclic parent chain"))
    -            curr = curr.parent
    -        self.parent = newparent
    -
    -    def w_set_parent(self, space, w_parent):
    -        self.set_parent(w_parent)
    -
    -    def w_get_frame(self, space):
    -        if not self.active or self.costate.current is self:
    -            f = None
    -        else:
    -            f = self.subctx.topframe
    -        return space.wrap(f)
    -
    -def get(space, name):
    -    w_module = space.getbuiltinmodule('_stackless')
    -    return space.getattr(w_module, space.wrap(name))
    -
    -def post_install(module):
    -    "NOT_RPYTHON"
    -    makeStaticMethod(module, 'greenlet', 'getcurrent')
    -    space = module.space
    -    state = AppGreenlet._get_state(space)
    -    state.post_install()
    -    w_greenlet = get(space, 'greenlet')
    -    # HACK HACK HACK
    -    # make the typeobject mutable for a while
    -    from pypy.objspace.std.typeobject import W_TypeObject
    -    assert isinstance(w_greenlet, W_TypeObject)
    -    old_flag = w_greenlet.flag_heaptype
    -    w_greenlet.flag_heaptype = True
    -    space.appexec([w_greenlet,
    -                   state.w_GreenletExit,
    -                   state.w_GreenletError], """
    -    (greenlet, exit, error):
    -        greenlet.GreenletExit = exit
    -        greenlet.error = error
    -    """)
    -    w_greenlet.flag_heaptype = old_flag
    -
    -AppGreenlet.typedef = TypeDef("greenlet",
    -    __new__ = interp2app(AppGreenlet.descr_method__new__.im_func),
    -    __init__ = interp2app(AppGreenlet.descr_method__init__),
    -    switch = interp2app(AppGreenlet.w_switch),
    -    dead = GetSetProperty(AppGreenlet.w_get_is_dead),
    -    run = GetSetProperty(AppGreenlet.w_get_run,
    -                         AppGreenlet.w_set_run,
    -                         AppGreenlet.w_del_run),
    -    parent = GetSetProperty(AppGreenlet.w_get_parent,
    -                            AppGreenlet.w_set_parent),
    -    getcurrent = interp2app(AppGreenlet.w_getcurrent),
    -    throw = interp2app(AppGreenlet.w_throw),
    -    gr_frame = GetSetProperty(AppGreenlet.w_get_frame),
    -    __nonzero__ = interp2app(AppGreenlet.descr__nonzero__),
    -    __module__ = '_stackless',
    -)
    diff --git a/pypy/module/_stackless/interp_stackless.py b/pypy/module/_stackless/interp_stackless.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_stackless.py
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -from pypy.interpreter.baseobjspace import Wrappable
    -from pypy.interpreter.typedef import TypeDef
    -from pypy.interpreter.gateway import interp2app
    -import os
    -
    -
    -class tasklet(Wrappable):
    -
    -    def __init__(self, space):
    -        self.space = space
    -        self.flags = 0
    -        self.state = None
    -
    -    def descr_method__new__(space, w_subtype):
    -        t = space.allocate_instance(tasklet, w_subtype)
    -        tasklet.__init__(t, space)
    -        return space.wrap(t)
    -
    -    def w_demo(self):
    -        output("42")
    -
    -tasklet.typedef = TypeDef("tasklet",
    -    __new__ = interp2app(tasklet.descr_method__new__.im_func),
    -    demo = interp2app(tasklet.w_demo),
    -)
    -
    -def output(stuff):
    -    os.write(2, stuff + '\n')
    diff --git a/pypy/module/_stackless/rclonable.py b/pypy/module/_stackless/rclonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/rclonable.py
    +++ /dev/null
    @@ -1,87 +0,0 @@
    -from pypy.module._stackless.interp_coroutine import AbstractThunk, Coroutine
    -from pypy.rlib.rgc import gc_swap_pool, gc_clone
    -from pypy.rlib.objectmodel import we_are_translated
    -
    -
    -class InterpClonableMixin:
    -    local_pool = None
    -    _mixin_ = True
    -
    -    def hello_local_pool(self):
    -        if we_are_translated():
    -            self.saved_pool = gc_swap_pool(self.local_pool)
    -
    -    def goodbye_local_pool(self):
    -        if we_are_translated():
    -            self.local_pool = gc_swap_pool(self.saved_pool)
    -            self.saved_pool = None
    -
    -    def clone_into(self, copy, extradata=None):
    -        if not we_are_translated():
    -            raise NotImplementedError
    -        # cannot gc_clone() directly self, because it is not in its own
    -        # local_pool.  Moreover, it has a __del__, which cloning doesn't
    -        # support properly at the moment.
    -        copy.parent = self.parent
    -        # the hello/goodbye pair has two purposes: it forces
    -        # self.local_pool to be computed even if it was None up to now,
    -        # and it puts the 'data' tuple in the correct pool to be cloned.
    -        self.hello_local_pool()
    -        data = (self.frame, extradata)
    -        self.goodbye_local_pool()
    -        # clone!
    -        data, copy.local_pool = gc_clone(data, self.local_pool)
    -        copy.frame, extradata = data
    -        copy.thunk = self.thunk # in case we haven't switched to self yet
    -        return extradata
    -
    -
    -class InterpClonableCoroutine(Coroutine, InterpClonableMixin):
    -
    -    def hello(self):
    -        self.hello_local_pool()
    -
    -    def goodbye(self):
    -        self.goodbye_local_pool()
    -
    -    def clone(self):
    -        # hack, this is overridden in AppClonableCoroutine
    -        if self.getcurrent() is self:
    -            raise RuntimeError("clone() cannot clone the current coroutine; "
    -                               "use fork() instead")
    -        copy = InterpClonableCoroutine(self.costate)
    -        self.clone_into(copy)
    -        return copy
    -
    -
    -class ForkThunk(AbstractThunk):
    -    def __init__(self, coroutine):
    -        self.coroutine = coroutine
    -        self.newcoroutine = None
    -    def call(self):
    -        oldcoro = self.coroutine
    -        self.coroutine = None
    -        newcoro = oldcoro.clone()
    -        newcoro.parent = oldcoro
    -        self.newcoroutine = newcoro
    -
    -def fork():
    -    """Fork, as in the Unix fork(): the call returns twice, and the return
    -    value of the call is either the new 'child' coroutine object (if returning
    -    into the parent), or None (if returning into the child).  This returns
    -    into the parent first, which can switch to the child later.
    -    """
    -    current = InterpClonableCoroutine.getcurrent()
    -    if not isinstance(current, InterpClonableCoroutine):
    -        raise RuntimeError("fork() in a non-clonable coroutine")
    -    thunk = ForkThunk(current)
    -    coro_fork = InterpClonableCoroutine()
    -    coro_fork.bind(thunk)
    -    coro_fork.switch()
    -    # we resume here twice.  The following would need explanations about
    -    # why it returns the correct thing in both the parent and the child...
    -    return thunk.newcoroutine
    -
    -##    from pypy.rpython.lltypesystem import lltype, lloperation
    -##    lloperation.llop.debug_view(lltype.Void, current, thunk,
    -##        lloperation.llop.gc_x_size_header(lltype.Signed))
    diff --git a/pypy/module/_stackless/rcoroutine.py b/pypy/module/_stackless/rcoroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/rcoroutine.py
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -from pypy.rlib.rcoroutine import make_coroutine_classes
    -from pypy.interpreter.baseobjspace import Wrappable
    -
    -d = make_coroutine_classes(Wrappable)
    -
    -Coroutine = d['Coroutine']
    -BaseCoState = d['BaseCoState']
    -AbstractThunk = d['AbstractThunk']
    -syncstate = d['syncstate']
    -CoroutineExit = d['CoroutineExit']
    diff --git a/pypy/module/_stackless/stackless_flags.py b/pypy/module/_stackless/stackless_flags.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/stackless_flags.py
    +++ /dev/null
    @@ -1,201 +0,0 @@
    -"""
    -basic definitions for tasklet flags.
    -For simplicity and compatibility,
    -they are defined the same for coroutines,
    -even if they are not used.
    -
    -taken from tasklet_structs.h
    -----------------------------
    -
    -/***************************************************************************
    -
    -    Tasklet Flag Definition
    -    -----------------------
    -
    -    blocked:        The tasklet is either waiting in a channel for
    -                    writing (1) or reading (-1) or not blocked (0).
    -                    Maintained by the channel logic. Do not change.
    -
    -    atomic:         If true, schedulers will never switch. Driven by
    -                    the code object or dynamically, see below.
    -
    -    ignore_nesting: Allows auto-scheduling, even if nesting_level
    -                    is not zero.
    -
    -    autoschedule:   The tasklet likes to be auto-scheduled. User driven.
    -
    -    block_trap:     Debugging aid. Whenever the tasklet would be
    -                    blocked by a channel, an exception is raised.
    -
    -    is_zombie:      This tasklet is almost dead, its deallocation has
    -                    started. The tasklet *must* die at some time, or the
    -                    process can never end.
    -
    -    pending_irq:    If set, an interrupt was issued during an atomic
    -                    operation, and should be handled when possible.
    -
    -
    -    Policy for atomic/autoschedule and switching:
    -    ---------------------------------------------
    -    A tasklet switch can always be done explicitly by calling schedule().
    -    Atomic and schedule are concerned with automatic features.
    -
    -    atomic  autoschedule
    -
    -        1       any     Neither a scheduler nor a watchdog will
    -                        try to switch this tasklet.
    -
    -        0       0       The tasklet can be stopped on desire, or it
    -                        can be killed by an exception.
    -
    -        0       1       Like above, plus auto-scheduling is enabled.
    -
    -    Default settings:
    -    -----------------
    -    All flags are zero by default.
    -
    - ***************************************************************************/
    -
    -typedef struct _tasklet_flags {
    -        int blocked: 2;
    -        unsigned int atomic: 1;
    -        unsigned int ignore_nesting: 1;
    -        unsigned int autoschedule: 1;
    -        unsigned int block_trap: 1;
    -        unsigned int is_zombie: 1;
    -        unsigned int pending_irq: 1;
    -} PyTaskletFlagStruc;
    -"""
    -
    -from pypy.rlib.rarithmetic import LONG_BIT, intmask
    -
    -class BitSetDef(object):
    -    __slots__ = "_names __dict__ _attrname".split()
    -
    -    def __init__(self, _attrname):
    -        self._names = []
    -        self._attrname = _attrname
    -        
    -    def __setattr__(self, key, value):
    -        if key not in self.__slots__:
    -            assert key not in self.__dict__
    -            self._names.append(key)
    -        object.__setattr__(self, key, value)
    -
    -    def __iter__(self):
    -        return self._enum_objects()
    -    
    -    def _enum_objects(self):
    -        for name in self._names:
    -            yield name, getattr(self, name)
    -
    -# negative values are user-writable
    -flags = BitSetDef("flags")
    -flags.blocked           =   2, """writing (1) or reading (-1) or not blocked (0)"""
    -flags.atomic            =  -1, """If true, schedulers will never switch"""
    -flags.ignore_nesting    =  -1, """allow auto-scheduling in nested interpreters"""
    -flags.autoschedule      =  -1, """enable auto-scheduling"""
    -flags.block_trap        =  -1, """raise an exception instead of blocking"""
    -flags.is_zombie         =   1, """__del__ is in progress"""
    -flags.pending_irq       =   1, """an interrupt occured while being atomic"""
    -
    -def make_get_bits(name, bits, shift):
    -    """ return a bool for single bits, signed int otherwise """
    -    signmask = 1 << (bits - 1 + shift)
    -    lshift = bits + shift
    -    rshift = bits
    -    if bits == 1:
    -        return "bool(%s & 0x%x)" % (name, signmask)
    -    else:
    -        return "intmask(%s << (LONG_BIT-%d)) >> (LONG_BIT-%d)" % (name, lshift, rshift)
    -
    -def make_set_bits(name, bits, shift):
    -    datamask = int('1' * bits, 2)
    -    clearmask = datamask << shift
    -    return "%s & ~0x%x | (value & 0x%x) << %d" % (name, clearmask, datamask, shift)
    -
    -def gen_code():
    -    from cStringIO import StringIO
    -    f = StringIO()
    -    print >> f, "class StacklessFlags(object):"
    -    print >> f, "    _mixin_ = True"
    -    shift = 0
    -    field = "self.%s" % flags._attrname
    -    for name, (bits, doc) in flags:
    -        write, bits = bits < 0, abs(bits)
    -        print >> f
    -        print >> f, '    def get_%s(self):' % name
    -        print >> f, '        """%s"""' % doc
    -        print >> f, '        return %s' % make_get_bits(field, bits, shift)
    -        print >> f, '    def set_%s(self, value):' % name
    -        print >> f, '        """%s"""' % doc
    -        print >> f, '        %s = %s' % (field, make_set_bits(field, bits, shift))
    -        print >> f, '    set_%s._public = %s' % (name, write)
    -        shift += bits
    -    return f.getvalue()
    -
    -# BEGIN generated code
    -class StacklessFlags(object):
    -    _mixin_ = True
    -
    -    def get_blocked(self):
    -        """writing (1) or reading (-1) or not blocked (0)"""
    -        return intmask(self.flags << (LONG_BIT-2)) >> (LONG_BIT-2)
    -    def set_blocked(self, value):
    -        """writing (1) or reading (-1) or not blocked (0)"""
    -        self.flags = self.flags & ~0x3 | (value & 0x3) << 0
    -    set_blocked._public = False
    -
    -    def get_atomic(self):
    -        """If true, schedulers will never switch"""
    -        return bool(self.flags & 0x4)
    -    def set_atomic(self, value):
    -        """If true, schedulers will never switch"""
    -        self.flags = self.flags & ~0x4 | (value & 0x1) << 2
    -    set_atomic._public = True
    -
    -    def get_ignore_nesting(self):
    -        """allow auto-scheduling in nested interpreters"""
    -        return bool(self.flags & 0x8)
    -    def set_ignore_nesting(self, value):
    -        """allow auto-scheduling in nested interpreters"""
    -        self.flags = self.flags & ~0x8 | (value & 0x1) << 3
    -    set_ignore_nesting._public = True
    -
    -    def get_autoschedule(self):
    -        """enable auto-scheduling"""
    -        return bool(self.flags & 0x10)
    -    def set_autoschedule(self, value):
    -        """enable auto-scheduling"""
    -        self.flags = self.flags & ~0x10 | (value & 0x1) << 4
    -    set_autoschedule._public = True
    -
    -    def get_block_trap(self):
    -        """raise an exception instead of blocking"""
    -        return bool(self.flags & 0x20)
    -    def set_block_trap(self, value):
    -        """raise an exception instead of blocking"""
    -        self.flags = self.flags & ~0x20 | (value & 0x1) << 5
    -    set_block_trap._public = True
    -
    -    def get_is_zombie(self):
    -        """__del__ is in progress"""
    -        return bool(self.flags & 0x40)
    -    def set_is_zombie(self, value):
    -        """__del__ is in progress"""
    -        self.flags = self.flags & ~0x40 | (value & 0x1) << 6
    -    set_is_zombie._public = False
    -
    -    def get_pending_irq(self):
    -        """an interrupt occured while being atomic"""
    -        return bool(self.flags & 0x80)
    -    def set_pending_irq(self, value):
    -        """an interrupt occured while being atomic"""
    -        self.flags = self.flags & ~0x80 | (value & 0x1) << 7
    -    set_pending_irq._public = False
    -
    -# END generated code
    -
    -if __name__ == '__main__':
    -    # paste this into the file
    -    print gen_code()
    diff --git a/pypy/module/_stackless/test/__init__.py b/pypy/module/_stackless/test/__init__.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/__init__.py
    +++ /dev/null
    @@ -1,1 +0,0 @@
    -#
    \ No newline at end of file
    diff --git a/pypy/module/_stackless/test/conftest.py b/pypy/module/_stackless/test/conftest.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/conftest.py
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -import sys
    -import py.test
    -
    -def pytest_runtest_setup(item):
    -    py.test.importorskip('greenlet')
    -    if sys.platform == 'win32':
    -        py.test.skip("stackless tests segfault on Windows")
    -
    diff --git a/pypy/module/_stackless/test/slp_test_pickle.py b/pypy/module/_stackless/test/slp_test_pickle.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/slp_test_pickle.py
    +++ /dev/null
    @@ -1,35 +0,0 @@
    -from pypy.conftest import gettestobjspace
    -
    -# app-level testing of coroutine pickling
    -
    -class AppTest_Pickle:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_simple_ish(self):
    -
    -        output = []
    -        import _stackless
    -        def f(coro, n, x):
    -            if n == 0:
    -                coro.switch()
    -                return
    -            f(coro, n-1, 2*x)
    -            output.append(x)
    -
    -        def example():
    -            main_coro = _stackless.coroutine.getcurrent()
    -            sub_coro = _stackless.coroutine()
    -            sub_coro.bind(f, main_coro, 5, 1)
    -            sub_coro.switch()
    -
    -            import pickle
    -            pckl = pickle.dumps(sub_coro)
    -            new_coro = pickle.loads(pckl)
    -
    -            new_coro.switch()
    -
    -        example()
    -        assert output == [16, 8, 4, 2, 1]
    diff --git a/pypy/module/_stackless/test/test_choicepoint.py b/pypy/module/_stackless/test/test_choicepoint.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_choicepoint.py
    +++ /dev/null
    @@ -1,85 +0,0 @@
    -import py; py.test.skip("clonable coroutines not really maintained any more")
    -
    -from pypy.rlib.rcoroutine import AbstractThunk
    -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine
    -
    -class ChoicePointHolder(object):
    -    def __init__(self):
    -        self.choicepoints = []
    -        self.clone_me = False
    -        self.answer = 0
    -        self.solutions_count = 0
    -
    -    def next_choice(self):
    -        return self.choicepoints.pop()
    -
    -    def add(self, choice, answer=0):
    -        self.choicepoints.append((choice, answer))
    -
    -    def more_choices(self):
    -        return bool(self.choicepoints)
    -
    -    def choice(self):
    -        #os.write(1, "choice\n")
    -        self.clone_me = True
    -        self.g_main.switch()
    -        #os.write(1, "answer: %d\n" % (self.answer,))
    -        return self.answer
    -
    -    def fail(self):
    -        self.g_main.switch()
    -        assert False
    -
    -choicepoints = ChoicePointHolder()
    -
    -# ____________________________________________________________
    -
    -class SearchTask(AbstractThunk):
    -    def call(self):
    -        path = []
    -        for i in range(10):
    -            res = choicepoints.choice()
    -            assert len(path) == i
    -            path.append(res)
    -            #os.write(1, "{%x} trying: %s\n" % (id(path), path))
    -            if i == 3:
    -                import gc; gc.collect()
    -        #os.write(1, "{%x} found a solution: %s\n" % (id(path), path))
    -        choicepoints.solutions_count += 1
    -
    -# ____________________________________________________________
    -
    -
    -class SearchAllTask(AbstractThunk):
    -    def call(self):
    -        search_coro = ClonableCoroutine()
    -        search_coro.bind(SearchTask())
    -        choicepoints.add(search_coro)
    -
    -        #os.write(1, "starting\n")
    -        while choicepoints.more_choices():
    -            searcher, nextvalue = choicepoints.next_choice()
    -            choicepoints.clone_me = False
    -            choicepoints.answer = nextvalue
    -            #os.write(1, '<<< {%x} %d\n' % (id(searcher), nextvalue))
    -            searcher.switch()
    -            #os.write(1, '>>> %d\n' % (choicepoints.clone_me,))
    -            if choicepoints.clone_me:
    -                searcher2 = searcher.clone()
    -                #os.write(1, 'searcher = {%x}, searcher2 = {%x}\n' % (
    -                #    id(searcher), id(searcher2)))
    -                choicepoints.add(searcher, 5)
    -                choicepoints.add(searcher2, 4)
    -
    -def entry_point():
    -    choicepoints.g_main = ClonableCoroutine()
    -    choicepoints.g_main.bind(SearchAllTask())
    -    choicepoints.g_main.switch()
    -    return choicepoints.solutions_count
    -
    -def test_choicepoint():
    -    from pypy.translator.c.test import test_newgc
    -    tester = test_newgc.TestUsingStacklessFramework()
    -    fn = tester.getcompiled(entry_point)
    -    res = fn()
    -    assert res == 2 ** 10
    diff --git a/pypy/module/_stackless/test/test_clonable.py b/pypy/module/_stackless/test/test_clonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_clonable.py
    +++ /dev/null
    @@ -1,187 +0,0 @@
    -import py; py.test.skip("clonable coroutines not really maintained any more")
    -
    -from pypy.conftest import gettestobjspace, option
    -import py, sys
    -
    -# app-level testing of coroutine cloning
    -
    -class AppTestClonable:
    -
    -    def setup_class(cls):
    -        if not option.runappdirect:
    -            py.test.skip('pure appdirect test (run with -A)')
    -        cls.space = space = gettestobjspace(usemodules=('_stackless',))
    -        if not space.is_true(space.appexec([], """():
    -            import _stackless
    -            return hasattr(_stackless, 'clonable')
    -        """)):
    -            py.test.skip('no _stackless.clonable')
    -
    -
    -    def test_solver(self):
    -        import _stackless
    -
    -        class Fail(Exception):
    -            pass
    -
    -        class Success(Exception):
    -            pass
    -
    -        def first_solution(func):
    -            global next_answer
    -            co = _stackless.clonable()
    -            co.bind(func)
    -            pending = [(co, None)]
    -            while pending:
    -                co, next_answer = pending.pop()
    -                try:
    -                    co.switch()
    -                except Fail:
    -                    pass
    -                except Success, e:
    -                    return e.args[0]
    -                else:
    -                    # zero_or_one() called, clone the coroutine
    -                    co2 = co.clone()
    -                    pending.append((co2, 1))
    -                    pending.append((co, 0))
    -            raise Fail("no solution")
    -
    -        pending = []
    -        main = _stackless.clonable.getcurrent()
    -
    -        def zero_or_one():
    -            main.switch()
    -            return next_answer
    -
    -        # ____________________________________________________________
    -
    -        invalid_prefixes = {
    -            (0, 0): True,
    -            (0, 1, 0): True,
    -            (0, 1, 1): True,
    -            (1, 0): True,
    -            (1, 1, 0, 0): True,
    -            }
    -
    -        def example():
    -            test = []
    -            for n in range(5):
    -                test.append(zero_or_one())
    -                if tuple(test) in invalid_prefixes:
    -                    raise Fail
    -            raise Success(test)
    -
    -        res = first_solution(example)
    -        assert res == [1, 1, 0, 1, 0]
    -
    -
    -    def test_myself_may_not_be_me_any_more(self):
    -        import gc
    -        from _stackless import clonable
    -
    -        counter = [0]
    -
    -        def runner():
    -            while 1:
    -                assert clonable.getcurrent() is coro
    -                counter[0] += 1
    -                main.switch()
    -
    -        main = clonable.getcurrent()
    -        coro = clonable()
    -        coro.bind(runner)
    -
    -        coro.switch()
    -        assert counter == [1]
    -
    -        assert clonable.getcurrent() is main
    -        coro1 = coro.clone()
    -        assert counter == [1]
    -        assert clonable.getcurrent() is main
    -        coro.switch()
    -        assert counter == [2]
    -        coro.switch()
    -        assert counter == [3]
    -        assert clonable.getcurrent() is main
    -        del coro1
    -        gc.collect()
    -        #print "collected!"
    -        assert clonable.getcurrent() is main
    -        assert counter == [3]
    -        coro.switch()
    -        assert clonable.getcurrent() is main
    -        assert counter == [4]
    -
    -
    -    def test_fork(self):
    -        import _stackless
    -
    -        class Fail(Exception):
    -            pass
    -
    -        class Success(Exception):
    -            pass
    -
    -        def first_solution(func):
    -            global next_answer
    -            co = _stackless.clonable()
    -            co.bind(func)
    -            try:
    -                co.switch()
    -            except Success, e:
    -                return e.args[0]
    -
    -        def zero_or_one():
    -            sub = _stackless.fork()
    -            if sub is not None:
    -                # in the parent: run the child first
    -                try:
    -                    sub.switch()
    -                except Fail:
    -                    pass
    -                # then proceed with answer '1'
    -                return 1
    -            else:
    -                # in the child: answer '0'
    -                return 0
    -
    -        # ____________________________________________________________
    -
    -        invalid_prefixes = {
    -            (0, 0): True,
    -            (0, 1, 0): True,
    -            (0, 1, 1): True,
    -            (1, 0): True,
    -            (1, 1, 0, 0): True,
    -            }
    -
    -        def example():
    -            test = []
    -            for n in range(5):
    -                test.append(zero_or_one())
    -                if tuple(test) in invalid_prefixes:
    -                    raise Fail
    -            raise Success(test)
    -
    -        res = first_solution(example)
    -        assert res == [1, 1, 0, 1, 0]
    -
    -    def test_clone_before_start(self):
    -        """Tests that a clonable coroutine can be
    -        cloned before it is started
    -        (this used to fail with a segmentation fault)
    -        """
    -        import _stackless
    -
    -        counter = [0]
    -        def simple_coro():
    -            print "hello"
    -            counter[0] += 1
    -
    -        s = _stackless.clonable()
    -        s.bind(simple_coro)
    -        t = s.clone()
    -        s.switch()
    -        t.switch()
    -        assert counter[0] == 2
    diff --git a/pypy/module/_stackless/test/test_composable_coroutine.py b/pypy/module/_stackless/test/test_composable_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_composable_coroutine.py
    +++ /dev/null
    @@ -1,133 +0,0 @@
    -""" a faith is the connection between past and future that divides the
    -    application into switch-compatible chunks.
    -    -- stakkars
    -"""
    -from pypy.conftest import gettestobjspace
    -from py.test import skip
    -
    -class AppTest_ComposableCoroutine:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -        cls.w_generator_ = space.appexec([], """():
    -            import _stackless
    -
    -            generators_costate = _stackless.usercostate()
    -            main = generators_costate.getcurrent()
    -
    -            class generator_iterator(_stackless.coroutine):
    -
    -                def __iter__(self):
    -                    return self
    -
    -                def next(self):
    -                    if self.gi_answer is not None:
    -                        raise ValueError('stackless-generator'
    -                                         ' already executing')
    -                    self.gi_answer = []
    -                    self.gi_caller = generators_costate.getcurrent()
    -                    self.switch()
    -                    answer = self.gi_answer
    -                    self.gi_answer = None
    -                    if answer:
    -                        return answer[0]
    -                    else:
    -                        raise StopIteration
    -
    -            def generator(f):
    -                def myfunc(*args, **kwds):
    -                    g = generators_costate.spawn(generator_iterator)
    -                    g.gi_answer = None
    -                    g.bind(f, *args, **kwds)
    -                    return g
    -                return myfunc
    -
    -            def Yield(value):
    -                g = generators_costate.getcurrent()
    -                if g is main:
    -                    raise ValueError('Yield() outside any stackless-generator')
    -                assert isinstance(g, generator_iterator)
    -                assert g.gi_answer == []
    -                g.gi_answer.append(value)
    -                g.gi_caller.switch()
    -
    -            generator.Yield = Yield
    -            generator._costate = generators_costate
    -            return (generator,)
    -        """)
    -
    -    def test_simple_costate(self):
    -        import _stackless
    -        costate = _stackless.usercostate()
    -        main = costate.getcurrent()
    -
    -        result = []
    -        def f():
    -            result.append(costate.getcurrent())
    -        co = costate.spawn()
    -        co.bind(f)
    -        co.switch()
    -        assert result == [co]
    -
    -    def test_generator(self):
    -        generator, = self.generator_
    -
    -        def squares(n):
    -            for i in range(n):
    -                generator.Yield(i*i)
    -        squares = generator(squares)
    -
    -        lst1 = [i*i for i in range(10)]
    -        for got in squares(10):
    -            expected = lst1.pop(0)
    -            assert got == expected
    -        assert lst1 == []
    -
    -    def test_multiple_costates(self):
    -        """Test that two independent costates mix transparently:
    -
    -        - compute_costate, used for a coroutine that fills a list with
    -                           some more items each time it is switched to
    -
    -        - generators_costate, used interally by self.generator (see above)
    -        """
    -
    -        import _stackless
    -        generator, = self.generator_
    -
    -        # you can see how it fails if we don't have two different costates
    -        # by setting compute_costate to generator._costate instead
    -        compute_costate = _stackless.usercostate()
    -        compute_main = compute_costate.getcurrent()
    -        lst = []
    -
    -        def filler():     # -> 0, 1, 2, 100, 101, 102, 200, 201, 202, 300 ...
    -            for k in range(5):
    -                for j in range(3):
    -                    lst.append(100 * k + j)
    -                compute_main.switch()
    -
    -        filler_co = compute_costate.spawn()
    -        filler_co.bind(filler)
    -
    -        def grab_next_value():
    -            while not lst:
    -                #print 'filling more...'
    -                filler_co.switch()
    -                #print 'now lst =', lst
    -            #print 'grabbing', lst[0]
    -            return lst.pop(0)
    -
    -        def squares(n):
    -            for i in range(n):
    -                #print 'square:', i
    -                generator.Yield(i*grab_next_value())
    -        squares = generator(squares)
    -
    -        lst1 = [0, 1, 4,  300, 404, 510,  1200, 1407, 1616,  2700]
    -        for got in squares(10):
    -            expected = lst1.pop(0)
    -            assert got == expected
    -        assert lst1 == []
    diff --git a/pypy/module/_stackless/test/test_coroutine.py b/pypy/module/_stackless/test/test_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_coroutine.py
    +++ /dev/null
    @@ -1,168 +0,0 @@
    -from pypy.conftest import gettestobjspace, option
    -from py.test import skip
    -
    -
    -class AppTest_Coroutine:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_raise_propagate(self):
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        def f():
    -            return 1/0
    -        co.bind(f)
    -        try:
    -            co.switch()
    -        except ZeroDivisionError:
    -            pass
    -        else:
    -            raise AssertionError("exception not propagated")
    -
    -    def test_strange_test(self):
    -        from _stackless import coroutine
    -        def f():
    -            print "in new coro"
    -            return 42
    -        def create():
    -            b = coroutine()
    -            b.bind(f)
    -            print "bound"
    -            b.switch()
    -            print "switched"
    -            return b
    -        a = coroutine()
    -        a.bind(create)
    -        b = a.switch()
    -        # now b.parent = a
    -        def nothing():
    -            pass
    -        a.bind(nothing)
    -        def kill():
    -            # this sets a.parent = b
    -            a.kill()
    -        b.bind(kill)
    -        b.switch()
    -
    -    def test_kill(self):
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        def f():
    -            pass
    -        co.bind(f)
    -        assert co.is_alive
    -        co.kill()
    -        assert not co.is_alive
    -
    -    def test_kill_running(self):
    -        coroutineexit = []
    -        import _stackless as stackless
    -        main = stackless.coroutine.getcurrent()
    -        result = []
    -        co = stackless.coroutine()
    -        def f():
    -            x = 2
    -            try:
    -                result.append(1)
    -                main.switch()
    -                x = 3
    -            except CoroutineExit:
    -                coroutineexit.append(True)
    -                raise
    -            finally:
    -                result.append(x)
    -            result.append(4)
    -        co.bind(f)
    -        assert co.is_alive
    -        co.switch()
    -        assert co.is_alive
    -        assert result == [1]
    -        co.kill()
    -        assert not co.is_alive
    -        assert result == [1, 2]
    -        assert coroutineexit == [True]
    -
    -    def test_bogus_bind(self):
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        def f():
    -            pass
    -        co.bind(f)
    -        raises(ValueError, co.bind, f)
    -
    -    def test__framestack(self):
    -        import _stackless as stackless
    -        main = stackless.coroutine.getmain()
    -        co = stackless.coroutine()
    -        def g():
    -            return co._framestack
    -        def f():
    -            return g()
    -
    -        co.bind(f)
    -        stack = co.switch()
    -        assert stack == () # running corountine, _framestack is empty
    -
    -        co = stackless.coroutine()
    -        def g():
    -            return main.switch()
    -        def f():
    -            return g()
    -
    -        co.bind(f)
    -        co.switch()
    -        stack = co._framestack
    -        assert len(stack) == 2
    -        assert stack[0].f_code is f.func_code
    -        assert stack[1].f_code is g.func_code
    -
    -        co = stackless.coroutine()
    -
    -
    -
    -class AppTestDirect:
    -    def setup_class(cls):
    -        if not option.runappdirect:
    -            skip('pure appdirect test (run with -A)')
    -        cls.space = gettestobjspace(usemodules=('_stackless',))
    -
    -    def test_stack_depth_limit(self):
    -        import sys
    -        import _stackless as stackless
    -        st = stackless.get_stack_depth_limit()
    -        try:
    -            stackless.set_stack_depth_limit(1)
    -            assert stackless.get_stack_depth_limit() == 1
    -            try:
    -                co = stackless.coroutine()
    -                def f():
    -                    pass
    -                co.bind(f)
    -                co.switch()
    -            except RuntimeError:
    -                pass
    -        finally:
    -            stackless.set_stack_depth_limit(st)
    -
    -class TestRandomThings:
    -    def setup_class(cls):
    -        cls.space = gettestobjspace(usemodules=('_stackless',))
    -
    -    def test___del___handling(self):
    -        space = self.space
    -        w_l = space.newlist([])
    -        coro = space.appexec([w_l], """(l):
    -            from _stackless import coroutine
    -            class MyCoroutine(coroutine):
    -                def __del__(self):
    -                    l.append(self.is_zombie)
    -            return MyCoroutine()
    -        """)
    -        coro.__del__()
    -        space.user_del_action.perform(space.getexecutioncontext(), None)
    -        coro._kill_finally()
    -        assert space.len_w(w_l) == 1
    -        res = space.is_true(space.getitem(w_l, space.wrap(0)))
    -        assert res
    diff --git a/pypy/module/_stackless/test/test_greenlet.py b/pypy/module/_stackless/test/test_greenlet.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_greenlet.py
    +++ /dev/null
    @@ -1,643 +0,0 @@
    -from pypy.conftest import gettestobjspace, skip_on_missing_buildoption
    -
    -class AppTest_Greenlet:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_very_simple(self):
    -        from _stackless import greenlet
    -        lst = []
    -        def f(x):
    -            lst.append(x)
    -            return x + 10
    -        g = greenlet(f)
    -        assert not g
    -        res = g.switch(20)
    -        assert res == 30
    -        assert lst == [20]
    -        assert g.dead
    -        assert not g
    -
    -    def test_switch_back_to_main(self):
    -        from _stackless import greenlet
    -        lst = []
    -        main = greenlet.getcurrent()
    -        def f(x):
    -            lst.append(x)
    -            x = main.switch(x + 10)
    -            return 40 + x 
    -        g = greenlet(f)
    -        res = g.switch(20)
    -        assert res == 30
    -        assert lst == [20]
    -        assert not g.dead
    -        res = g.switch(2)
    -        assert res == 42
    -        assert g.dead
    -
    -    def test_simple(self):
    -        from _stackless import greenlet
    -        lst = []
    -        gs = []
    -        def f():
    -            lst.append(1)
    -            greenlet.getcurrent().parent.switch()
    -            lst.append(3)
    -        g = greenlet(f)
    -        lst.append(0)
    -        g.switch()
    -        lst.append(2)
    -        g.switch()
    -        lst.append(4)
    -        assert lst == range(5)
    -
    -    def test_exception_simple(self):
    -        from _stackless import greenlet
    -        def f():
    -            raise ValueError
    -        g1 = greenlet(f)
    -        raises(ValueError, g1.switch)
    -
    -    def test_exception_propagate(self):
    -        from _stackless import greenlet
    -        def f():
    -            raise ValueError
    -        def g():
    -            return g1.switch()
    -        g1 = greenlet(f)
    -        g2 = greenlet(g)
    -        raises(ValueError, g1.switch)
    -        g1 = greenlet(f)
    -        raises(ValueError, g2.switch)
    -
    -
    -    def test_exc_info_save_restore(self):
    -        from _stackless import greenlet
    -        import sys
    -        def f():
    -            try:
    -                raise ValueError('fun')
    -            except:
    -                exc_info = sys.exc_info()
    -                greenlet(h).switch()
    -                assert exc_info == sys.exc_info()
    -
    -        def h():
    -            assert sys.exc_info() == (None, None, None)
    -
    -        greenlet(f).switch()
    -
    -    def test_exception(self):
    -        from _stackless import greenlet
    -        import sys
    -        def fmain(seen):
    -            try:
    -                greenlet.getcurrent().parent.switch()
    -            except:
    -                seen.append(sys.exc_info()[0])
    -                raise
    -            raise ValueError
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        raises(TypeError, "g2.parent = 1")
    -        g2.parent = g1
    -        assert seen == []
    -        raises(ValueError, g2.switch)
    -        assert seen == [ValueError]
    -        g2.switch()
    -        assert seen == [ValueError]
    -
    -    def test_send_exception(self):
    -        from _stackless import greenlet
    -        import sys
    -        def send_exception(g, exc):
    -            # note: send_exception(g, exc)  can be now done with  g.throw(exc).
    -            # the purpose of this test is to explicitely check the propagation rules.
    -            def crasher(exc):
    -                raise exc
    -            g1 = greenlet(crasher)
    -            g1.parent = g
    -            g1.switch(exc)
    -        def fmain(seen):
    -            try:
    -                greenlet.getcurrent().parent.switch()
    -            except:
    -                seen.append(sys.exc_info()[0])
    -                raise
    -            raise ValueError
    -
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g1.switch(seen)
    -        raises(KeyError, "send_exception(g1, KeyError)")
    -        assert seen == [KeyError]
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g1.switch(seen)
    -        raises(KeyError, "g1.throw(KeyError)")
    -        assert seen == [KeyError]
    -        assert g1.dead
    -
    -    def test_frame(self):
    -        from _stackless import greenlet
    -        import sys
    -        def f1():
    -            f = sys._getframe(0)
    -            assert f.f_back is None
    -            greenlet.getcurrent().parent.switch(f)
    -            return "meaning of life"
    -        g = greenlet(f1)
    -        frame = g.switch()
    -        assert frame is g.gr_frame
    -        assert g
    -        next = g.switch()
    -        assert not g
    -        assert next == "meaning of life"
    -        assert g.gr_frame is None
    -
    -    def test_mixing_greenlet_coroutine(self):
    -        from _stackless import greenlet, coroutine
    -        lst = []
    -        def f():
    -            lst.append(1)
    -            greenlet.getcurrent().parent.switch()
    -            lst.append(3)
    -        def make_h(c):
    -            def h():
    -                g = greenlet(f)
    -                lst.append(0)
    -                g.switch()
    -                c.switch()
    -                lst.append(2)
    -                g.switch()
    -                c.switch()
    -                lst.append(4)
    -                c.switch()
    -            return h
    -        c1 = coroutine.getcurrent()
    -        c2 = coroutine()
    -        c3 = coroutine()
    -        c2.bind(make_h(c3))
    -        c3.bind(make_h(c2))
    -        c2.switch()
    -        assert lst == [0, 1, 0, 1, 2, 3, 2, 3, 4, 4]
    -
    -    def test_dealloc(self):
    -        skip("not working yet")
    -        from _stackless import greenlet
    -        import sys
    -        def fmain(seen):
    -            try:
    -                greenlet.getcurrent().parent.switch()
    -            except:
    -                seen.append(sys.exc_info()[0])
    -                raise
    -            raise ValueError
    -        seen = []
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        assert seen == []
    -        del g1
    -        assert seen == [greenlet.GreenletExit]
    -        del g2
    -        assert seen == [greenlet.GreenletExit, greenlet.GreenletExit]
    -
    -
    -# ____________________________________________________________
    -#
    -# The tests from greenlets.
    -# For now, without the ones that involve threads
    -#
    -class AppTest_PyMagicTestGreenlet:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -        cls.w_glob = space.appexec([], """():
    -            import sys
    -            from _stackless import greenlet
    -
    -            class SomeError(Exception):
    -                pass
    -
    -            def fmain(seen):
    -                try:
    -                    greenlet.getcurrent().parent.switch()
    -                except:
    -                    seen.append(sys.exc_info()[0])
    -                    raise
    -                raise SomeError
    -
    -            class Glob: pass
    -            glob = Glob()
    -            glob.__dict__.update(locals())
    -            return glob
    -        """)
    -
    -    def test_simple(self):
    -        greenlet = self.glob.greenlet
    -        lst = []
    -        def f():
    -            lst.append(1)
    -            greenlet.getcurrent().parent.switch()
    -            lst.append(3)
    -        g = greenlet(f)
    -        lst.append(0)
    -        g.switch()
    -        lst.append(2)
    -        g.switch()
    -        lst.append(4)
    -        assert lst == range(5)
    -
    -    def test_exception(self):
    -        greenlet  = self.glob.greenlet
    -        fmain     = self.glob.fmain
    -        SomeError = self.glob.SomeError
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        g2.parent = g1
    -        assert seen == []
    -        raises(SomeError, g2.switch)
    -        assert seen == [SomeError]
    -        g2.switch()
    -        assert seen == [SomeError]
    -
    -    def test_send_exception(self):
    -        greenlet  = self.glob.greenlet
    -        fmain     = self.glob.fmain
    -        def send_exception(g, exc):
    -            # note: send_exception(g, exc)  can be now done with  g.throw(exc).
    -            # the purpose of this test is to explicitely check the
    -            # propagation rules.
    -            def crasher(exc):
    -                raise exc
    -            g1 = greenlet(crasher, parent=g)
    -            g1.switch(exc)
    -
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g1.switch(seen)
    -        raises(KeyError, "send_exception(g1, KeyError)")
    -        assert seen == [KeyError]
    -
    -    def test_dealloc(self):
    -        skip("XXX in-progress: GC handling of greenlets")
    -        import gc
    -        greenlet = self.glob.greenlet
    -        fmain    = self.glob.fmain
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        assert seen == []
    -        del g1
    -        gc.collect()
    -        assert seen == [greenlet.GreenletExit]
    -        del g2
    -        gc.collect()
    -        assert seen == [greenlet.GreenletExit, greenlet.GreenletExit]
    -
    -    def test_frame(self):
    -        import sys
    -        greenlet = self.glob.greenlet
    -        def f1():
    -            f = sys._getframe(0)
    -            assert f.f_back is None
    -            greenlet.getcurrent().parent.switch(f)
    -            return "meaning of life"
    -        g = greenlet(f1)
    -        frame = g.switch()
    -        assert frame is g.gr_frame
    -        assert g
    -        next = g.switch()
    -        assert not g
    -        assert next == "meaning of life"
    -        assert g.gr_frame is None
    -
    -
    -class AppTest_PyMagicTestThrow:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_class(self):
    -        from _stackless import greenlet
    -        def switch(*args):
    -            return greenlet.getcurrent().parent.switch(*args)
    -
    -        def f():
    -            try:
    -                switch("ok")
    -            except RuntimeError:
    -                switch("ok")
    -                return
    -            switch("fail")
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw(RuntimeError)
    -        assert res == "ok"
    -
    -    def test_val(self):
    -        from _stackless import greenlet
    -        def switch(*args):
    -            return greenlet.getcurrent().parent.switch(*args)
    -
    -        def f():
    -            try:
    -                switch("ok")
    -            except RuntimeError, val:
    -                if str(val) == "ciao":
    -                    switch("ok")
    -                    return
    -            switch("fail")
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw(RuntimeError("ciao"))
    -        assert res == "ok"
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw(RuntimeError, "ciao")
    -        assert res == "ok"
    -
    -    def test_kill(self):
    -        from _stackless import greenlet
    -        def switch(*args):
    -            return greenlet.getcurrent().parent.switch(*args)
    -
    -        def f():
    -            switch("ok")
    -            switch("fail")
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw()
    -        assert isinstance(res, greenlet.GreenletExit)
    -        assert g.dead
    -        res = g.throw()    # immediately eaten by the already-dead greenlet
    -        assert isinstance(res, greenlet.GreenletExit)
    -
    -    def test_throw_goes_to_original_parent(self):
    -        from _stackless import greenlet
    -        main = greenlet.getcurrent()
    -        def f1():
    -            try:
    -                main.switch("f1 ready to catch")
    -            except IndexError:
    -                return "caught"
    -            else:
    -                return "normal exit"
    -        def f2():
    -            main.switch("from f2")
    -
    -        g1 = greenlet(f1)
    -        g2 = greenlet(f2, parent=g1)
    -        raises(IndexError, g2.throw, IndexError)
    -        assert g2.dead
    -        assert g1.dead
    -
    -        g1 = greenlet(f1)
    -        g2 = greenlet(f2, parent=g1)
    -        res = g1.switch()
    -        assert res == "f1 ready to catch"
    -        res = g2.throw(IndexError)
    -        assert res == "caught"
    -        assert g2.dead
    -        assert g1.dead
    -
    -        g1 = greenlet(f1)
    -        g2 = greenlet(f2, parent=g1)
    -        res = g1.switch()
    -        assert res == "f1 ready to catch"
    -        res = g2.switch()
    -        assert res == "from f2"
    -        res = g2.throw(IndexError)
    -        assert res == "caught"
    -        assert g2.dead
    -        assert g1.dead
    -            
    -
    -class AppTest_PyMagicTestGenerator:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_generator(self):
    -        from _stackless import greenlet
    -
    -        class genlet(greenlet):
    -
    -            def __init__(self, *args, **kwds):
    -                self.args = args
    -                self.kwds = kwds
    -
    -            def run(self):
    -                fn, = self.fn
    -                fn(*self.args, **self.kwds)
    -
    -            def __iter__(self):
    -                return self
    -
    -            def next(self):
    -                self.parent = greenlet.getcurrent()
    -                result = self.switch()
    -                if self:
    -                    return result
    -                else:
    -                    raise StopIteration
    -
    -        def Yield(value):
    -            g = greenlet.getcurrent()
    -            while not isinstance(g, genlet):
    -                if g is None:
    -                    raise RuntimeError, 'yield outside a genlet'
    -                g = g.parent
    -            g.parent.switch(value)
    -
    -        def generator(func):
    -            class generator(genlet):
    -                fn = (func,)
    -            return generator
    -
    -        # ___ test starts here ___
    -        seen = []
    -        def g(n):
    -            for i in range(n):
    -                seen.append(i)
    -                Yield(i)
    -        g = generator(g)
    -        for k in range(3):
    -            for j in g(5):
    -                seen.append(j)
    -        assert seen == 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
    -
    -
    -class AppTest_PyMagicTestGeneratorNested:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -        cls.w_glob = space.appexec([], """():
    -            from _stackless import greenlet
    -
    -            class genlet(greenlet):
    -
    -                def __init__(self, *args, **kwds):
    -                    self.args = args
    -                    self.kwds = kwds
    -                    self.child = None
    -
    -                def run(self):
    -                    fn, = self.fn
    -                    fn(*self.args, **self.kwds)
    -
    -                def __iter__(self):
    -                    return self
    -
    -                def set_child(self, child):
    -                    self.child = child
    -
    -                def next(self):
    -                    if self.child:
    -                        child = self.child
    -                        while child.child:
    -                            tmp = child
    -                            child = child.child
    -                            tmp.child = None
    -
    -                        result = child.switch()
    -                    else:
    -                        self.parent = greenlet.getcurrent()            
    -                        result = self.switch()
    -
    -                    if self:
    -                        return result
    -                    else:
    -                        raise StopIteration
    -
    -            def Yield(value, level = 1):
    -                g = greenlet.getcurrent()
    -
    -                while level != 0:
    -                    if not isinstance(g, genlet):
    -                        raise RuntimeError, 'yield outside a genlet'
    -                    if level > 1:
    -                        g.parent.set_child(g)
    -                    g = g.parent
    -                    level -= 1
    -
    -                g.switch(value)
    -
    -            def Genlet(func):
    -                class Genlet(genlet):
    -                    fn = (func,)
    -                return Genlet
    -
    -            class Glob: pass
    -            glob = Glob()
    -            glob.__dict__.update(locals())
    -            return glob
    -        """)
    -
    -    def test_genlet_1(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -
    -        def g1(n, seen):
    -            for i in range(n):
    -                seen.append(i+1)
    -                yield i
    -
    -        def g2(n, seen):
    -            for i in range(n):
    -                seen.append(i+1)
    -                Yield(i)
    -
    -        g2 = Genlet(g2)
    -
    -        def nested(i):
    -            Yield(i)
    -
    -        def g3(n, seen):
    -            for i in range(n):
    -                seen.append(i+1)
    -                nested(i)
    -        g3 = Genlet(g3)
    -
    -        raises(RuntimeError, Yield, 10)
    -        for g in [g1, g2, g3]:
    -            seen = []
    -            for k in range(3):
    -                for j in g(5, seen):
    -                    seen.append(j)
    -            assert seen == 3 * [1, 0, 2, 1, 3, 2, 4, 3, 5, 4]
    -        raises(RuntimeError, Yield, 10)
    -
    -    def test_nested_genlets(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -        def a(n):
    -            if n == 0:
    -                return
    -            for ii in ax(n-1):
    -                Yield(ii)
    -            Yield(n)
    -        ax = Genlet(a)
    -        seen = []
    -        for ii in ax(5):
    -            seen.append(ii)
    -        assert seen == [1, 2, 3, 4, 5]
    -
    -    def test_perms(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -        def perms(l):
    -            if len(l) > 1:
    -                for e in l:
    -                    # No syntactical sugar for generator expressions
    -                    [Yield([e] + p) for p in perms([x for x in l if x!=e])]
    -            else:
    -                Yield(l)
    -        perms = Genlet(perms)
    -        gen_perms = perms(range(4))
    -        permutations = list(gen_perms)
    -        assert len(permutations) == 4*3*2*1
    -        assert [0,1,2,3] in permutations
    -        assert [3,2,1,0] in permutations
    -
    -    def test_layered_genlets(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -        def gr1(n):
    -            for ii in range(1, n):
    -                Yield(ii)
    -                Yield(ii * ii, 2)
    -        gr1 = Genlet(gr1)
    -        def gr2(n, seen):
    -            for ii in gr1(n):
    -                seen.append(ii)
    -        gr2 = Genlet(gr2)
    -        seen = []
    -        for ii in gr2(5, seen):
    -            seen.append(ii)
    -        assert seen == [1, 1, 2, 4, 3, 9, 4, 16]
    diff --git a/pypy/module/_stackless/test/test_interp_clonable.py b/pypy/module/_stackless/test/test_interp_clonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_interp_clonable.py
    +++ /dev/null
    @@ -1,118 +0,0 @@
    -"""
    -testing cloning
    -"""
    -import py; py.test.skip("clonable coroutines not really maintained any more")
    -
    -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect()
    -from pypy.translator.c import gc
    -from pypy.rpython.memory.gctransform import stacklessframework
    -from pypy.rpython.memory.test import test_transformed_gc
    -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine
    -from pypy.module._stackless.rclonable import AbstractThunk, fork
    -
    -class TestClonableCoroutine(test_transformed_gc.GCTest):
    -
    -    gcname = "marksweep"
    -    stacklessgc = True
    -    class gcpolicy(gc.StacklessFrameworkGcPolicy):
    -        class transformerclass(stacklessframework.StacklessFrameworkGCTransformer):
    -            GC_PARAMS = {'start_heap_size': 4096 }
    -
    -    def test_clone(self):
    -        class T(AbstractThunk):
    -            def __init__(self, result):
    -                self.result = result
    -            def call(self):
    -                self.result.append(2)
    -                ClonableCoroutine.getmain().switch()
    -                self.result.append(4)
    -        def f():
    -            result = []
    -            coro = ClonableCoroutine()
    -            coro.bind(T(result))
    -            result.append(1)
    -            coro.switch()
    -            coro2 = coro.clone()
    -            result.append(3)
    -            coro2.switch()
    -            result.append(5)
    -            coro.switch()
    -            result.append(6)
    -            n = 0
    -            for i in result:
    -                n = n*10 + i
    -            return n
    -
    -        run = self.runner(f)
    -        res = run([])
    -        assert res == 1234546
    -
    -    def test_clone_local_state(self):
    -        class T(AbstractThunk):
    -            def __init__(self, result):
    -                self.result = result
    -            def call(self):
    -                localstate = []
    -                localstate.append(10)
    -                self.result.append(2)
    -                ClonableCoroutine.getmain().switch()
    -                localstate.append(20)
    -                if localstate == [10, 20]:
    -                    self.result.append(4)
    -                else:
    -                    self.result.append(0)
    -        def f():
    -            result = []
    -            coro = ClonableCoroutine()
    -            coro.bind(T(result))
    -            result.append(1)
    -            coro.switch()
    -            coro2 = coro.clone()
    -            result.append(3)
    -            coro2.switch()
    -            result.append(5)
    -            coro.switch()
    -            result.append(6)
    -            n = 0
    -            for i in result:
    -                n = n*10 + i
    -            return n
    -
    -        run = self.runner(f)
    -        res = run([])
    -        assert res == 1234546
    -
    -    def test_fork(self):
    -        class T(AbstractThunk):
    -            def __init__(self, result):
    -                self.result = result
    -            def call(self):
    -                localdata = [10]
    -                self.result.append(2)
    -                newcoro = fork()
    -                localdata.append(20)
    -                if newcoro is not None:
    -                    # in the parent
    -                    self.result.append(3)
    -                    newcoro.switch()
    -                    self.result.append(5)
    -                else:
    -                    # in the child
    -                    self.result.append(4)
    -                localdata.append(30)
    -                self.result.append(localdata != [10, 20, 30])
    -        def f():
    -            result = []
    -            coro = ClonableCoroutine()
    -            coro.bind(T(result))
    -            result.append(1)
    -            coro.switch()
    -            result.append(6)
    -            n = 0
    -            for i in result:
    -                n = n*10 + i
    -            return n
    -
    -        run = self.runner(f)
    -        res = run([])
    -        assert res == 12340506
    diff --git a/pypy/module/_stackless/test/test_pickle.py b/pypy/module/_stackless/test/test_pickle.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_pickle.py
    +++ /dev/null
    @@ -1,487 +0,0 @@
    -from pypy.conftest import gettestobjspace, option
    -import py
    -
    -# app-level testing of coroutine pickling
    -
    -
    -class AppTestBasic:
    -    def setup_class(cls):
    -        cls.space = gettestobjspace(usemodules=('_stackless',))
    -
    -    def test_pickle_main(self):
    -        import _stackless, pickle
    -        main = _stackless.coroutine.getcurrent()
    -        s = pickle.dumps(main)
    -        c = pickle.loads(s)
    -        assert c is main
    -
    -
    -class AppTestPickle:
    -
    -    def setup_class(cls):
    -        cls.space = gettestobjspace(usemodules=('_stackless',), CALL_METHOD=True)
    -
    -    def test_pickle_coroutine_empty(self):
    -        # this test is limited to basic pickling.
    -        # real stacks can only tested with a stackless pypy build.
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        import pickle
    -        pckl = pickle.dumps(co)
    -        co2 = pickle.loads(pckl)
    -        # the empty unpickled coroutine can still be used:
    -        result = []
    -        co2.bind(result.append, 42)
    -        co2.switch()
    -        assert result == [42]
    -
    -    def test_pickle_coroutine_bound(self):
    -        import pickle
    -        import _stackless
    -        lst = [4]
    -        co = _stackless.coroutine()
    -        co.bind(lst.append, 2)
    -        pckl = pickle.dumps((co, lst))
    -
    -        (co2, lst2) = pickle.loads(pckl)
    -        assert lst2 == [4]
    -        co2.switch()
    -        assert lst2 == [4, 2]
    -
    -
    -    def test_simple_ish(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x)
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_pickle_again(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x)
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -    pckl = pickle.dumps(new_coro)
    -    newer_coro = pickle.loads(pckl)
    -
    -    newer_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_kwargs(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x, step=4):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x, step=1)
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_starstarargs(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x, step=4):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x, **{'step': 1})
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_closure(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    y = 3
    -    def f(coro, n, x):
    -        if n == 0:
    -            coro.switch()
    -            return
    -        f(coro, n-1, 2*x)
    -        output.append(x+y)
    -
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [19, 11, 7, 5, 4]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_exception(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro):
    -    try:
    -        raise ValueError
    -    except:
    -        coro.switch()
    -        import sys
    -        t, v, tb = sys.exc_info()
    -        output.append(t)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [ValueError]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_exception_after_unpickling(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x):
    -    if n == 0:
    -        coro.switch()
    -        raise ValueError
    -    try:
    -        f(coro, n-1, 2*x)
    -    finally:
    -        output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    try:
    -        sub_coro.switch()
    -    except ValueError:
    -        pass
    -    else:
    -        assert 0
    -    try:
    -        new_coro.switch()
    -    except ValueError:
    -        pass
    -    else:
    -        assert 0
    -
    -example()
    -assert output == [16, 8, 4, 2, 1] * 2
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_loop(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro):
    -    for x in (1,2,3):
    -        coro.switch()
    -        output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -    new_coro.switch()
    -    new_coro.switch()
    -
    -example()
    -assert output == [1, 2, 3]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_valstack(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro):
    -    r = 1+g(coro)+3
    -    output.append(r)
    -
    -def g(coro):
    -    coro.switch()
    -    return 2
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -
    -example()
    -assert output == [6]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -
    -    def test_exec_and_locals(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -
    -def f(coro):
    -    x = None
    -    exec "x = 9"
    -    coro.switch()
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [9]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -
    -    def test_solver(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -import _stackless, pickle
    -
    -class Fail(Exception):
    -    pass
    -
    -class Success(Exception):
    -    pass
    -
    -def first_solution(func):
    -    global next_answer
    -    co = _stackless.coroutine()
    -    co.bind(func)
    -    pending = [(co, None)]
    -    while pending:
    -        co, next_answer = pending.pop()
    -        try:
    -            co.switch()
    -        except Fail:
    -            pass
    -        except Success, e:
    -            return e.args[0]
    -        else:
    -            # zero_or_one() called, clone the coroutine
    -            # NB. this seems to be quite slow
    -            co2 = pickle.loads(pickle.dumps(co))
    -            pending.append((co2, 1))
    -            pending.append((co, 0))
    -    raise Fail("no solution")
    -
    -pending = []
    -main = _stackless.coroutine.getcurrent()
    -
    -def zero_or_one():
    -    main.switch()
    -    return next_answer
    -
    -# ____________________________________________________________
    -
    -invalid_prefixes = {
    -    (0, 0): True,
    -    (0, 1, 0): True,
    -    (0, 1, 1): True,
    -    (1, 0): True,
    -    (1, 1, 0, 0): True,
    -    }
    -
    -def example():
    -    test = []
    -    for n in range(5):
    -        test.append(zero_or_one())
    -        if tuple(test) in invalid_prefixes:
    -            raise Fail
    -    raise Success(test)
    -
    -res = first_solution(example)
    -assert res == [1, 1, 0, 1, 0]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
    --- a/pypy/module/marshal/interp_marshal.py
    +++ b/pypy/module/marshal/interp_marshal.py
    @@ -40,7 +40,7 @@
             reader = FileReader(space, w_f)
         try:
             u = Unmarshaller(space, reader)
    -        return u.load_w_obj(False)
    +        return u.load_w_obj()
         finally:
             reader.finished()
     
    @@ -49,7 +49,7 @@
     ignored."""
         space.timer.start("marshal loads")
         u = StringUnmarshaller(space, w_str)
    -    obj = u.load_w_obj(False)
    +    obj = u.load_w_obj()
         space.timer.stop("marshal loads")
         return obj
     
    @@ -424,7 +424,7 @@
             lng = self.get_lng()
             return self.get(lng)
     
    -    def get_w_obj(self, allow_null):
    +    def get_w_obj(self, allow_null=False):
             space = self.space
             w_ret = space.w_None # something not None
             tc = self.get1()
    @@ -434,9 +434,9 @@
                     'NULL object in marshal data'))
             return w_ret
     
    -    def load_w_obj(self, allow_null):
    +    def load_w_obj(self):
             try:
    -            return self.get_w_obj(allow_null)
    +            return self.get_w_obj()
             except rstackovf.StackOverflow:
                 rstackovf.check_stack_overflow()
                 self._overflow()
    diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
    --- a/pypy/module/micronumpy/__init__.py
    +++ b/pypy/module/micronumpy/__init__.py
    @@ -26,13 +26,19 @@
             ("copysign", "copysign"),
             ("cos", "cos"),
             ("divide", "divide"),
    +        ("equal", "equal"),
             ("exp", "exp"),
             ("fabs", "fabs"),
             ("floor", "floor"),
    +        ("greater", "greater"),
    +        ("greater_equal", "greater_equal"),
    +        ("less", "less"),
    +        ("less_equal", "less_equal"),
             ("maximum", "maximum"),
             ("minimum", "minimum"),
             ("multiply", "multiply"),
             ("negative", "negative"),
    +        ("not_equal", "not_equal"),
             ("reciprocal", "reciprocal"),
             ("sign", "sign"),
             ("sin", "sin"),
    diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
    --- a/pypy/module/micronumpy/interp_dtype.py
    +++ b/pypy/module/micronumpy/interp_dtype.py
    @@ -129,6 +129,16 @@
             ))
         return impl
     
    +def raw_binop(func):
    +    # Returns the result unwrapped.
    +    @functools.wraps(func)
    +    def impl(self, v1, v2):
    +        return func(self,
    +            self.for_computation(self.unbox(v1)),
    +            self.for_computation(self.unbox(v2))
    +        )
    +    return impl
    +
     def unaryop(func):
         @functools.wraps(func)
         def impl(self, v):
    @@ -170,8 +180,24 @@
     
         def bool(self, v):
             return bool(self.for_computation(self.unbox(v)))
    +    @raw_binop
    +    def eq(self, v1, v2):
    +        return v1 == v2
    +    @raw_binop
         def ne(self, v1, v2):
    -        return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2))
    +        return v1 != v2
    +    @raw_binop
    +    def lt(self, v1, v2):
    +        return v1 < v2
    +    @raw_binop
    +    def le(self, v1, v2):
    +        return v1 <= v2
    +    @raw_binop
    +    def gt(self, v1, v2):
    +        return v1 > v2
    +    @raw_binop
    +    def ge(self, v1, v2):
    +        return v1 >= v2
     
     
     class FloatArithmeticDtype(ArithmaticTypeMixin):
    @@ -224,7 +250,7 @@
             return math.tan(v)
         @unaryop
         def arcsin(self, v):
    -        if v < -1.0 or  v > 1.0:
    +        if v < -1.0 or v > 1.0:
                 return rfloat.NAN
             return math.asin(v)
         @unaryop
    diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
    --- a/pypy/module/micronumpy/interp_numarray.py
    +++ b/pypy/module/micronumpy/interp_numarray.py
    @@ -74,6 +74,13 @@
         descr_pow = _binop_impl("power")
         descr_mod = _binop_impl("mod")
     
    +    descr_eq = _binop_impl("equal")
    +    descr_ne = _binop_impl("not_equal")
    +    descr_lt = _binop_impl("less")
    +    descr_le = _binop_impl("less_equal")
    +    descr_gt = _binop_impl("greater")
    +    descr_ge = _binop_impl("greater_equal")
    +
         def _binop_right_impl(ufunc_name):
             def impl(self, space, w_other):
                 w_other = scalar_w(space,
    @@ -404,10 +411,11 @@
         """
         Intermediate class for performing binary operations.
         """
    -    def __init__(self, signature, res_dtype, left, right):
    +    def __init__(self, signature, calc_dtype, res_dtype, left, right):
             VirtualArray.__init__(self, signature, res_dtype)
             self.left = left
             self.right = right
    +        self.calc_dtype = calc_dtype
     
         def _del_sources(self):
             self.left = None
    @@ -421,14 +429,14 @@
             return self.right.find_size()
     
         def _eval(self, i):
    -        lhs = self.left.eval(i).convert_to(self.res_dtype)
    -        rhs = self.right.eval(i).convert_to(self.res_dtype)
    +        lhs = self.left.eval(i).convert_to(self.calc_dtype)
    +        rhs = self.right.eval(i).convert_to(self.calc_dtype)
     
             sig = jit.promote(self.signature)
             assert isinstance(sig, signature.Signature)
             call_sig = sig.components[0]
             assert isinstance(call_sig, signature.Call2)
    -        return call_sig.func(self.res_dtype, lhs, rhs)
    +        return call_sig.func(self.calc_dtype, lhs, rhs)
     
     class ViewArray(BaseArray):
         """
    @@ -573,18 +581,28 @@
         __pos__ = interp2app(BaseArray.descr_pos),
         __neg__ = interp2app(BaseArray.descr_neg),
         __abs__ = interp2app(BaseArray.descr_abs),
    +
         __add__ = interp2app(BaseArray.descr_add),
         __sub__ = interp2app(BaseArray.descr_sub),
         __mul__ = interp2app(BaseArray.descr_mul),
         __div__ = interp2app(BaseArray.descr_div),
         __pow__ = interp2app(BaseArray.descr_pow),
         __mod__ = interp2app(BaseArray.descr_mod),
    +
         __radd__ = interp2app(BaseArray.descr_radd),
         __rsub__ = interp2app(BaseArray.descr_rsub),
         __rmul__ = interp2app(BaseArray.descr_rmul),
         __rdiv__ = interp2app(BaseArray.descr_rdiv),
         __rpow__ = interp2app(BaseArray.descr_rpow),
         __rmod__ = interp2app(BaseArray.descr_rmod),
    +
    +    __eq__ = interp2app(BaseArray.descr_eq),
    +    __ne__ = interp2app(BaseArray.descr_ne),
    +    __lt__ = interp2app(BaseArray.descr_lt),
    +    __le__ = interp2app(BaseArray.descr_le),
    +    __gt__ = interp2app(BaseArray.descr_gt),
    +    __ge__ = interp2app(BaseArray.descr_ge),
    +
         __repr__ = interp2app(BaseArray.descr_repr),
         __str__ = interp2app(BaseArray.descr_str),
     
    diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
    --- a/pypy/module/micronumpy/interp_ufuncs.py
    +++ b/pypy/module/micronumpy/interp_ufuncs.py
    @@ -113,10 +113,11 @@
         argcount = 2
     
         def __init__(self, func, name, promote_to_float=False, promote_bools=False,
    -        identity=None):
    +        identity=None, comparison_func=False):
     
             W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity)
             self.func = func
    +        self.comparison_func = comparison_func
             self.signature = signature.Call2(func)
             self.reduce_signature = signature.BaseSignature()
     
    @@ -127,18 +128,25 @@
             [w_lhs, w_rhs] = args_w
             w_lhs = convert_to_array(space, w_lhs)
             w_rhs = convert_to_array(space, w_rhs)
    -        res_dtype = find_binop_result_dtype(space,
    +        calc_dtype = find_binop_result_dtype(space,
                 w_lhs.find_dtype(), w_rhs.find_dtype(),
                 promote_to_float=self.promote_to_float,
                 promote_bools=self.promote_bools,
             )
    +        if self.comparison_func:
    +            res_dtype = space.fromcache(interp_dtype.W_BoolDtype)
    +        else:
    +            res_dtype = calc_dtype
             if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar):
    -            return self.func(res_dtype, w_lhs.value, w_rhs.value).wrap(space)
    +            return self.func(calc_dtype,
    +                w_lhs.value.convert_to(calc_dtype),
    +                w_rhs.value.convert_to(calc_dtype)
    +            ).wrap(space)
     
             new_sig = signature.Signature.find_sig([
                 self.signature, w_lhs.signature, w_rhs.signature
             ])
    -        w_res = Call2(new_sig, res_dtype, w_lhs, w_rhs)
    +        w_res = Call2(new_sig, calc_dtype, res_dtype, w_lhs, w_rhs)
             w_lhs.add_invalidates(w_res)
             w_rhs.add_invalidates(w_res)
             return w_res
    @@ -209,13 +217,16 @@
         return space.fromcache(interp_dtype.W_Float64Dtype)
     
     
    -def ufunc_dtype_caller(ufunc_name, op_name, argcount):
    +def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func):
         if argcount == 1:
             def impl(res_dtype, value):
                 return getattr(res_dtype, op_name)(value)
         elif argcount == 2:
             def impl(res_dtype, lvalue, rvalue):
    -            return getattr(res_dtype, op_name)(lvalue, rvalue)
    +            res = getattr(res_dtype, op_name)(lvalue, rvalue)
    +            if comparison_func:
    +                res = space.fromcache(interp_dtype.W_BoolDtype).box(res)
    +            return res
         return func_with_new_name(impl, ufunc_name)
     
     class UfuncState(object):
    @@ -229,6 +240,13 @@
                 ("mod", "mod", 2, {"promote_bools": True}),
                 ("power", "pow", 2, {"promote_bools": True}),
     
    +            ("equal", "eq", 2, {"comparison_func": True}),
    +            ("not_equal", "ne", 2, {"comparison_func": True}),
    +            ("less", "lt", 2, {"comparison_func": True}),
    +            ("less_equal", "le", 2, {"comparison_func": True}),
    +            ("greater", "gt", 2, {"comparison_func": True}),
    +            ("greater_equal", "ge", 2, {"comparison_func": True}),
    +
                 ("maximum", "max", 2),
                 ("minimum", "min", 2),
     
    @@ -262,7 +280,9 @@
                 identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity)
             extra_kwargs["identity"] = identity
     
    -        func = ufunc_dtype_caller(ufunc_name, op_name, argcount)
    +        func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount,
    +            comparison_func=extra_kwargs.get("comparison_func", False)
    +        )
             if argcount == 1:
                 ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs)
             elif argcount == 2:
    diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
    --- a/pypy/module/micronumpy/test/test_numarray.py
    +++ b/pypy/module/micronumpy/test/test_numarray.py
    @@ -557,6 +557,26 @@
             assert array([1.2, 5]).dtype is dtype(float)
             assert array([]).dtype is dtype(float)
     
    +    def test_comparison(self):
    +        import operator
    +        from numpy import array, dtype
    +
    +        a = array(range(5))
    +        b = array(range(5), float)
    +        for func in [
    +            operator.eq, operator.ne, operator.lt, operator.le, operator.gt,
    +            operator.ge
    +        ]:
    +            c = func(a, 3)
    +            assert c.dtype is dtype(bool)
    +            for i in xrange(5):
    +                assert c[i] == func(a[i], 3)
    +
    +            c = func(b, 3)
    +            assert c.dtype is dtype(bool)
    +            for i in xrange(5):
    +                assert c[i] == func(b[i], 3)
    +
     
     class AppTestSupport(object):
         def setup_class(cls):
    diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
    --- a/pypy/module/micronumpy/test/test_ufuncs.py
    +++ b/pypy/module/micronumpy/test/test_ufuncs.py
    @@ -310,4 +310,30 @@
             assert add.reduce([1, 2, 3]) == 6
             assert maximum.reduce([1]) == 1
             assert maximum.reduce([1, 2, 3]) == 3
    -        raises(ValueError, maximum.reduce, [])
    \ No newline at end of file
    +        raises(ValueError, maximum.reduce, [])
    +
    +    def test_comparisons(self):
    +        import operator
    +        from numpy import equal, not_equal, less, less_equal, greater, greater_equal
    +
    +        for ufunc, func in [
    +            (equal, operator.eq),
    +            (not_equal, operator.ne),
    +            (less, operator.lt),
    +            (less_equal, operator.le),
    +            (greater, operator.gt),
    +            (greater_equal, operator.ge),
    +        ]:
    +            for a, b in [
    +                (3, 3),
    +                (3, 4),
    +                (4, 3),
    +                (3.0, 3.0),
    +                (3.0, 3.5),
    +                (3.5, 3.0),
    +                (3.0, 3),
    +                (3, 3.0),
    +                (3.5, 3),
    +                (3, 3.5),
    +            ]:
    +                assert ufunc(a, b) is func(a, b)
    diff --git a/pypy/module/pwd/__init__.py b/pypy/module/pwd/__init__.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/__init__.py
    @@ -0,0 +1,25 @@
    +from pypy.interpreter.mixedmodule import MixedModule
    +
    +class Module(MixedModule):
    +    """
    +    This module provides access to the Unix password database.
    +    It is available on all Unix versions.
    +
    +    Password database entries are reported as 7-tuples containing the following
    +    items from the password database (see `'), in order:
    +    pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell.
    +    The uid and gid items are integers, all others are strings. An
    +    exception is raised if the entry asked for cannot be found.
    +    """
    +
    +    interpleveldefs = {
    +        'getpwuid': 'interp_pwd.getpwuid',
    +        'getpwnam': 'interp_pwd.getpwnam',
    +        'getpwall': 'interp_pwd.getpwall',
    +    }
    +
    +    appleveldefs = {
    +        'struct_passwd': 'app_pwd.struct_passwd',
    +        'struct_pwent': 'app_pwd.struct_passwd',
    +    }
    +
    diff --git a/pypy/module/pwd/app_pwd.py b/pypy/module/pwd/app_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/app_pwd.py
    @@ -0,0 +1,20 @@
    +from _structseq import structseqtype, structseqfield
    +
    +class struct_passwd:
    +    """
    +    pwd.struct_passwd: Results from getpw*() routines.
    +
    +    This object may be accessed either as a tuple of
    +      (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
    +    or via the object attributes as named in the above tuple.
    +    """
    +    __metaclass__ = structseqtype
    +    name = "pwd.struct_passwd"
    +
    +    pw_name   = structseqfield(0, "user name")
    +    pw_passwd = structseqfield(1, "password")
    +    pw_uid    = structseqfield(2, "user id")
    +    pw_gid    = structseqfield(3, "group id")
    +    pw_gecos  = structseqfield(4, "real name")
    +    pw_dir    = structseqfield(5, "home directory")
    +    pw_shell  = structseqfield(6, "shell program")
    diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/interp_pwd.py
    @@ -0,0 +1,95 @@
    +from pypy.translator.tool.cbuild import ExternalCompilationInfo
    +from pypy.rpython.tool import rffi_platform
    +from pypy.rpython.lltypesystem import rffi, lltype
    +from pypy.interpreter.gateway import interp2app, unwrap_spec
    +from pypy.interpreter.error import OperationError, operationerrfmt
    +from pypy.rlib.rarithmetic import intmask
    +
    +eci = ExternalCompilationInfo(
    +    includes=['pwd.h']
    +    )
    +
    +class CConfig:
    +    _compilation_info_ = eci
    +
    +    uid_t = rffi_platform.SimpleType("uid_t")
    +
    +    passwd = rffi_platform.Struct(
    +        'struct passwd',
    +        [('pw_name', rffi.CCHARP),
    +         ('pw_passwd', rffi.CCHARP),
    +         ('pw_uid', rffi.INT),
    +         ('pw_gid', rffi.INT),
    +         ('pw_gecos', rffi.CCHARP),
    +         ('pw_dir', rffi.CCHARP),
    +         ('pw_shell', rffi.CCHARP),
    +         ])
    +
    +config = rffi_platform.configure(CConfig)
    +passwd_p = lltype.Ptr(config['passwd'])
    +uid_t = config['uid_t']
    +
    +def external(name, args, result, **kwargs):
    +    return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs)
    +
    +c_getpwuid = external("getpwuid", [uid_t], passwd_p)
    +c_getpwnam = external("getpwnam", [rffi.CCHARP], passwd_p)
    +c_setpwent = external("setpwent", [], lltype.Void)
    +c_getpwent = external("getpwent", [], passwd_p)
    +c_endpwent = external("endpwent", [], lltype.Void)
    +
    +def make_struct_passwd(space, pw):
    +    w_passwd_struct = space.getattr(space.getbuiltinmodule('pwd'),
    +                                    space.wrap('struct_passwd'))
    +    w_tuple = space.newtuple([
    +        space.wrap(rffi.charp2str(pw.c_pw_name)),
    +        space.wrap(rffi.charp2str(pw.c_pw_passwd)),
    +        space.wrap(intmask(pw.c_pw_uid)),
    +        space.wrap(intmask(pw.c_pw_gid)),
    +        space.wrap(rffi.charp2str(pw.c_pw_gecos)),
    +        space.wrap(rffi.charp2str(pw.c_pw_dir)),
    +        space.wrap(rffi.charp2str(pw.c_pw_shell)),
    +        ])
    +    return space.call_function(w_passwd_struct, w_tuple)
    +
    + at unwrap_spec(uid=int)
    +def getpwuid(space, uid):
    +    """
    +    getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
    +                      pw_gid,pw_gecos,pw_dir,pw_shell)
    +    Return the password database entry for the given numeric user ID.
    +    See pwd.__doc__ for more on password database entries.
    +    """
    +    pw = c_getpwuid(uid)
    +    if not pw:
    +        raise operationerrfmt(space.w_KeyError,
    +            "getpwuid(): uid not found: %d", uid)
    +    return make_struct_passwd(space, pw)
    +
    + at unwrap_spec(name=str)
    +def getpwnam(space, name):
    +    """
    +    getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
    +                        pw_gid,pw_gecos,pw_dir,pw_shell)
    +    Return the password database entry for the given user name.
    +    See pwd.__doc__ for more on password database entries.
    +    """
    +    pw = c_getpwnam(name)
    +    if not pw:
    +        raise operationerrfmt(space.w_KeyError,
    +            "getpwnam(): name not found: %s", name)
    +    return make_struct_passwd(space, pw)
    +
    +def getpwall(space):
    +    users_w = []
    +    c_setpwent()
    +    try:
    +        while True:
    +            pw = c_getpwent()
    +            if not pw:
    +                break
    +            users_w.append(make_struct_passwd(space, pw))
    +    finally:
    +        c_endpwent()
    +    return space.newlist(users_w)
    +    
    diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/test/test_pwd.py
    @@ -0,0 +1,28 @@
    +from pypy.conftest import gettestobjspace
    +
    +class AppTestPwd:
    +    def setup_class(cls):
    +        cls.space = gettestobjspace(usemodules=['pwd'])
    +
    +    def test_getpwuid(self):
    +        import pwd
    +        raises(KeyError, pwd.getpwuid, -1)
    +        pw = pwd.getpwuid(0)
    +        assert pw.pw_name == 'root'
    +        assert isinstance(pw.pw_passwd, str)
    +        assert pw.pw_uid == 0
    +        assert pw.pw_gid == 0
    +        assert pw.pw_dir == '/root'
    +        assert pw.pw_shell.startswith('/')
    +        #
    +        assert type(pw.pw_uid) is int
    +        assert type(pw.pw_gid) is int
    +
    +    def test_getpwnam(self):
    +        import pwd
    +        raises(KeyError, pwd.getpwnam, '~invalid~')
    +        assert pwd.getpwnam('root').pw_name == 'root'
    +
    +    def test_getpwall(self):
    +        import pwd
    +        assert pwd.getpwnam('root') in pwd.getpwall()
    diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py
    --- a/pypy/module/pypyjit/test_pypy_c/model.py
    +++ b/pypy/module/pypyjit/test_pypy_c/model.py
    @@ -2,7 +2,10 @@
     import sys
     import re
     import os.path
    -from _pytest.assertion import newinterpret
    +try:
    +    from _pytest.assertion import newinterpret
    +except ImportError:   # e.g. Python 2.5
    +    newinterpret = None
     from pypy.tool.jitlogparser.parser import SimpleParser, Function, TraceForOpcode
     from pypy.tool.jitlogparser.storage import LoopStorage
     
    @@ -196,7 +199,7 @@
                         source = str(source.deindent()).strip()
             except py.error.ENOENT:
                 source = None
    -        if source and source.startswith('self._assert('):
    +        if source and source.startswith('self._assert(') and newinterpret:
                 # transform self._assert(x, 'foo') into assert x, 'foo'
                 source = source.replace('self._assert(', 'assert ')
                 source = source[:-1] # remove the trailing ')'
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    @@ -1,3 +1,4 @@
    +from __future__ import with_statement
     import sys, os
     import types
     import subprocess
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    @@ -142,6 +142,7 @@
                 i = 0
                 b = B(1)
                 while i < 100:
    +                b.x
                     v = b.x # ID: loadattr
                     i += v
                 return i
    @@ -150,8 +151,6 @@
             loop, = log.loops_by_filename(self.filepath)
             assert loop.match_by_id('loadattr',
             '''
    -        guard_not_invalidated(descr=...)
    -        i16 = arraylen_gc(p10, descr=)
             i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...)
             guard_no_exception(descr=...)
             i21 = int_and(i19, _)
    diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
    --- a/pypy/module/sys/__init__.py
    +++ b/pypy/module/sys/__init__.py
    @@ -47,6 +47,7 @@
             'pypy_initial_path'     : 'state.pypy_initial_path',
     
             '_getframe'             : 'vm._getframe', 
    +        '_current_frames'       : 'vm._current_frames', 
             'setrecursionlimit'     : 'vm.setrecursionlimit', 
             'getrecursionlimit'     : 'vm.getrecursionlimit', 
             'setcheckinterval'      : 'vm.setcheckinterval', 
    diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
    --- a/pypy/module/sys/test/test_sysmodule.py
    +++ b/pypy/module/sys/test/test_sysmodule.py
    @@ -1,6 +1,6 @@
     # -*- coding: iso-8859-1 -*-
     import autopath
    -from pypy.conftest import option
    +from pypy.conftest import option, gettestobjspace
     from py.test import raises
     from pypy.interpreter.gateway import app2interp_temp
     import sys
    @@ -524,3 +524,51 @@
             # If this ever actually becomes a compilation option this test should
             # be changed.
             assert sys.float_repr_style == "short"
    +
    +class AppTestCurrentFrames:
    +
    +    def test_current_frames(self):
    +        try:
    +            import thread
    +        except ImportError:
    +            pass
    +        else:
    +            skip('This test requires an intepreter without threads')
    +        import sys
    +
    +        def f():
    +            return sys._current_frames()
    +        frames = f()
    +        assert frames.keys() == [0]
    +        assert frames[0].f_code.co_name == 'f'
    +
    +class AppTestCurrentFramesWithThread(AppTestCurrentFrames):
    +    def setup_class(cls):
    +        cls.space = gettestobjspace(usemodules=('thread',))
    +
    +    def test_current_frames(self):
    +        import sys
    +        import time
    +        import thread
    +
    +        thread_id = thread.get_ident()
    +        self.ready = False
    +        def other_thread():
    +            self.ready = True
    +            print "thread started"
    +            time.sleep(5)
    +        thread.start_new_thread(other_thread, ())
    +
    +        def f():
    +            for i in range(100):
    +                if self.ready: break
    +                time.sleep(0.1)
    +            return sys._current_frames()
    +        
    +        frames = f()
    +        thisframe = frames.pop(thread_id)
    +        assert thisframe.f_code.co_name == 'f'
    +
    +        assert len(frames) == 1
    +        _, other_frame = frames.popitem()
    +        assert other_frame.f_code.co_name == 'other_thread'
    diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
    --- a/pypy/module/sys/vm.py
    +++ b/pypy/module/sys/vm.py
    @@ -43,6 +43,23 @@
         f.mark_as_escaped()
         return space.wrap(f)
     
    +def _current_frames(space):
    +    """_current_frames() -> dictionary
    +
    +    Return a dictionary mapping each current thread T's thread id to T's
    +    current stack frame.
    +
    +    This function should be used for specialized purposes only."""
    +    w_result = space.newdict()
    +    ecs = space.threadlocals.getallvalues()
    +    for thread_ident, ec in ecs.items():
    +        f = ec.gettopframe_nohidden()
    +        f.mark_as_escaped()
    +        space.setitem(w_result,
    +                      space.wrap(thread_ident),
    +                      space.wrap(f))
    +    return w_result                      
    +
     def setrecursionlimit(space, w_new_limit):
         """setrecursionlimit() sets the maximum number of nested calls that
     can occur before a RuntimeError is raised.  On PyPy the limit is
    diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py
    --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py
    +++ b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py
    @@ -1,3 +1,4 @@
    +import py; py.test.skip("xxx remove")
     
     """ Controllers tests
     """
    @@ -8,7 +9,7 @@
     class AppTestDistributed(object):
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -            "usemodules":("_stackless",)})
    +            "usemodules":("_continuation",)})
     
         def test_init(self):
             import distributed
    @@ -90,14 +91,12 @@
     
     class AppTestDistributedTasklets(object):
         spaceconfig = {"objspace.std.withtproxy": True,
    -                   "objspace.usemodules._stackless": True}
    +                   "objspace.usemodules._continuation": True}
         reclimit = sys.getrecursionlimit()
     
         def setup_class(cls):
             import py.test
             py.test.importorskip('greenlet')
    -        #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -        #    "usemodules":("_stackless",)})
             cls.w_test_env_ = cls.space.appexec([], """():
             from distributed import test_env
             return (test_env,)
    diff --git a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py
    --- a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py
    +++ b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py
    @@ -1,5 +1,4 @@
    -
    -import py
    +import py; py.test.skip("xxx remove")
     from pypy.conftest import gettestobjspace, option
     
     def setup_module(mod):
    @@ -10,7 +9,7 @@
             if not option.runappdirect:
                 py.test.skip("Cannot run this on top of py.py because of PopenGateway")
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless",)})
    +                                       "usemodules":("_continuation",)})
             cls.w_remote_side_code = cls.space.appexec([], """():
             import sys
             sys.path.insert(0, '%s')
    diff --git a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py
    --- a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py
    +++ b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py
    @@ -1,4 +1,4 @@
    -import py
    +import py; py.test.skip("xxx remove")
     from pypy.conftest import gettestobjspace
     
     def setup_module(mod):
    @@ -9,7 +9,8 @@
     class AppTestSocklayer:
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless","_socket", "select")})
    +                                       "usemodules":("_continuation",
    +                                                     "_socket", "select")})
         
         def test_socklayer(self):
             class X(object):
    diff --git a/pypy/module/test_lib_pypy/test_stackless_pickle.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py
    --- a/pypy/module/test_lib_pypy/test_stackless_pickle.py
    +++ b/pypy/module/test_lib_pypy/test_stackless_pickle.py
    @@ -1,3 +1,4 @@
    +import py; py.test.skip("XXX port me")
     from pypy.conftest import gettestobjspace, option
     
     class AppTest_Stackless:
    diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py
    --- a/pypy/module/thread/threadlocals.py
    +++ b/pypy/module/thread/threadlocals.py
    @@ -43,6 +43,9 @@
             ident = self._mainthreadident
             return self._valuedict.get(ident, None)
     
    +    def getallvalues(self):
    +        return self._valuedict
    +
         def enter_thread(self, space):
             "Notification that the current thread is just starting."
             ec = space.getexecutioncontext()
    diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py
    --- a/pypy/objspace/std/marshal_impl.py
    +++ b/pypy/objspace/std/marshal_impl.py
    @@ -325,10 +325,10 @@
         # of building a list of tuples.
         w_dic = space.newdict()
         while 1:
    -        w_key = u.get_w_obj(True)
    +        w_key = u.get_w_obj(allow_null=True)
             if w_key is None:
                 break
    -        w_value = u.get_w_obj(False)
    +        w_value = u.get_w_obj()
             space.setitem(w_dic, w_key, w_value)
         return w_dic
     register(TYPE_DICT, unmarshal_DictMulti)
    @@ -364,7 +364,7 @@
     # so we no longer can handle it in interp_marshal.atom_strlist
     
     def unmarshal_str(u):
    -    w_obj = u.get_w_obj(False)
    +    w_obj = u.get_w_obj()
         try:
             return u.space.str_w(w_obj)
         except OperationError, e:
    diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py
    --- a/pypy/objspace/std/objecttype.py
    +++ b/pypy/objspace/std/objecttype.py
    @@ -24,7 +24,12 @@
         return w_obj.getrepr(space, '%s object' % (classname,))
     
     def descr__str__(space, w_obj):
    -    return space.repr(w_obj)
    +    w_type = space.type(w_obj)
    +    w_impl = w_type.lookup("__repr__")
    +    if w_impl is None:
    +        raise OperationError(space.w_TypeError,      # can it really occur?
    +                             space.wrap("operand does not support unary str"))
    +    return space.get_and_call_function(w_impl, w_obj)
     
     def descr__class__(space, w_obj):
         return space.type(w_obj)
    diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py
    --- a/pypy/objspace/std/test/test_methodcache.py
    +++ b/pypy/objspace/std/test/test_methodcache.py
    @@ -88,30 +88,37 @@
       
         def test_many_names(self):
             import __pypy__
    -        class A(object):
    -            foo = 5
    -            bar = 6
    -            baz = 7
    -            xyz = 8
    -            stuff = 9
    -            a = 10
    -            foobar = 11
    +        for j in range(20):
    +            class A(object):
    +                foo = 5
    +                bar = 6
    +                baz = 7
    +                xyz = 8
    +                stuff = 9
    +                a = 10
    +                foobar = 11
     
    -        a = A()
    -        names = [name for name in A.__dict__.keys()
    -                      if not name.startswith('_')]
    -        names.sort()
    -        names_repeated = names * 10
    -        result = []
    -        __pypy__.reset_method_cache_counter()
    -        for name in names_repeated:
    -            result.append(getattr(a, name))
    -        append_counter = __pypy__.method_cache_counter("append")
    -        names_counters = [__pypy__.method_cache_counter(name)
    -                          for name in names]
    -        assert append_counter[0] >= 5 * len(names)
    -        for name, count in zip(names, names_counters):
    -            assert count[0] >= 5, str((name, count))
    +            a = A()
    +            names = [name for name in A.__dict__.keys()
    +                          if not name.startswith('_')]
    +            names.sort()
    +            names_repeated = names * 10
    +            result = []
    +            __pypy__.reset_method_cache_counter()
    +            for name in names_repeated:
    +                result.append(getattr(a, name))
    +            append_counter = __pypy__.method_cache_counter("append")
    +            names_counters = [__pypy__.method_cache_counter(name)
    +                              for name in names]
    +            try:
    +                assert append_counter[0] >= 10 * len(names) - 1
    +                for name, count in zip(names, names_counters):
    +                    assert count == (9, 1), str((name, count))
    +                break
    +            except AssertionError:
    +                pass
    +        else:
    +            raise
     
         def test_mutating_bases(self):
             class C(object):
    diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py
    --- a/pypy/objspace/std/test/test_obj.py
    +++ b/pypy/objspace/std/test/test_obj.py
    @@ -94,3 +94,11 @@
             #assert len(log) == 1
             #assert log[0].message.args == ("object.__init__() takes no parameters",)
             #assert type(log[0].message) is DeprecationWarning
    +
    +    def test_object_str(self):
    +        # obscure case: __str__() must delegate to __repr__() without adding
    +        # type checking on its own
    +        class A(object):
    +            def __repr__(self):
    +                return 123456
    +        assert A().__str__() == 123456
    diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py
    --- a/pypy/objspace/std/test/test_stringformat.py
    +++ b/pypy/objspace/std/test/test_stringformat.py
    @@ -168,7 +168,7 @@
     
         def test_incomplete_format(self):
             raises(ValueError, '%'.__mod__, ((23,),))
    -        raises(ValueError, '%('.__mod__, ({},))
    +        raises((ValueError, TypeError), '%('.__mod__, ({},))
     
         def test_format_char(self):
             import sys
    diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
    --- a/pypy/objspace/std/test/test_unicodeobject.py
    +++ b/pypy/objspace/std/test/test_unicodeobject.py
    @@ -780,8 +780,22 @@
             assert type(s) is unicode
             assert s == u'\u1234'
     
    +        # now the same with a new-style class...
    +        class A(object):
    +            def __init__(self, num):
    +                self.num = num
    +            def __str__(self):
    +                return unichr(self.num)
    +
    +        s = '%s' % A(111)    # this is ASCII
    +        assert type(s) is unicode
    +        assert s == chr(111)
    +
    +        s = '%s' % A(0x1234)    # this is not ASCII
    +        assert type(s) is unicode
    +        assert s == u'\u1234'
    +
         def test_formatting_unicode__str__2(self):
    -        skip("this is completely insane")
             class A:
                 def __str__(self):
                     return u'baz'
    @@ -798,9 +812,22 @@
             s = '%s %s' % (a, b)
             assert s == u'baz bar'
     
    +        skip("but this case here is completely insane")
             s = '%s %s' % (b, a)
             assert s == u'foo baz'
     
    +    def test_formatting_unicode__str__3(self):
    +        # "bah" is all I can say
    +        class X(object):
    +            def __repr__(self):
    +                return u'\u1234'
    +        '%s' % X()
    +        #
    +        class X(object):
    +            def __str__(self):
    +                return u'\u1234'
    +        '%s' % X()
    +
         def test_str_subclass(self):
             class Foo9(str):
                 def __unicode__(self):
    diff --git a/pypy/rlib/rcoroutine.py b/pypy/rlib/rcoroutine.py
    deleted file mode 100644
    --- a/pypy/rlib/rcoroutine.py
    +++ /dev/null
    @@ -1,357 +0,0 @@
    -"""
    -Basic Concept:
    ---------------
    -
    -All concurrency is expressed by some means of coroutines.
    -This is the lowest possible exposable interface.
    -
    -A coroutine is a structure that controls a sequence
    -of continuations in time. It contains a frame object
    -that is a restartable stack chain. This frame object
    -is updated on every switch.
    -
    -The frame can be None. Either the coroutine is not yet
    -bound, or it is the current coroutine of some costate.
    -See below. XXX rewrite a definition of these terms.
    -
    -There is always a notation of a "current" and a "last"
    -coroutine. Current has no frame and represents the
    -running program. last is needed to keep track of the
    -coroutine that receives a new frame chain after a switch.
    -
    -A costate object holds last and current.
    -There are different coroutine concepts existing in
    -parallel, like plain interp-level coroutines and
    -app-level structures like coroutines, greenlets and
    -tasklets.
    -Every concept is associated with its own costate object.
    -This allows for peaceful co-existence of many concepts.
    -The type of a switch is determined by the target's costate.
    -"""
    -
    -import py; py.test.skip("fixme: rewrite using rlib.rstacklet")
    -# XXX ^^^ the reason it is not done is that pypy.rlib.rcoroutine
    -# plus pypy/module/_stackless look like faaaaaar too much code
    -# to me :-(
    -
    -from pypy.rlib.rstack import yield_current_frame_to_caller
    -from pypy.rlib.objectmodel import we_are_translated
    -
    -from pypy.interpreter.error import OperationError
    -
    -try:
    -    from greenlet import greenlet
    -    main_greenlet = greenlet.getcurrent()
    -except (ImportError, ValueError):
    -    def greenlet(*args, **kwargs):
    -        raise NotImplementedError("need either greenlets or a translated version of pypy")
    -
    -class FrameChain(object):
    -    """Greenlet-based emulation of the primitive rstack 'frames' of RPython"""
    -
    -    def __init__(self, thunk=None):
    -        if thunk:
    -            self.greenlet = greenlet(thunk)
    -        else:
    -            self.greenlet = greenlet.getcurrent()
    -
    -    def switch(self):
    -        last = FrameChain()
    -        return self.greenlet.switch(last)
    -
    -import sys, os
    -
    -def make_coroutine_classes(baseclass):
    -    class BaseCoState(object):
    -        def __init__(self):
    -            self.current = self.main = None
    -
    -        def __repr__(self):
    -            "NOT_RPYTHON"
    -            # for debugging only
    -            return '<%s current=%r>' % (self.__class__.__name__, self.current)
    -
    -        def update(self, new):
    -            syncstate.leaving = self.current
    -            syncstate.entering = new
    -            self.current = new
    -            frame, new.frame = new.frame, None
    -            return frame
    -
    -
    -    class CoState(BaseCoState):
    -        def __init__(self):
    -            BaseCoState.__init__(self)
    -            self.current = self.main = Coroutine(self)
    -
    -    class CoroutineDamage(SystemError):
    -        pass
    -
    -
    -    class SyncState(object):
    -        def __init__(self):
    -            self.reset()
    -
    -        def reset(self):
    -            self.default_costate = None
    -            self.leaving = None
    -            self.entering = None
    -            self.things_to_do = False
    -            self.temp_exc = None
    -            self.to_delete = []
    -
    -        def switched(self, incoming_frame):
    -            left = syncstate.leaving
    -            entered = syncstate.entering
    -            syncstate.leaving = syncstate.entering = None
    -            if left is not None:   # mostly to work around an annotation problem;
    -                                   # should not really be None
    -                left.frame = incoming_frame
    -                left.goodbye()
    -            if entered is not None:
    -                entered.hello()
    -            if self.things_to_do:
    -                self._do_things_to_do()
    -
    -        def push_exception(self, exc):
    -            self.things_to_do = True
    -            self.temp_exc = exc
    -
    -        def check_for_zombie(self, obj):
    -            return obj in self.to_delete
    -
    -        def postpone_deletion(self, obj):
    -            self.to_delete.append(obj)
    -            self.things_to_do = True
    -
    -        def _do_things_to_do(self):
    -            if self.temp_exc is not None:
    -                # somebody left an unhandled exception and switched to us.
    -                # this both provides default exception handling and the
    -                # way to inject an exception, like CoroutineExit.
    -                e, self.temp_exc = self.temp_exc, None
    -                self.things_to_do = bool(self.to_delete)
    -                raise e
    -            while self.to_delete:
    -                delete, self.to_delete = self.to_delete, []
    -                for obj in delete:
    -                    obj.parent = obj.costate.current
    -                    obj._kill_finally()
    -            else:
    -                self.things_to_do = False
    -
    -        def _freeze_(self):
    -            self.reset()
    -            return False
    -
    -    syncstate = SyncState()
    -
    -
    -    class CoroutineExit(SystemExit):
    -        # XXX SystemExit's __init__ creates problems in bookkeeper.
    -        def __init__(self):
    -            pass
    -
    -    class AbstractThunk(object):
    -        def call(self):
    -            raise NotImplementedError("abstract base class")
    -
    -
    -    class Coroutine(baseclass):
    -        def __init__(self, state=None):
    -            self.frame = None
    -            if state is None:
    -                state = self._get_default_costate()
    -            self.costate = state
    -            self.parent = None
    -            self.thunk = None
    -            self.coroutine_exit = False
    -
    -        def __repr__(self):
    -            'NOT_RPYTHON'
    -            # just for debugging
    -            if hasattr(self, '__name__'):
    -                return '' % (self.__name__, self.frame, self.thunk is not None)
    -            else:
    -                return '' % (self.frame, self.thunk is not None)
    -
    -        def _get_default_costate():
    -            state = syncstate.default_costate
    -            if state is None:
    -                state = syncstate.default_costate = CoState()
    -            return state
    -        _get_default_costate = staticmethod(_get_default_costate)
    -
    -        def _get_default_parent(self):
    -            return self.costate.current
    -
    -        def bind(self, thunk):
    -            assert isinstance(thunk, AbstractThunk)
    -            if self.frame is not None:
    -                raise CoroutineDamage
    -            if self.parent is None:
    -                self.parent = self._get_default_parent()
    -            assert self.parent is not None
    -            self.thunk = thunk
    -            if we_are_translated():
    -                self.frame = self._bind()
    -            else:
    -                self.frame = self._greenlet_bind()
    -
    -        def _greenlet_bind(self):
    -            weak = [self]
    -            def _greenlet_execute(incoming_frame):
    -                try:
    -                    chain2go2next = weak[0]._execute(incoming_frame)
    -                except:
    -                    # no exception is supposed to get out of _execute()
    -                    # better report it directly into the main greenlet then,
    -                    # and hidden to prevent catching
    -                    main_greenlet.throw(AssertionError(
    -                        "unexpected exception out of Coroutine._execute()",
    -                        *sys.exc_info()))
    -                    assert 0
    -                del weak[0]
    -                greenlet.getcurrent().parent = chain2go2next.greenlet
    -                return None   # as the result of the FrameChain.switch()
    -            chain = FrameChain(_greenlet_execute)
    -            return chain
    -
    -        def _bind(self):
    -            state = self.costate
    -            incoming_frame = yield_current_frame_to_caller()
    -            self = state.current
    -            return self._execute(incoming_frame)
    -
    -        def _execute(self, incoming_frame):
    -            state = self.costate
    -            try:
    -                try:
    -                    try:
    -                        exc = None
    -                        thunk = self.thunk
    -                        self.thunk = None
    -                        syncstate.switched(incoming_frame)
    -                        thunk.call()
    -                    except Exception, e:
    -                        exc = e
    -                        raise
    -                finally:
    -                    # warning! we must reload the 'self' from the costate,
    -                    # because after a clone() the 'self' of both copies
    -                    # point to the original!
    -                    self = state.current
    -                    self.finish(exc)
    -            except CoroutineExit:
    -                pass
    -            except Exception, e:
    -                if self.coroutine_exit is False:
    -                    # redirect all unhandled exceptions to the parent
    -                    syncstate.push_exception(e)
    -
    -            while self.parent is not None and self.parent.frame is None:
    -                # greenlet behavior is fine
    -                self.parent = self.parent.parent
    -            return state.update(self.parent)
    -
    -        def switch(self):
    -            if self.frame is None:
    -                # considered a programming error.
    -                # greenlets and tasklets have different ideas about this.
    -                raise CoroutineDamage
    -            state = self.costate
    -            incoming_frame = state.update(self).switch()
    -            syncstate.switched(incoming_frame)
    -
    -        def kill(self):
    -            self._kill(CoroutineExit())
    -
    -        def _kill(self, exc):
    -            if self.frame is None:
    -                return
    -            state = self.costate
    -            syncstate.push_exception(exc)
    -            # careful here - if setting self.parent to state.current would
    -            # create a loop, break it.  The assumption is that 'self'
    -            # will die, so that state.current's chain of parents can be
    -            # modified to skip 'self' without too many people noticing.
    -            p = state.current
    -            if p is self or self.parent is None:
    -                pass  # killing the current of the main - don't change any parent
    -            else:
    -                while p.parent is not None:
    -                    if p.parent is self:
    -                        p.parent = self.parent
    -                        break
    -                    p = p.parent
    -                self.parent = state.current
    -            self.switch()
    -
    -        def _kill_finally(self):
    -            try:
    -                self._userdel()
    -            except Exception:
    -                pass # maybe print a warning?
    -            self.kill()
    -
    -        __already_postponed = False
    -    
    -        def __del__(self):
    -            # provide the necessary clean-up
    -            # note that AppCoroutine has to take care about this
    -            # as well, including a check for user-supplied __del__.
    -            # Additionally note that in the context of __del__, we are
    -            # not in the position to issue a switch.
    -            # we defer it completely.
    -            
    -            # it is necessary to check whether syncstate is None because CPython
    -            # sets it to None when it cleans up the modules, which will lead to
    -            # very strange effects
    -
    -            if not we_are_translated():
    -                # we need to make sure that we postpone each coroutine only once on
    -                # top of CPython, because this resurrects the coroutine and CPython
    -                # calls __del__ again, thus postponing and resurrecting the
    -                # coroutine once more :-(
    -                if self.__already_postponed:
    -                    return
    -                self.__already_postponed = True
    -            if syncstate is not None:
    -                syncstate.postpone_deletion(self)
    -
    -        # coroutines need complete control over their __del__ behaviour. In
    -        # particular they need to care about calling space.userdel themselves
    -        handle_del_manually = True
    -
    -        def _userdel(self):
    -            # override this for exposed coros
    -            pass
    -
    -        def is_alive(self):
    -            return self.frame is not None or self is self.costate.current
    -
    -        def is_zombie(self):
    -            return self.frame is not None and syncstate.check_for_zombie(self)
    -
    -        def getcurrent():
    -            costate = Coroutine._get_default_costate()
    -            return costate.current
    -        getcurrent = staticmethod(getcurrent)
    -
    -        def getmain():
    -            costate = Coroutine._get_default_costate()
    -            return costate.main
    -        getmain = staticmethod(getmain)
    -
    -        def hello(self):
    -            "Called when execution is transferred into this coroutine."
    -
    -        def goodbye(self):
    -            "Called just after execution is transferred away from this coroutine."
    -
    -        def finish(self, exc=None):
    -            "stephan forgot me"
    -
    -    return locals()
    -
    -# _________________________________________________
    diff --git a/pypy/rlib/test/test_rcoroutine.py b/pypy/rlib/test/test_rcoroutine.py
    deleted file mode 100644
    --- a/pypy/rlib/test/test_rcoroutine.py
    +++ /dev/null
    @@ -1,348 +0,0 @@
    -"""
    -testing coroutines at interprepter level
    -"""
    -import py
    -import os
    -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect()
    -from pypy.rlib.rcoroutine import make_coroutine_classes
    -from pypy.translator.c.test.test_stackless import StacklessTest
    -from pypy.translator.c import gc
    -
    -def setup_module(mod):
    -    py.test.importorskip('greenlet')
    -
    -d = make_coroutine_classes(object)
    -syncstate = d['syncstate']
    -Coroutine = d['Coroutine']
    -AbstractThunk = d['AbstractThunk']
    -
    -def output(stuff):
    -    os.write(2, stuff + '\n')
    -
    -class _TestCoroutine(StacklessTest):
    -    backendopt = True
    -    Coroutine = Coroutine
    -
    -    def setup_method(self, method):
    -        syncstate.reset()
    -
    -    def _freeze_(self):    # for 'self.Coroutine'
    -        return True
    -
    -    def test_coroutine1(self):
    -
    -        def g(lst, coros):
    -            coro_f, coro_g, coro_h = coros
    -            lst.append(2)
    -            output('g appended 2')
    -            coro_h.switch()
    -            lst.append(5)
    -            output('g appended 5')
    -
    -        def h(lst, coros):
    -            coro_f, coro_g, coro_h = coros
    -            lst.append(3)
    -            output('h appended 3')
    -            coro_f.switch()
    -            lst.append(7)
    -            output('h appended 7')
    -
    -        class T(AbstractThunk):
    -            def __init__(self, func, arg1, arg2):
    -                self.func = func
    -                self.arg1 = arg1
    -                self.arg2 = arg2
    -            def call(self):
    -                self.func(self.arg1, self.arg2)
    -
    -        def f():
    -            lst = [1]
    -            coro_f = Coroutine.getcurrent()
    -            coro_g = self.Coroutine()
    -            coro_h = self.Coroutine()
    -            coros = [coro_f, coro_g, coro_h]
    -            thunk_g = T(g, lst, coros)
    -            output('binding g after f set 1')
    -            coro_g.bind(thunk_g)
    -            thunk_h = T(h, lst, coros)
    -            output('binding h after f set 1')
    -            coro_h.bind(thunk_h)
    -            output('switching to g')
    -            coro_g.switch()
    -            lst.append(4)
    -            output('f appended 4')
    -            coro_g.switch()
    -            lst.append(6)
    -            output('f appended 6')
    -            coro_h.switch()
    -            lst.append(8)
    -            output('f appended 8')
    -            n = 0
    -            for i in lst:
    -                n = n*10 + i
    -            return n
    -
    -        data = self.wrap_stackless_function(f)
    -        assert data == 12345678
    -
    -    def test_coroutine2(self):
    -
    -        class TBase(AbstractThunk):
    -            def call(self):
    -                pass
    -
    -        class T(TBase):
    -            def __init__(self, func, arg1, arg2):
    -                self.func = func
    -                self.arg1 = arg1
    -                self.arg2 = arg2
    -            def call(self):
    -                self.res = self.func(self.arg1, self.arg2)
    -
    -        class T1(TBase):
    -            def __init__(self, func, arg1):
    -                self.func = func
    -                self.arg1 = arg1
    -            def call(self):
    -                self.res = self.func(self.arg1)
    -
    -        def g(lst, coros):
    -            coro_f1, coro_g, coro_h = coros
    -            lst.append(2)
    -            output('g appended 2')
    -            coro_h.switch()
    -            lst.append(5)
    -            output('g appended 5')
    -            output('exiting g')
    -
    -        def h(lst, coros):
    -            coro_f1, coro_g, coro_h = coros
    -            lst.append(3)
    -            output('h appended 3')
    -            coro_f1.switch()
    -            lst.append(7)
    -            output('h appended 7')
    -            output('exiting h')
    -
    -        def f1(coro_f1):
    -            lst = [1]
    -            coro_g = self.Coroutine()
    -            coro_g.__name__ = 'coro_g'
    -            coro_h = self.Coroutine()
    -            coro_h.__name__ = 'coro_h'
    -            coros = [coro_f1, coro_g, coro_h]
    -            thunk_g = T(g, lst, coros)
    -            output('binding g after f1 set 1')
    -            coro_g.bind(thunk_g)
    -            thunk_h = T(h, lst, coros)
    -            output('binding h after f1 set 1')
    -            coro_h.bind(thunk_h)
    -            output('switching to g')
    -            coro_g.switch()
    -            lst.append(4)
    -            output('f1 appended 4')
    -            coro_g.switch()
    -            lst.append(6)
    -            output('f1 appended 6')
    -            coro_h.switch()
    -            lst.append(8)
    -            output('f1 appended 8')
    -            n = 0
    -            for i in lst:
    -                n = n*10 + i
    -            output('exiting f1')
    -            return n     
    -
    -        def f():
    -            coro_f = Coroutine.getcurrent()
    -            coro_f.__name__ = 'coro_f'
    -            coro_f1 = self.Coroutine()
    -            coro_f1.__name__ = 'coro_f1'
    -            thunk_f1 = T1(f1, coro_f1)
    -            output('binding f1 after f set 1')
    -            coro_f1.bind(thunk_f1)
    -            coro_f1.switch()
    -            output('return to main :-(')
    -            return thunk_f1.res
    -
    -        data = self.wrap_stackless_function(f)
    -        assert data == 12345678
    -
    -    def test_kill_raise_del_coro(self):
    -        class T(AbstractThunk):
    -            def __init__(self, func, arg):
    -                self.func = func
    -                self.arg = arg
    -            def call(self):
    -                self.func(self.arg, self)
    -
    -        def g(nrec, t, count=0):
    -            t.count = count
    -            if nrec < 0:
    -                raise ValueError
    -            if nrec:
    -                g(nrec-1, t, count+1)
    -            Coroutine.getmain().switch()
    -
    -        def f():
    -            assert Coroutine.getmain().frame is None
    -            coro_g = self.Coroutine()
    -            coro_g.__name__ = 'coro_g'
    -            thunk_g = T(g, 42)
    -            coro_g.bind(thunk_g)
    -            coro_g.switch()
    -            res = thunk_g.count
    -            res *= 10
    -            res |= coro_g.frame is not None
    -            # testing kill
    -            coro_g.kill()
    -            res *= 10
    -            res |= coro_g.frame is None
    -            coro_g = self.Coroutine()
    -            # see what happens if we __del__
    -            thunk_g = T(g, -42)
    -            coro_g.bind(thunk_g)
    -            try:
    -                coro_g.switch()
    -            except ValueError:
    -                res += 500
    -            return res
    -
    -        data = self.wrap_stackless_function(f)
    -        assert data == 4711
    -
    -    def test_tree_compare(self):
    -        class Node:
    -            def __init__(self, value, left=None, right=None):
    -                self.value = value
    -                self.left = left
    -                self.right = right
    -            def __repr__(self):
    -                return 'Node(%r, %r, %r)'%(self.value, self.left, self.right)
    -
    -        tree1 = Node(1, Node(2, Node(3)))
    -        tree2 = Node(1, Node(3, Node(2)))
    -        tree3 = Node(1, Node(2), Node(3))
    -
    -        class Producer(AbstractThunk):
    -            def __init__(self, tree, objects, consumer):
    -                self.tree = tree
    -                self.objects = objects
    -                self.consumer = consumer
    -            def produce(self, t):
    -                if t is None:
    -                    return
    -                self.objects.append(t.value)
    -                self.consumer.switch()
    -                self.produce(t.left)
    -                self.produce(t.right)
    -            def call(self):
    -                self.produce(self.tree)
    -                while 1:
    -                    self.consumer.switch()
    -        class Consumer(AbstractThunk):
    -            def __init__(self, tree, objects, producer):
    -                self.tree = tree
    -                self.objects = objects
    -                self.producer = producer
    -            def consume(self, t):
    -                if t is None:
    -                    return True
    -                self.producer.switch()
    -                if not self.objects:
    -                    return False
    -                if self.objects.pop(0) != t.value:
    -                    return False
    -                if not self.consume(t.left):
    -                    return False
    -                return self.consume(t.right)
    -
    -            def call(self):
    -                self.result = self.consume(self.tree)
    -                Coroutine.getmain().switch()
    -
    -        def pre_order_eq(t1, t2):
    -            objects = []
    -            producer = self.Coroutine()
    -            consumer = self.Coroutine()
    -
    -            producer.bind(Producer(t1, objects, consumer))
    -            cons = Consumer(t2, objects, producer)
    -            consumer.bind(cons)
    -
    -            consumer.switch()
    -
    -            return cons.result
    -
    -        def ep():
    -            return int("%d%d%d%d"%(pre_order_eq(tree1, tree2),
    -                                   pre_order_eq(tree1, tree1),
    -                                   pre_order_eq(tree1, tree3),
    -                                   pre_order_eq(tree2, tree1),
    -                                   ))
    -
    -        output = self.wrap_stackless_function(ep)
    -        assert output == int('0110')
    -
    -    def test_hello_goodbye(self):
    -
    -        class C(Coroutine):
    -            n = 2
    -            def __init__(self, n):
    -                Coroutine.__init__(self)
    -                self.n = n
    -            def hello(self):
    -                costate.hello_goodbye *= 10
    -                costate.hello_goodbye += self.n
    -            def goodbye(self):
    -                costate.hello_goodbye *= 10
    -                costate.hello_goodbye += self.n + 1
    -
    -        class T(AbstractThunk):
    -            def call(self):
    -                pass
    -
    -        costate = Coroutine._get_default_costate()
    -        costate.current.__class__ = C
    -        costate.hello_goodbye = 0
    -
    -        def ep():
    -            syncstate.default_costate = costate
    -            costate.hello_goodbye = 0
    -            c1 = C(4)
    -            c1.bind(T())
    -            c1.switch()
    -            return costate.hello_goodbye
    -
    -        output = self.wrap_stackless_function(ep)
    -        # expected result:
    -        #   goodbye main   3
    -        #   hello   c1     4
    -        #   goodbye c1     5
    -        #   hello   main   2
    -        assert output == 3452
    -
    -    def test_raise_propagate(self):
    -        class T(AbstractThunk):
    -            def call(self):
    -                raise ValueError
    -
    -        def ep():
    -            c = self.Coroutine()
    -            c.bind(T())
    -            try:
    -                c.switch()
    -            except ValueError:
    -                return 100
    -            else:
    -                return -5
    -
    -        output = self.wrap_stackless_function(ep)
    -        assert output == 100
    -
    -
    -TestCoroutine = _TestCoroutine # to activate
    -class TestCoroutineOnCPython(_TestCoroutine):
    -    def wrap_stackless_function(self, func):
    -        return func()
    -
    diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py
    --- a/pypy/rpython/module/ll_os_stat.py
    +++ b/pypy/rpython/module/ll_os_stat.py
    @@ -49,19 +49,8 @@
         ]
     N_INDEXABLE_FIELDS = 10
     
    -# for now, check the host Python to know which st_xxx fields exist
    -STAT_FIELDS = [(_name, _TYPE) for (_name, _TYPE) in ALL_STAT_FIELDS
    -                              if hasattr(os.stat_result, _name)]
    -
    -STAT_FIELD_TYPES = dict(STAT_FIELDS)      # {'st_xxx': TYPE}
    -
    -STAT_FIELD_NAMES = [_name for (_name, _TYPE) in ALL_STAT_FIELDS
    -                          if _name in STAT_FIELD_TYPES]
    -
    -del _name, _TYPE
    -
     # For OO backends, expose only the portable fields (the first 10).
    -PORTABLE_STAT_FIELDS = STAT_FIELDS[:N_INDEXABLE_FIELDS]
    +PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS]
     
     # ____________________________________________________________
     #
    @@ -142,17 +131,22 @@
         includes = INCLUDES
     )
     
    -if sys.platform != 'win32':
    +if TIMESPEC is not None:
    +    class CConfig_for_timespec:
    +        _compilation_info_ = compilation_info
    +        TIMESPEC = TIMESPEC
    +    TIMESPEC = lltype.Ptr(
    +        platform.configure(CConfig_for_timespec)['TIMESPEC'])
    +
    +
    +def posix_declaration(try_to_add=None):
    +    global STAT_STRUCT
     
         LL_STAT_FIELDS = STAT_FIELDS[:]
    +    if try_to_add:
    +        LL_STAT_FIELDS.append(try_to_add)
     
         if TIMESPEC is not None:
    -        class CConfig_for_timespec:
    -            _compilation_info_ = compilation_info
    -            TIMESPEC = TIMESPEC
    -
    -        TIMESPEC = lltype.Ptr(
    -            platform.configure(CConfig_for_timespec)['TIMESPEC'])
     
             def _expand(lst, originalname, timespecname):
                 for i, (_name, _TYPE) in enumerate(lst):
    @@ -178,9 +172,34 @@
         class CConfig:
             _compilation_info_ = compilation_info
             STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS)
    -    config = platform.configure(CConfig)
    +    try:
    +        config = platform.configure(CConfig)
    +    except platform.CompilationError:
    +        if try_to_add:
    +            return    # failed to add this field, give up
    +        raise
     
         STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT'])
    +    if try_to_add:
    +        STAT_FIELDS.append(try_to_add)
    +
    +
    +# This lists only the fields that have been found on the underlying platform.
    +# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the
    +# following loop.
    +STAT_FIELDS = PORTABLE_STAT_FIELDS[:]
    +
    +if sys.platform != 'win32':
    +    posix_declaration()
    +    for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)):
    +        posix_declaration(ALL_STAT_FIELDS[_i])
    +    del _i
    +
    +# these two global vars only list the fields defined in the underlying platform
    +STAT_FIELD_TYPES = dict(STAT_FIELDS)      # {'st_xxx': TYPE}
    +STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS]
    +del _name, _TYPE
    +
     
     def build_stat_result(st):
         # only for LL backends
    diff --git a/pypy/rpython/module/test/test_ll_os_stat.py b/pypy/rpython/module/test/test_ll_os_stat.py
    --- a/pypy/rpython/module/test/test_ll_os_stat.py
    +++ b/pypy/rpython/module/test/test_ll_os_stat.py
    @@ -2,6 +2,16 @@
     import sys, os
     import py
     
    +
    +class TestLinuxImplementation:
    +    def setup_class(cls):
    +        if not sys.platform.startswith('linux'):
    +            py.test.skip("linux specific tests")
    +
    +    def test_has_all_fields(self):
    +        assert ll_os_stat.STAT_FIELDS == ll_os_stat.ALL_STAT_FIELDS[:13]
    +
    +
     class TestWin32Implementation:
         def setup_class(cls):
             if sys.platform != 'win32':
    diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
    --- a/pypy/tool/jitlogparser/parser.py
    +++ b/pypy/tool/jitlogparser/parser.py
    @@ -8,6 +8,7 @@
         bridge = None
         offset = None
         asm = None
    +    failargs = ()
     
         def __init__(self, name, args, res, descr):
             self.name = name
    @@ -18,8 +19,8 @@
             if self._is_guard:
                 self.guard_no = int(self.descr[len(' " + arg)
    -        if len(argv) > 2 and argv[1] == '--heapsize':
    -            # Undocumented option, handled at interp-level.
    -            # It has silently no effect with some GCs.
    -            # It works in Boehm and in the semispace or generational GCs
    -            # (but see comments in semispace.py:set_max_heap_size()).
    -            # At the moment this option exists mainly to support sandboxing.
    -            from pypy.rlib import rgc
    -            rgc.set_max_heap_size(int(argv[2]))
    -            argv = argv[:1] + argv[3:]
    -        try:
    -            try:
    -                space.timer.start("space.startup")
    -                space.call_function(w_run_toplevel, w_call_startup_gateway)
    -                space.timer.stop("space.startup")
    -                w_executable = space.wrap(argv[0])
    -                w_argv = space.newlist([space.wrap(s) for s in argv[1:]])
    -                space.timer.start("w_entry_point")
    -                w_exitcode = space.call_function(w_entry_point, w_executable, w_argv, w_os)
    -                space.timer.stop("w_entry_point")
    -                exitcode = space.int_w(w_exitcode)
    -                # try to pull it all in
    -            ##    from pypy.interpreter import main, interactive, error
    -            ##    con = interactive.PyPyConsole(space)
    -            ##    con.interact()
    -            except OperationError, e:
    -                debug("OperationError:")
    -                debug(" operror-type: " + e.w_type.getname(space))
    -                debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
    -                return 1
    -        finally:
    -            try:
    -                space.timer.start("space.finish")
    -                space.call_function(w_run_toplevel, w_call_finish_gateway)
    -                space.timer.stop("space.finish")
    -            except OperationError, e:
    -                debug("OperationError:")
    -                debug(" operror-type: " + e.w_type.getname(space))
    -                debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
    -                return 1
    -        space.timer.stop("Entrypoint")
    -        space.timer.dump()
    -        return exitcode
    -    return entry_point
    -
    -def call_finish(space):
    -    space.finish()
    -
    -def call_startup(space):
    -    space.startup()
    -
    -# _____ Define and setup target ___
    -
    -# for now this will do for option handling
    -
    -class PyPyTarget(object):
    -
    -    usage = SUPPRESS_USAGE
    -
    -    take_options = True
    -
    -    def opt_parser(self, config):
    -        parser = to_optparse(config, useoptions=["objspace.*"],
    -                             parserkwargs={'usage': self.usage})
    -        return parser
    -
    -    def handle_config(self, config, translateconfig):
    -        self.translateconfig = translateconfig
    -        # set up the objspace optimizations based on the --opt argument
    -        from pypy.config.pypyoption import set_pypy_opt_level
    -        set_pypy_opt_level(config, translateconfig.opt)
    -
    -        # as of revision 27081, multimethod.py uses the InstallerVersion1 by default
    -        # because it is much faster both to initialize and run on top of CPython.
    -        # The InstallerVersion2 is optimized for making a translator-friendly
    -        # structure for low level backends. However, InstallerVersion1 is still
    -        # preferable for high level backends, so we patch here.
    -
    -        from pypy.objspace.std import multimethod
    -        if config.objspace.std.multimethods == 'mrd':
    -            assert multimethod.InstallerVersion1.instance_counter == 0,\
    -                   'The wrong Installer version has already been instatiated'
    -            multimethod.Installer = multimethod.InstallerVersion2
    -        elif config.objspace.std.multimethods == 'doubledispatch':
    -            # don't rely on the default, set again here
    -            assert multimethod.InstallerVersion2.instance_counter == 0,\
    -                   'The wrong Installer version has already been instatiated'
    -            multimethod.Installer = multimethod.InstallerVersion1
    -
    -    def print_help(self, config):
    -        self.opt_parser(config).print_help()
    -
    -    def get_additional_config_options(self):
    -        from pypy.config.pypyoption import pypy_optiondescription
    -        return pypy_optiondescription
    -
    -    def target(self, driver, args):
    -        driver.exe_name = 'pypy-%(backend)s'
    -
    -        config = driver.config
    -        parser = self.opt_parser(config)
    -
    -        parser.parse_args(args)
    -
    -        # expose the following variables to ease debugging
    -        global space, entry_point
    -
    -        if config.objspace.allworkingmodules:
    -            from pypy.config.pypyoption import enable_allworkingmodules
    -            enable_allworkingmodules(config)
    -
    -        if config.translation.thread:
    -            config.objspace.usemodules.thread = True
    -        elif config.objspace.usemodules.thread:
    -            try:
    -                config.translation.thread = True
    -            except ConflictConfigError:
    -                # If --allworkingmodules is given, we reach this point
    -                # if threads cannot be enabled (e.g. they conflict with
    -                # something else).  In this case, we can try setting the
    -                # usemodules.thread option to False again.  It will
    -                # cleanly fail if that option was set to True by the
    -                # command-line directly instead of via --allworkingmodules.
    -                config.objspace.usemodules.thread = False
    -
    -        if config.translation.stackless:
    -            config.objspace.usemodules._stackless = True
    -        elif config.objspace.usemodules._stackless:
    -            try:
    -                config.translation.stackless = True
    -            except ConflictConfigError:
    -                raise ConflictConfigError("please use the --stackless option "
    -                                          "to translate.py instead of "
    -                                          "--withmod-_stackless directly")
    -
    -        if not config.translation.rweakref:
    -            config.objspace.usemodules._weakref = False
    -
    -        if self.translateconfig.goal_options.jit:
    -            config.objspace.usemodules.pypyjit = True
    -        elif config.objspace.usemodules.pypyjit:
    -            self.translateconfig.goal_options.jit = True
    -
    -        if config.translation.backend == "cli":
    -            config.objspace.usemodules.clr = True
    -        # XXX did it ever work?
    -        #elif config.objspace.usemodules.clr:
    -        #    config.translation.backend == "cli"
    -
    -        config.objspace.nofaking = True
    -        config.objspace.compiler = "ast"
    -        config.translating = True
    -
    -        import translate
    -        translate.log_config(config.objspace, "PyPy config object")
    - 
    -        # obscure hack to stuff the translation options into the translated PyPy
    -        import pypy.module.sys
    -        options = make_dict(config)
    -        wrapstr = 'space.wrap(%r)' % (options)
    -        pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr
    -
    -        return self.get_entry_point(config)
    -
    -    def portal(self, driver):
    -        from pypy.module.pypyjit.portal import get_portal
    -        return get_portal(driver)
    -    
    -    def get_entry_point(self, config):
    -        space = make_objspace(config)
    -
    -        # manually imports app_main.py
    -        filename = os.path.join(this_dir, 'app_main.py')
    -        w_dict = space.newdict()
    -        space.exec_(open(filename).read(), w_dict, w_dict)
    -        for modulename in EXTRA_MODULES:
    -            print 'pre-importing', modulename
    -            space.exec_("import " + modulename, w_dict, w_dict)
    -        print 'phew, ready'
    -        entry_point = create_entry_point(space, w_dict)
    -
    -        return entry_point, None, PyPyAnnotatorPolicy(single_space = space)
    -
    -    def interface(self, ns):
    -        for name in ['take_options', 'handle_config', 'print_help', 'target',
    -                     'portal',
    -                     'get_additional_config_options']:
    -            ns[name] = getattr(self, name)
    -
    -
    -PyPyTarget().interface(globals())
    -
    
    From noreply at buildbot.pypy.org  Wed Sep  7 12:58:58 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Wed,  7 Sep 2011 12:58:58 +0200 (CEST)
    Subject: [pypy-commit] pypy default: OS/X fix.
    Message-ID: <20110907105858.4BE1982213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47129:e556f833d5d4
    Date: 2011-09-07 12:58 +0200
    http://bitbucket.org/pypy/pypy/changeset/e556f833d5d4/
    
    Log:	OS/X fix.
    
    diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py
    --- a/pypy/module/pwd/test/test_pwd.py
    +++ b/pypy/module/pwd/test/test_pwd.py
    @@ -5,14 +5,17 @@
             cls.space = gettestobjspace(usemodules=['pwd'])
     
         def test_getpwuid(self):
    -        import pwd
    +        import pwd, sys
             raises(KeyError, pwd.getpwuid, -1)
             pw = pwd.getpwuid(0)
             assert pw.pw_name == 'root'
             assert isinstance(pw.pw_passwd, str)
             assert pw.pw_uid == 0
             assert pw.pw_gid == 0
    -        assert pw.pw_dir == '/root'
    +        if sys.platform.startswith('linux'):
    +            assert pw.pw_dir == '/root'
    +        else:
    +            assert pw.pw_dir.startswith('/')
             assert pw.pw_shell.startswith('/')
             #
             assert type(pw.pw_uid) is int
    
    From noreply at buildbot.pypy.org  Wed Sep  7 13:05:47 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Wed,  7 Sep 2011 13:05:47 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Fix tests on OS/X (hopefully).
    Message-ID: <20110907110547.8A1B082213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r47130:5190d760b26c
    Date: 2011-09-07 13:05 +0200
    http://bitbucket.org/pypy/pypy/changeset/5190d760b26c/
    
    Log:	Fix tests on OS/X (hopefully).
    
    diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py
    --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py
    @@ -29,11 +29,13 @@
             pow_addr, res = log.result
             assert res == 8.0 * 300
             loop, = log.loops_by_filename(self.filepath)
    +        if 'ConstClass(pow)' in repr(loop):   # e.g. OS/X
    +            pow_addr = 'ConstClass(pow)'
             assert loop.match_by_id('fficall', """
                 guard_not_invalidated(descr=...)
                 i17 = force_token()
                 setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
    -            f21 = call_release_gil(%d, 2.000000, 3.000000, descr=)
    +            f21 = call_release_gil(%s, 2.000000, 3.000000, descr=)
                 guard_not_forced(descr=...)
                 guard_no_exception(descr=...)
             """ % pow_addr)
    @@ -129,4 +131,5 @@
             assert opnames.count('call_release_gil') == 1
             idx = opnames.index('call_release_gil')
             call = ops[idx]
    -        assert int(call.args[0]) == fabs_addr
    +        assert (call.args[0] == 'ConstClass(fabs)' or    # e.g. OS/X
    +                int(call.args[0]) == fabs_addr)
    
    From noreply at buildbot.pypy.org  Wed Sep  7 14:55:50 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 14:55:50 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: move _ffi.Field and most of
     compute_shape to interp-level
    Message-ID: <20110907125550.76FC282213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47131:f348de8d9bd3
    Date: 2011-09-07 09:43 +0200
    http://bitbucket.org/pypy/pypy/changeset/f348de8d9bd3/
    
    Log:	move _ffi.Field and most of compute_shape to interp-level
    
    diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_ffi/__init__.py
    --- a/pypy/module/_ffi/__init__.py
    +++ b/pypy/module/_ffi/__init__.py
    @@ -8,9 +8,9 @@
             'FuncPtr': 'interp_funcptr.W_FuncPtr',
             'get_libc':'interp_funcptr.get_libc',
             '_StructDescr': 'interp_struct.W__StructDescr',
    +        'Field':     'interp_struct.W_Field',
         }
     
         appleveldefs = {
             'Structure': 'app_struct.Structure',
    -        'Field':     'app_struct.Field',
             }
    diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_ffi/app_struct.py
    --- a/pypy/module/_ffi/app_struct.py
    +++ b/pypy/module/_ffi/app_struct.py
    @@ -1,20 +1,5 @@
     import _ffi
     
    -class Field(object):
    -
    -    def __init__(self, name, ffitype):
    -        self.name = name
    -        self.ffitype = ffitype
    -        self.offset = -1
    -
    -    ## def __get__(self, obj, cls=None):
    -    ##     if obj is None:
    -    ##         return self
    -    ##     return getfield(obj._buffer, self.ffitype, self.offset)
    -
    -    ## def __set__(self, obj, value):
    -    ##     setfield(obj._buffer, self.ffitype, self.offset, value)
    -
     class MetaStructure(type):
     
         def __new__(cls, name, bases, dic):
    @@ -26,15 +11,9 @@
             fields = dic.get('_fields_')
             if fields is None:
                 return
    -        size = 0
    -        ffitypes = []
    +        struct_descr = _ffi._StructDescr(name, fields)
             for field in fields:
    -            field.offset = size # XXX: alignment!
    -            size += field.ffitype.sizeof()
    -            ffitypes.append(field.ffitype)
                 dic[field.name] = field
    -        alignment = 0 # XXX
    -        struct_descr = _ffi._StructDescr(name, size, alignment, ffitypes)
             dic['_struct_'] = struct_descr
     
     
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    --- a/pypy/module/_ffi/test/test_struct.py
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -3,9 +3,13 @@
     class AppTestStruct(BaseAppTestFFI):
     
         def test__StructDescr(self):
    -        from _ffi import _StructDescr, types
    +        from _ffi import _StructDescr, Field, types
             longsize = types.slong.sizeof()
    -        descr = _StructDescr('foo', longsize*2, 0, [types.slong, types.slong])
    +        fields = [
    +            Field('x', types.slong),
    +            Field('y', types.slong),
    +            ]
    +        descr = _StructDescr('foo', fields)
             assert descr.ffitype.sizeof() == longsize*2
             assert descr.ffitype.name == 'struct foo'
     
    @@ -24,4 +28,3 @@
             assert Point.y.offset == longsize
             assert Point._struct_.ffitype.sizeof() == longsize*2
             assert Point._struct_.ffitype.name == 'struct Point'
    -        
    
    From noreply at buildbot.pypy.org  Wed Sep  7 14:55:51 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 14:55:51 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: bah,
    	forgot to add this file in the previous checkin
    Message-ID: <20110907125551.AC51E82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47132:bbd9a00c7369
    Date: 2011-09-07 09:46 +0200
    http://bitbucket.org/pypy/pypy/changeset/bbd9a00c7369/
    
    Log:	bah, forgot to add this file in the previous checkin
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -0,0 +1,66 @@
    +from pypy.rpython.lltypesystem import lltype
    +from pypy.rlib import clibffi
    +from pypy.interpreter.baseobjspace import Wrappable
    +from pypy.interpreter.typedef import TypeDef, interp_attrproperty
    +from pypy.interpreter.gateway import interp2app, unwrap_spec
    +from pypy.objspace.std.typetype import type_typedef
    +from pypy.module._ffi.interp_ffitype import W_FFIType
    +
    +class W_Field(Wrappable):
    +
    +    def __init__(self, name, w_ffitype):
    +        self.name = name
    +        self.w_ffitype = w_ffitype
    +        self.offset = -1
    +
    +    @staticmethod
    +    @unwrap_spec(name=str)
    +    def descr_new(space, w_type, name, w_ffitype):
    +        w_ffitype = space.interp_w(W_FFIType, w_ffitype)
    +        return W_Field(name, w_ffitype)
    +
    +W_Field.typedef = TypeDef(
    +    'Field',
    +    __new__ = interp2app(W_Field.descr_new),
    +    name = interp_attrproperty('name', W_Field),
    +    ffitype = interp_attrproperty('w_ffitype', W_Field),
    +    offset = interp_attrproperty('offset', W_Field),
    +    )
    +
    +
    +# ==============================================================================
    +
    +
    +class W__StructDescr(Wrappable):
    +
    +    def __init__(self, name, ffistruct):
    +        self.ffistruct = ffistruct
    +        self.ffitype = W_FFIType('struct %s' % name, ffistruct.ffistruct, 'fixme')
    +
    +    @staticmethod
    +    @unwrap_spec(name=str)
    +    def descr_new(space, w_type, name, w_fields):
    +        size = 0
    +        alignment = 0 # XXX
    +        fields_w = space.fixedview(w_fields)
    +        field_types = []
    +        for w_field in fields_w:
    +            w_field = space.interp_w(W_Field, w_field)
    +            w_field.offset = size # XXX: alignment!
    +            size += w_field.w_ffitype.sizeof()
    +            field_types.append(w_field.w_ffitype.ffitype)
    +        #
    +        ffistruct = clibffi.make_struct_ffitype_e(size, alignment, field_types)
    +        return W__StructDescr(name, ffistruct)
    +
    +    def __del__(self):
    +        if self.ffistruct:
    +            lltype.free(self.ffistruct, flavor='raw')
    +
    +
    +W__StructDescr.typedef = TypeDef(
    +    '_StructDescr',
    +    __new__ = interp2app(W__StructDescr.descr_new),
    +    ffitype = interp_attrproperty('ffitype', W__StructDescr),
    +    )
    +
    
    From noreply at buildbot.pypy.org  Wed Sep  7 14:55:52 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 14:55:52 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: make it possible to allocate a struct,
     and set/get fields on it.  The only supported type is 'long' so far
    Message-ID: <20110907125552.E3BF582213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47133:63d9d0f04f6a
    Date: 2011-09-07 11:48 +0200
    http://bitbucket.org/pypy/pypy/changeset/63d9d0f04f6a/
    
    Log:	make it possible to allocate a struct, and set/get fields on it.
    	The only supported type is 'long' so far
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -1,10 +1,10 @@
    -from pypy.rpython.lltypesystem import lltype
    +from pypy.rpython.lltypesystem import lltype, rffi
     from pypy.rlib import clibffi
     from pypy.interpreter.baseobjspace import Wrappable
     from pypy.interpreter.typedef import TypeDef, interp_attrproperty
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.objspace.std.typetype import type_typedef
    -from pypy.module._ffi.interp_ffitype import W_FFIType
    +from pypy.module._ffi.interp_ffitype import W_FFIType, app_types
     
     class W_Field(Wrappable):
     
    @@ -30,12 +30,15 @@
     
     # ==============================================================================
     
    -
     class W__StructDescr(Wrappable):
     
    -    def __init__(self, name, ffistruct):
    +    def __init__(self, name, fields_w, ffistruct):
             self.ffistruct = ffistruct
    -        self.ffitype = W_FFIType('struct %s' % name, ffistruct.ffistruct, 'fixme')
    +        self.w_ffitype = W_FFIType('struct %s' % name, ffistruct.ffistruct, 'fixme')
    +        self.fields_w = fields_w
    +        self.name2w_field = {}
    +        for w_field in fields_w:
    +            self.name2w_field[w_field.name] = w_field
     
         @staticmethod
         @unwrap_spec(name=str)
    @@ -51,7 +54,15 @@
                 field_types.append(w_field.w_ffitype.ffitype)
             #
             ffistruct = clibffi.make_struct_ffitype_e(size, alignment, field_types)
    -        return W__StructDescr(name, ffistruct)
    +        return W__StructDescr(name, fields_w, ffistruct)
    +
    +    def allocate(self, space):
    +        return W__StructInstance(self)
    +
    +    #@jit.elidable...
    +    def get_type_and_offset_for_field(self, name):
    +        w_field = self.name2w_field[name]
    +        return w_field.w_ffitype, w_field.offset
     
         def __del__(self):
             if self.ffistruct:
    @@ -61,6 +72,60 @@
     W__StructDescr.typedef = TypeDef(
         '_StructDescr',
         __new__ = interp2app(W__StructDescr.descr_new),
    -    ffitype = interp_attrproperty('ffitype', W__StructDescr),
    +    ffitype = interp_attrproperty('w_ffitype', W__StructDescr),
    +    allocate = interp2app(W__StructDescr.allocate),
         )
     
    +
    +# ==============================================================================
    +
    +class W__StructInstance(Wrappable):
    +
    +    _immutable_fields_ = ['structdescr', 'rawmem']
    +
    +    def __init__(self, structdescr):
    +        self.structdescr = structdescr
    +        size = structdescr.w_ffitype.sizeof()
    +        self.rawmem = lltype.malloc(rffi.VOIDP.TO, size, flavor='raw',
    +                                    zero=True, add_memory_pressure=True)
    +
    +    def __del__(self):
    +        if self.rawmem:
    +            lltype.free(self.rawmem, flavor='raw')
    +            self.rawmem = lltype.nullptr(rffi.VOIDP.TO)
    +
    +    def getaddr(self, space):
    +        addr = rffi.cast(rffi.ULONG, self.rawmem)
    +        return space.wrap(addr)
    +
    +    @unwrap_spec(name=str)
    +    def getfield(self, space, name):
    +        w_ffitype, offset = self.structdescr.get_type_and_offset_for_field(name)
    +        assert w_ffitype is app_types.slong # XXX: handle all cases
    +        FIELD_TYPE  = rffi.LONG
    +        #
    +        addr = rffi.ptradd(self.rawmem, offset)
    +        PTR_FIELD = lltype.Ptr(rffi.CArray(FIELD_TYPE))
    +        value = rffi.cast(PTR_FIELD, addr)[0]
    +        #
    +        return space.wrap(value)
    +
    +    @unwrap_spec(name=str)
    +    def setfield(self, space, name, w_value):
    +        w_ffitype, offset = self.structdescr.get_type_and_offset_for_field(name)
    +        assert w_ffitype is app_types.slong # XXX: handle all cases
    +        FIELD_TYPE  = rffi.LONG
    +        value = space.int_w(w_value)
    +        #
    +        addr = rffi.ptradd(self.rawmem, offset)
    +        PTR_FIELD = lltype.Ptr(rffi.CArray(FIELD_TYPE))
    +        rffi.cast(PTR_FIELD, addr)[0] = value
    +
    +
    +
    +W__StructInstance.typedef = TypeDef(
    +    '_StructInstance',
    +    getaddr  = interp2app(W__StructInstance.getaddr),
    +    getfield = interp2app(W__StructInstance.getfield),
    +    setfield = interp2app(W__StructInstance.setfield),
    +    )
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    --- a/pypy/module/_ffi/test/test_struct.py
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -2,6 +2,20 @@
     
     class AppTestStruct(BaseAppTestFFI):
     
    +    def setup_class(cls):
    +        BaseAppTestFFI.setup_class.im_func(cls)
    +        #
    +        def read_raw_mem(self, addr, typename, length):
    +            import ctypes
    +            addr = ctypes.cast(addr, ctypes.c_void_p)
    +            c_type = getattr(ctypes, typename)
    +            array_type = ctypes.POINTER(c_type * length)
    +            ptr_array = ctypes.cast(addr, array_type)
    +            array = ptr_array[0]
    +            lst = [array[i] for i in range(length)]
    +            return lst
    +        cls.w_read_raw_mem = cls.space.wrap(read_raw_mem)
    +        
         def test__StructDescr(self):
             from _ffi import _StructDescr, Field, types
             longsize = types.slong.sizeof()
    @@ -28,3 +42,19 @@
             assert Point.y.offset == longsize
             assert Point._struct_.ffitype.sizeof() == longsize*2
             assert Point._struct_.ffitype.name == 'struct Point'
    +
    +    def test_getfield_setfield(self):
    +        from _ffi import _StructDescr, Field, types
    +        longsize = types.slong.sizeof()
    +        fields = [
    +            Field('x', types.slong),
    +            Field('y', types.slong),
    +            ]
    +        descr = _StructDescr('foo', fields)
    +        struct = descr.allocate()
    +        struct.setfield('x', 42)
    +        struct.setfield('y', 43)
    +        assert struct.getfield('x') == 42
    +        assert struct.getfield('y') == 43
    +        mem = self.read_raw_mem(struct.getaddr(), 'c_long', 2)
    +        assert mem == [42, 43]
    
    From noreply at buildbot.pypy.org  Wed Sep  7 14:55:54 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 14:55:54 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: move the code to get/set arbitrary
     struct fields to rlib.libffi. Eventually,
     these two functions will be recognized and optimized by the JIT
    Message-ID: <20110907125554.24BF482213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47134:750b3e0a7cf1
    Date: 2011-09-07 14:53 +0200
    http://bitbucket.org/pypy/pypy/changeset/750b3e0a7cf1/
    
    Log:	move the code to get/set arbitrary struct fields to rlib.libffi.
    	Eventually, these two functions will be recognized and optimized by
    	the JIT
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -1,5 +1,6 @@
     from pypy.rpython.lltypesystem import lltype, rffi
     from pypy.rlib import clibffi
    +from pypy.rlib import libffi
     from pypy.interpreter.baseobjspace import Wrappable
     from pypy.interpreter.typedef import TypeDef, interp_attrproperty
     from pypy.interpreter.gateway import interp2app, unwrap_spec
    @@ -104,10 +105,7 @@
             assert w_ffitype is app_types.slong # XXX: handle all cases
             FIELD_TYPE  = rffi.LONG
             #
    -        addr = rffi.ptradd(self.rawmem, offset)
    -        PTR_FIELD = lltype.Ptr(rffi.CArray(FIELD_TYPE))
    -        value = rffi.cast(PTR_FIELD, addr)[0]
    -        #
    +        value = libffi.struct_getfield(FIELD_TYPE, self.rawmem, offset)
             return space.wrap(value)
     
         @unwrap_spec(name=str)
    @@ -117,9 +115,7 @@
             FIELD_TYPE  = rffi.LONG
             value = space.int_w(w_value)
             #
    -        addr = rffi.ptradd(self.rawmem, offset)
    -        PTR_FIELD = lltype.Ptr(rffi.CArray(FIELD_TYPE))
    -        rffi.cast(PTR_FIELD, addr)[0] = value
    +        libffi.struct_setfield(FIELD_TYPE, self.rawmem, offset, value)
     
     
     
    diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py
    --- a/pypy/rlib/libffi.py
    +++ b/pypy/rlib/libffi.py
    @@ -410,3 +410,29 @@
     
         def getaddressindll(self, name):
             return dlsym(self.lib, name)
    +
    +
    +# ======================================================================
    +
    + at jit.dont_look_inside
    + at specialize.arg(0)
    +def struct_getfield(TYPE, addr, offset):
    +    """
    +    Read the field of type TYPE at addr+offset.
    +    addr is of type rffi.VOIDP, offset is an int.
    +    """
    +    addr = rffi.ptradd(addr, offset)
    +    PTR_FIELD = lltype.Ptr(rffi.CArray(TYPE))
    +    return rffi.cast(PTR_FIELD, addr)[0]
    +
    +
    + at jit.dont_look_inside
    + at specialize.arg(0)
    +def struct_setfield(TYPE, addr, offset, value):
    +    """
    +    Write the field of type TYPE at addr+offset.
    +    addr is of type rffi.VOIDP, offset is an int.
    +    """
    +    addr = rffi.ptradd(addr, offset)
    +    PTR_FIELD = lltype.Ptr(rffi.CArray(TYPE))
    +    rffi.cast(PTR_FIELD, addr)[0] = value
    diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py
    --- a/pypy/rlib/test/test_libffi.py
    +++ b/pypy/rlib/test/test_libffi.py
    @@ -5,7 +5,7 @@
     from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong
     from pypy.rlib.test.test_clibffi import BaseFfiTest, get_libm_name, make_struct_ffitype_e
     from pypy.rlib.libffi import CDLL, Func, get_libc_name, ArgChain, types
    -from pypy.rlib.libffi import IS_32_BIT
    +from pypy.rlib.libffi import IS_32_BIT, struct_getfield, struct_setfield
     
     class TestLibffiMisc(BaseFfiTest):
     
    @@ -52,6 +52,24 @@
             del lib
             assert not ALLOCATED
     
    +    def test_struct_fields(self):
    +        longsize = 4 if IS_32_BIT else 8
    +        POINT = lltype.Struct('POINT', ('x', rffi.LONG), ('y', rffi.LONG))
    +        p = lltype.malloc(POINT, flavor='raw')
    +        p.x = 42
    +        p.y = 43
    +        addr = rffi.cast(rffi.VOIDP, p)
    +        assert struct_getfield(rffi.LONG, addr, 0) == 42
    +        assert struct_getfield(rffi.LONG, addr, longsize) == 43
    +        #
    +        struct_setfield(rffi.LONG, addr, 0, 123)
    +        struct_setfield(rffi.LONG, addr, longsize, 321)
    +        assert p.x == 123
    +        assert p.y == 321
    +        #
    +        lltype.free(p, flavor='raw')
    +        
    +
     class TestLibffiCall(BaseFfiTest):
         """
         Test various kind of calls through libffi.
    
    From noreply at buildbot.pypy.org  Wed Sep  7 14:55:55 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 14:55:55 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: disable this check for now,
    	or else it won't translate
    Message-ID: <20110907125555.5969982213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47135:55a06bcd9f06
    Date: 2011-09-07 14:55 +0200
    http://bitbucket.org/pypy/pypy/changeset/55a06bcd9f06/
    
    Log:	disable this check for now, or else it won't translate
    
    diff --git a/pypy/module/_ffi/interp_ffitype.py b/pypy/module/_ffi/interp_ffitype.py
    --- a/pypy/module/_ffi/interp_ffitype.py
    +++ b/pypy/module/_ffi/interp_ffitype.py
    @@ -13,8 +13,9 @@
             self.ffitype = ffitype
             self.w_datashape = w_datashape
             self.w_pointer_to = w_pointer_to
    -        if self.is_struct():
    -            assert w_datashape is not None
    +        ## XXX: re-enable this check when the ffistruct branch is done
    +        ## if self.is_struct():
    +        ##     assert w_datashape is not None
     
         def descr_deref_pointer(self, space):
             if self.w_pointer_to is None:
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -35,7 +35,7 @@
     
         def __init__(self, name, fields_w, ffistruct):
             self.ffistruct = ffistruct
    -        self.w_ffitype = W_FFIType('struct %s' % name, ffistruct.ffistruct, 'fixme')
    +        self.w_ffitype = W_FFIType('struct %s' % name, ffistruct.ffistruct, None)
             self.fields_w = fields_w
             self.name2w_field = {}
             for w_field in fields_w:
    
    From noreply at buildbot.pypy.org  Wed Sep  7 15:13:14 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 15:13:14 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: fix XXX: also
     record newness of non-vtable memory
    Message-ID: <20110907131314.B6E1382213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47136:f7596aea7542
    Date: 2011-09-07 11:35 +0200
    http://bitbucket.org/pypy/pypy/changeset/f7596aea7542/
    
    Log:	fix XXX: also record newness of non-vtable memory add a new one XXX
    
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -367,8 +367,9 @@
     
         @arguments("descr")
         def opimpl_new(self, sizedescr):
    -        # XXX heapcache.new
    -        return self.execute_with_descr(rop.NEW, sizedescr)
    +        resbox = self.execute_with_descr(rop.NEW, sizedescr)
    +        self.metainterp.heapcache.new(resbox)
    +        return resbox
     
         @arguments("descr")
         def opimpl_new_with_vtable(self, sizedescr):
    @@ -544,6 +545,7 @@
             if tobox is not None:
                 return tobox
             resbox = self.execute_with_descr(opnum, fielddescr, box)
    +        # XXX getfield_now_known
             self.metainterp.heapcache.setfield(box, fielddescr, resbox)
             return resbox
     
    diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
    --- a/pypy/jit/metainterp/test/test_tracingopts.py
    +++ b/pypy/jit/metainterp/test/test_tracingopts.py
    @@ -450,6 +450,26 @@
             assert res == 2 * -7 + 2 * -8
             self.check_operations_history(getfield_gc=0)
     
    +    def test_heap_caching_multiple_tuples(self):
    +        class Gbl(object):
    +            pass
    +        g = Gbl()
    +        def gn(a1, a2):
    +            return a1[0] + a2[0]
    +        def fn(n):
    +            a1 = (n, )
    +            g.a = a1
    +            a2 = (n - 1, )
    +            g.a = a2
    +            jit.promote(n)
    +            return a1[0] + a2[0] + gn(a1, a2)
    +        res = self.interp_operations(fn, [7])
    +        assert res == 2 * 7 + 2 * 6
    +        self.check_operations_history(getfield_gc_pure=0)
    +        res = self.interp_operations(fn, [-7])
    +        assert res == 2 * -7 + 2 * -8
    +        self.check_operations_history(getfield_gc_pure=0)
    +
         def test_heap_caching_multiple_arrays(self):
             class Gbl(object):
                 pass
    
    From noreply at buildbot.pypy.org  Wed Sep  7 15:13:15 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 15:13:15 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: test and fix: make
     getfield use getfield_now_known
    Message-ID: <20110907131315.F245E82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47137:7213ef516d50
    Date: 2011-09-07 14:02 +0200
    http://bitbucket.org/pypy/pypy/changeset/7213ef516d50/
    
    Log:	test and fix: make getfield use getfield_now_known
    
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -545,8 +545,7 @@
             if tobox is not None:
                 return tobox
             resbox = self.execute_with_descr(opnum, fielddescr, box)
    -        # XXX getfield_now_known
    -        self.metainterp.heapcache.setfield(box, fielddescr, resbox)
    +        self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox)
             return resbox
     
         @arguments("orgpc", "box", "descr")
    diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
    --- a/pypy/jit/metainterp/test/test_tracingopts.py
    +++ b/pypy/jit/metainterp/test/test_tracingopts.py
    @@ -428,27 +428,38 @@
             self.check_operations_history(getfield_gc=0)
             return
     
    -
         def test_heap_caching_multiple_objects(self):
             class Gbl(object):
                 pass
             g = Gbl()
             class A(object):
                 pass
    +        a1 = A()
    +        g.a1 = a1
    +        a1.x = 7
    +        a2 = A()
    +        g.a2 = a2
    +        a2.x = 7
    +        def gn(a1, a2):
    +            return a1.x + a2.x
             def fn(n):
    -            a1 = A()
    -            g.a = a1
    -            a1.x = n
    -            a2 = A()
    -            g.a = a2
    -            a2.x = n - 1
    -            return a1.x + a2.x + a1.x + a2.x
    -        res = self.interp_operations(fn, [7])
    -        assert res == 2 * 7 + 2 * 6
    -        self.check_operations_history(getfield_gc=0)
    +            if n < 0:
    +                a1 = A()
    +                g.a1 = a1
    +                a1.x = n
    +                a2 = A()
    +                g.a2 = a2
    +                a2.x = n - 1
    +            else:
    +                a1 = g.a1
    +                a2 = g.a2
    +            return a1.x + a2.x + gn(a1, a2)
             res = self.interp_operations(fn, [-7])
             assert res == 2 * -7 + 2 * -8
    -        self.check_operations_history(getfield_gc=0)
    +        self.check_operations_history(setfield_gc=4, getfield_gc=0)
    +        res = self.interp_operations(fn, [7])
    +        assert res == 4 * 7
    +        self.check_operations_history(getfield_gc=4)
     
         def test_heap_caching_multiple_tuples(self):
             class Gbl(object):
    
    From noreply at buildbot.pypy.org  Wed Sep  7 15:13:17 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 15:13:17 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: make list
     allocation go through the heap cache
    Message-ID: <20110907131317.38C2C82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47138:60e23189885e
    Date: 2011-09-07 14:13 +0200
    http://bitbucket.org/pypy/pypy/changeset/60e23189885e/
    
    Log:	make list allocation go through the heap cache
    
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -480,11 +480,9 @@
         @arguments("descr", "descr", "descr", "descr", "box")
         def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
                            sizebox):
    -        # XXX use heapcache
    -        sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
    +        sbox = self.opimpl_new(structdescr)
             self._opimpl_setfield_gc_any(sbox, lengthdescr, sizebox)
    -        abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
    -                                                  sizebox)
    +        abox = self.opimpl_new_array(arraydescr, sizebox)
             self._opimpl_setfield_gc_any(sbox, itemsdescr, abox)
             return sbox
     
    diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
    --- a/pypy/jit/metainterp/test/test_tracingopts.py
    +++ b/pypy/jit/metainterp/test/test_tracingopts.py
    @@ -521,6 +521,29 @@
             assert res == 2 * 7 + 2 * 8
             self.check_operations_history(getarrayitem_gc=2)
     
    +
    +    def test_heap_caching_multiple_lists(self):
    +        class Gbl(object):
    +            pass
    +        g = Gbl()
    +        g.l = []
    +        def fn(n):
    +            if n < -100:
    +                g.l.append(1)
    +            a1 = [n, n, n]
    +            g.l = a1
    +            a1[0] = n
    +            a2 = [n, n, n]
    +            g.l = a2
    +            a2[0] = n - 1
    +            return a1[0] + a2[0] + a1[0] + a2[0]
    +        res = self.interp_operations(fn, [7])
    +        assert res == 2 * 7 + 2 * 6
    +        self.check_operations_history(getarrayitem_gc=0, getfield_gc=0)
    +        res = self.interp_operations(fn, [-7])
    +        assert res == 2 * -7 + 2 * -8
    +        self.check_operations_history(getarrayitem_gc=0, getfield_gc=0)
    +
         def test_length_caching(self):
             class Gbl(object):
                 pass
    
    From noreply at buildbot.pypy.org  Wed Sep  7 15:13:18 2011
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Wed,  7 Sep 2011 15:13:18 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: add a sanity
     check: whenever a get operation is not generated, because the heap
    Message-ID: <20110907131318.7349782213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: improve-heap-caching-tracing
    Changeset: r47139:4a4c1b008238
    Date: 2011-09-07 15:12 +0200
    http://bitbucket.org/pypy/pypy/changeset/4a4c1b008238/
    
    Log:	add a sanity check: whenever a get operation is not generated,
    	because the heap cache finds it, look at the concrete objects and
    	see whether their contents corresponds to what the heapcache says it
    	should
    
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -406,6 +406,11 @@
             tobox = self.metainterp.heapcache.getarrayitem(
                     arraybox, arraydescr, indexbox)
             if tobox:
    +            # sanity check: see whether the current array value
    +            # corresponds to what the cache thinks the value is
    +            resbox = executor.execute(self.metainterp.cpu, self.metainterp,
    +                                      rop.GETARRAYITEM_GC, arraydescr, arraybox, indexbox)
    +            assert resbox.constbox().same_constant(tobox.constbox())
                 return tobox
             resbox = self.execute_with_descr(rop.GETARRAYITEM_GC,
                                              arraydescr, arraybox, indexbox)
    @@ -541,6 +546,10 @@
         def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr):
             tobox = self.metainterp.heapcache.getfield(box, fielddescr)
             if tobox is not None:
    +            # sanity check: see whether the current struct value
    +            # corresponds to what the cache thinks the value is
    +            resbox = executor.execute(self.metainterp.cpu, self.metainterp,
    +                                      rop.GETFIELD_GC, fielddescr, box)
                 return tobox
             resbox = self.execute_with_descr(opnum, fielddescr, box)
             self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox)
    
    From noreply at buildbot.pypy.org  Wed Sep  7 15:50:06 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 15:50:06 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: fix import
    Message-ID: <20110907135006.2333A82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47140:f307fe1ce048
    Date: 2011-09-07 15:09 +0200
    http://bitbucket.org/pypy/pypy/changeset/f307fe1ce048/
    
    Log:	fix import
    
    diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
    --- a/pypy/module/_rawffi/interp_rawffi.py
    +++ b/pypy/module/_rawffi/interp_rawffi.py
    @@ -253,7 +253,7 @@
             # XXX: this assumes that you have the _ffi module enabled. In the long
             # term, probably we will move the code for build structures and arrays
             # from _rawffi to _ffi
    -        from pypy.module._ffi.interp_ffi import W_FFIType
    +        from pypy.module._ffi.interp_ffitype import W_FFIType
             return W_FFIType('', self.get_basic_ffi_type(), self)
     
         @unwrap_spec(n=int)
    
    From noreply at buildbot.pypy.org  Wed Sep  7 15:50:07 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 15:50:07 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: bah,
     apparently interp2app functions cannot be class methods
    Message-ID: <20110907135007.552EA82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47141:839a7545459d
    Date: 2011-09-07 15:23 +0200
    http://bitbucket.org/pypy/pypy/changeset/839a7545459d/
    
    Log:	bah, apparently interp2app functions cannot be class methods
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -14,15 +14,14 @@
             self.w_ffitype = w_ffitype
             self.offset = -1
     
    -    @staticmethod
    -    @unwrap_spec(name=str)
    -    def descr_new(space, w_type, name, w_ffitype):
    -        w_ffitype = space.interp_w(W_FFIType, w_ffitype)
    -        return W_Field(name, w_ffitype)
    + at unwrap_spec(name=str)
    +def descr_new_field(space, w_type, name, w_ffitype):
    +    w_ffitype = space.interp_w(W_FFIType, w_ffitype)
    +    return W_Field(name, w_ffitype)
     
     W_Field.typedef = TypeDef(
         'Field',
    -    __new__ = interp2app(W_Field.descr_new),
    +    __new__ = interp2app(descr_new_field),
         name = interp_attrproperty('name', W_Field),
         ffitype = interp_attrproperty('w_ffitype', W_Field),
         offset = interp_attrproperty('offset', W_Field),
    @@ -41,22 +40,6 @@
             for w_field in fields_w:
                 self.name2w_field[w_field.name] = w_field
     
    -    @staticmethod
    -    @unwrap_spec(name=str)
    -    def descr_new(space, w_type, name, w_fields):
    -        size = 0
    -        alignment = 0 # XXX
    -        fields_w = space.fixedview(w_fields)
    -        field_types = []
    -        for w_field in fields_w:
    -            w_field = space.interp_w(W_Field, w_field)
    -            w_field.offset = size # XXX: alignment!
    -            size += w_field.w_ffitype.sizeof()
    -            field_types.append(w_field.w_ffitype.ffitype)
    -        #
    -        ffistruct = clibffi.make_struct_ffitype_e(size, alignment, field_types)
    -        return W__StructDescr(name, fields_w, ffistruct)
    -
         def allocate(self, space):
             return W__StructInstance(self)
     
    @@ -70,9 +53,26 @@
                 lltype.free(self.ffistruct, flavor='raw')
     
     
    + at unwrap_spec(name=str)
    +def descr_new_structdescr(space, w_type, name, w_fields):
    +    size = 0
    +    alignment = 0 # XXX
    +    fields_w = space.fixedview(w_fields)
    +    field_types = []
    +    for w_field in fields_w:
    +        w_field = space.interp_w(W_Field, w_field)
    +        w_field.offset = size # XXX: alignment!
    +        size += w_field.w_ffitype.sizeof()
    +        field_types.append(w_field.w_ffitype.ffitype)
    +    #
    +    ffistruct = clibffi.make_struct_ffitype_e(size, alignment, field_types)
    +    return W__StructDescr(name, fields_w, ffistruct)
    +
    +
    +
     W__StructDescr.typedef = TypeDef(
         '_StructDescr',
    -    __new__ = interp2app(W__StructDescr.descr_new),
    +    __new__ = interp2app(descr_new_structdescr),
         ffitype = interp_attrproperty('w_ffitype', W__StructDescr),
         allocate = interp2app(W__StructDescr.allocate),
         )
    
    From noreply at buildbot.pypy.org  Wed Sep  7 16:11:48 2011
    From: noreply at buildbot.pypy.org (arigo)
    Date: Wed,  7 Sep 2011 16:11:48 +0200 (CEST)
    Subject: [pypy-commit] benchmarks default: Reduce the delay and number of
    	retries.
    Message-ID: <20110907141148.EB81A82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r143:867c2dc2b16d
    Date: 2011-09-07 16:11 +0200
    http://bitbucket.org/pypy/benchmarks/changeset/867c2dc2b16d/
    
    Log:	Reduce the delay and number of retries.
    
    diff --git a/saveresults.py b/saveresults.py
    --- a/saveresults.py
    +++ b/saveresults.py
    @@ -93,7 +93,7 @@
         info += str(data['commitid']) + ", benchmark " + data['benchmark']
         print(info)
         try:
    -        retries = [10, 20, 30, 60, 150, 300]
    +        retries = [1, 2, 3, 6]
             while True:
                 try:
                     f = urllib2.urlopen(SPEEDURL + 'result/add/', params)
    
    From noreply at buildbot.pypy.org  Wed Sep  7 16:12:05 2011
    From: noreply at buildbot.pypy.org (edelsohn)
    Date: Wed,  7 Sep 2011 16:12:05 +0200 (CEST)
    Subject: [pypy-commit] pypy ppc-jit-backend: Import WORD from arch.py
    Message-ID: <20110907141205.3FEB682213@wyvern.cs.uni-duesseldorf.de>
    
    Author: edelsohn
    Branch: ppc-jit-backend
    Changeset: r47142:ec6bced5ed4f
    Date: 2011-09-07 10:05 -0400
    http://bitbucket.org/pypy/pypy/changeset/ec6bced5ed4f/
    
    Log:	Import WORD from arch.py
    
    diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py
    --- a/pypy/jit/backend/ppc/runner.py
    +++ b/pypy/jit/backend/ppc/runner.py
    @@ -12,7 +12,7 @@
     from pypy.jit.backend.x86 import regloc
     from pypy.jit.backend.x86.support import values_array
     from pypy.jit.backend.ppc.ppcgen.ppc_assembler import PPCBuilder
    -from pypy.jit.backend.ppc.ppcgen.arch import IS_PPC_32, NONVOLATILES
    +from pypy.jit.backend.ppc.ppcgen.arch import IS_PPC_32, WORD, NONVOLATILES
     import sys
     
     from pypy.tool.ansi_print import ansi_log
    
    From noreply at buildbot.pypy.org  Wed Sep  7 16:53:03 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 16:53:03 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: turn the interplevel KeyError into an
     applevel AttributeError
    Message-ID: <20110907145303.5A21F82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47143:845ee1dd654a
    Date: 2011-09-07 16:48 +0200
    http://bitbucket.org/pypy/pypy/changeset/845ee1dd654a/
    
    Log:	turn the interplevel KeyError into an applevel AttributeError
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -4,6 +4,7 @@
     from pypy.interpreter.baseobjspace import Wrappable
     from pypy.interpreter.typedef import TypeDef, interp_attrproperty
     from pypy.interpreter.gateway import interp2app, unwrap_spec
    +from pypy.interpreter.error import operationerrfmt
     from pypy.objspace.std.typetype import type_typedef
     from pypy.module._ffi.interp_ffitype import W_FFIType, app_types
     
    @@ -32,7 +33,8 @@
     
     class W__StructDescr(Wrappable):
     
    -    def __init__(self, name, fields_w, ffistruct):
    +    def __init__(self, space, name, fields_w, ffistruct):
    +        self.space = space
             self.ffistruct = ffistruct
             self.w_ffitype = W_FFIType('struct %s' % name, ffistruct.ffistruct, None)
             self.fields_w = fields_w
    @@ -45,7 +47,11 @@
     
         #@jit.elidable...
         def get_type_and_offset_for_field(self, name):
    -        w_field = self.name2w_field[name]
    +        try:
    +            w_field = self.name2w_field[name]
    +        except KeyError:
    +            raise operationerrfmt(self.space.w_AttributeError, '%s', name)
    +
             return w_field.w_ffitype, w_field.offset
     
         def __del__(self):
    diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py
    --- a/pypy/module/_ffi/test/test_struct.py
    +++ b/pypy/module/_ffi/test/test_struct.py
    @@ -58,3 +58,15 @@
             assert struct.getfield('y') == 43
             mem = self.read_raw_mem(struct.getaddr(), 'c_long', 2)
             assert mem == [42, 43]
    +
    +    def test_missing_field(self):
    +        from _ffi import _StructDescr, Field, types
    +        longsize = types.slong.sizeof()
    +        fields = [
    +            Field('x', types.slong),
    +            Field('y', types.slong),
    +            ]
    +        descr = _StructDescr('foo', fields)
    +        struct = descr.allocate()
    +        raises(AttributeError, "struct.getfield('missing')")
    +        raises(AttributeError, "struct.setfield('missing', 42)")
    
    From noreply at buildbot.pypy.org  Wed Sep  7 16:53:04 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 16:53:04 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: this method is elidable
    Message-ID: <20110907145304.8FA9182213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47144:303389ce8fb7
    Date: 2011-09-07 16:50 +0200
    http://bitbucket.org/pypy/pypy/changeset/303389ce8fb7/
    
    Log:	this method is elidable
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -1,6 +1,7 @@
     from pypy.rpython.lltypesystem import lltype, rffi
     from pypy.rlib import clibffi
     from pypy.rlib import libffi
    +from pypy.rlib import jit
     from pypy.interpreter.baseobjspace import Wrappable
     from pypy.interpreter.typedef import TypeDef, interp_attrproperty
     from pypy.interpreter.gateway import interp2app, unwrap_spec
    @@ -45,7 +46,7 @@
         def allocate(self, space):
             return W__StructInstance(self)
     
    -    #@jit.elidable...
    +    @jit.elidable
         def get_type_and_offset_for_field(self, name):
             try:
                 w_field = self.name2w_field[name]
    
    From noreply at buildbot.pypy.org  Wed Sep  7 16:53:05 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 16:53:05 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: fix the result of a bad rebase
    Message-ID: <20110907145305.C215282213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47145:4906bb0ec80b
    Date: 2011-09-07 16:52 +0200
    http://bitbucket.org/pypy/pypy/changeset/4906bb0ec80b/
    
    Log:	fix the result of a bad rebase
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -73,7 +73,7 @@
             field_types.append(w_field.w_ffitype.ffitype)
         #
         ffistruct = clibffi.make_struct_ffitype_e(size, alignment, field_types)
    -    return W__StructDescr(name, fields_w, ffistruct)
    +    return W__StructDescr(space, name, fields_w, ffistruct)
     
     
     
    
    From noreply at buildbot.pypy.org  Wed Sep  7 16:55:28 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 16:55:28 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: rpython fix
    Message-ID: <20110907145528.6D8AE82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47146:660db963a4f4
    Date: 2011-09-07 16:54 +0200
    http://bitbucket.org/pypy/pypy/changeset/660db963a4f4/
    
    Log:	rpython fix
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -65,15 +65,17 @@
         size = 0
         alignment = 0 # XXX
         fields_w = space.fixedview(w_fields)
    +    fields_w2 = [] # its items are annotated as W_Field
         field_types = []
         for w_field in fields_w:
             w_field = space.interp_w(W_Field, w_field)
             w_field.offset = size # XXX: alignment!
             size += w_field.w_ffitype.sizeof()
    +        fields_w2.append(w_field)
             field_types.append(w_field.w_ffitype.ffitype)
         #
         ffistruct = clibffi.make_struct_ffitype_e(size, alignment, field_types)
    -    return W__StructDescr(space, name, fields_w, ffistruct)
    +    return W__StructDescr(space, name, fields_w2, ffistruct)
     
     
     
    
    From noreply at buildbot.pypy.org  Wed Sep  7 17:38:47 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 17:38:47 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: make it elidable_promote,
     it should completely optimize away the offset/type lookup
    Message-ID: <20110907153847.EE2EF82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47147:f407a607b971
    Date: 2011-09-07 17:35 +0200
    http://bitbucket.org/pypy/pypy/changeset/f407a607b971/
    
    Log:	make it elidable_promote, it should completely optimize away the
    	offset/type lookup
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -46,7 +46,7 @@
         def allocate(self, space):
             return W__StructInstance(self)
     
    -    @jit.elidable
    +    @jit.elidable_promote()
         def get_type_and_offset_for_field(self, name):
             try:
                 w_field = self.name2w_field[name]
    
    From noreply at buildbot.pypy.org  Wed Sep  7 17:38:49 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 17:38:49 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: merge heads
    Message-ID: <20110907153849.33E5782213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47148:65841158be6f
    Date: 2011-09-07 17:38 +0200
    http://bitbucket.org/pypy/pypy/changeset/65841158be6f/
    
    Log:	merge heads
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -65,15 +65,17 @@
         size = 0
         alignment = 0 # XXX
         fields_w = space.fixedview(w_fields)
    +    fields_w2 = [] # its items are annotated as W_Field
         field_types = []
         for w_field in fields_w:
             w_field = space.interp_w(W_Field, w_field)
             w_field.offset = size # XXX: alignment!
             size += w_field.w_ffitype.sizeof()
    +        fields_w2.append(w_field)
             field_types.append(w_field.w_ffitype.ffitype)
         #
         ffistruct = clibffi.make_struct_ffitype_e(size, alignment, field_types)
    -    return W__StructDescr(space, name, fields_w, ffistruct)
    +    return W__StructDescr(space, name, fields_w2, ffistruct)
     
     
     
    
    From noreply at buildbot.pypy.org  Wed Sep  7 18:54:03 2011
    From: noreply at buildbot.pypy.org (hakanardo)
    Date: Wed,  7 Sep 2011 18:54:03 +0200 (CEST)
    Subject: [pypy-commit] pypy jit-str_in_preamble: closing branch.
     jit-short_from_state gave us string optimization in the preamble without
     the
    Message-ID: <20110907165403.D0D0E82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Hakan Ardo 
    Branch: jit-str_in_preamble
    Changeset: r47149:4723df6a9296
    Date: 2011-09-07 18:53 +0200
    http://bitbucket.org/pypy/pypy/changeset/4723df6a9296/
    
    Log:	closing branch. jit-short_from_state gave us string optimization in
    	the preamble without the drawbacks of this branch.
    
    
    From noreply at buildbot.pypy.org  Wed Sep  7 19:14:56 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Wed,  7 Sep 2011 19:14:56 +0200 (CEST)
    Subject: [pypy-commit] pypy ffistruct: we can't promote strings
    Message-ID: <20110907171456.4FCEC82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: ffistruct
    Changeset: r47150:ba6f286d77a8
    Date: 2011-09-07 17:55 +0200
    http://bitbucket.org/pypy/pypy/changeset/ba6f286d77a8/
    
    Log:	we can't promote strings
    
    diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py
    --- a/pypy/module/_ffi/interp_struct.py
    +++ b/pypy/module/_ffi/interp_struct.py
    @@ -46,7 +46,7 @@
         def allocate(self, space):
             return W__StructInstance(self)
     
    -    @jit.elidable_promote()
    +    @jit.elidable_promote('0')
         def get_type_and_offset_for_field(self, name):
             try:
                 w_field = self.name2w_field[name]
    
    From noreply at buildbot.pypy.org  Wed Sep  7 21:13:54 2011
    From: noreply at buildbot.pypy.org (hakanardo)
    Date: Wed,  7 Sep 2011 21:13:54 +0200 (CEST)
    Subject: [pypy-commit] pypy jit-duplicated_short_boxes: close merged branch
    Message-ID: <20110907191354.3306D82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Hakan Ardo 
    Branch: jit-duplicated_short_boxes
    Changeset: r47151:ccb8b7b10a93
    Date: 2011-09-07 20:55 +0200
    http://bitbucket.org/pypy/pypy/changeset/ccb8b7b10a93/
    
    Log:	close merged branch
    
    
    From noreply at buildbot.pypy.org  Wed Sep  7 21:13:55 2011
    From: noreply at buildbot.pypy.org (hakanardo)
    Date: Wed,  7 Sep 2011 21:13:55 +0200 (CEST)
    Subject: [pypy-commit] pypy default: Merge jit-duplicated_short_boxes,
     which introduces 3 things:
    Message-ID: <20110907191355.916E7822AB@wyvern.cs.uni-duesseldorf.de>
    
    Author: Hakan Ardo 
    Branch: 
    Changeset: r47152:9f54c3c0bb32
    Date: 2011-09-07 21:12 +0200
    http://bitbucket.org/pypy/pypy/changeset/9f54c3c0bb32/
    
    Log:	Merge jit-duplicated_short_boxes, which introduces 3 things:
    
    	1: ValueImporter. Instead of placing guards for the state of every
    	Value that survives from the preamble into the peeled loop in the
    	short preamble, only place guards for the Values that are actually
    	used during the optimization. This reduces the size of the short
    	preambles significantly.
    
    	2: UnrollableOptimizer. The optimizer has become more complex to
    	support unrolling, which has made it slower. Some of this complexity
    	is not needed when compiling bridges and has been split out into a
    	UnrollableOptimizer subclass which is only used for compiling loops
    	when unroll is enabled.
    
    	3: A strategy for solving conflicts among the short boxes. Conflicts
    	arise in situations such as:
    
    	 [i1, i2] i3 = int_add(i1, 1) i4 = int_mul(i3, 2) escape(i4)
    	jump(i1, i3)
    
    	Specializing the peeled loop to the state at the end of the preamble
    	would specialize it to situations when its inputargs are of the
    	form [i1, i1+1]. We don't want that. Instead a new box, i5, is
    	introduced, representing the second argument of the jump as well as
    	the second inputargument of the peeled loop. Prior to this branch it
    	was the other way around, a new box was introduced to represent
    	i1+1. That resulted in the loop invariant int_mul would not be moved
    	out of the loop. Instead a potential int_mul(i2, 2) would have been
    	removed.
    
    	The same kind of situation can occur when a setfield is cached. In
    	that case, the set of pure ops that's optimized out would be quite
    	random. This branch introduces a strict priority order:
    	  - ops found in the original trace
    	  - synthetic ops (setfields converted to getfields)
    	  - inputargs
    	  - potential ops that were never promoted to short_boxes. This makes the
    	effect of the optimizations less random and should always remove
    	loop invariant ops. Non loop invariant cases can still benefit from
    	unrolling but in exactly what situations has become more
    	complicated.
    
    diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
    --- a/pypy/jit/metainterp/optimizeopt/heap.py
    +++ b/pypy/jit/metainterp/optimizeopt/heap.py
    @@ -37,6 +37,12 @@
                 self.force_lazy_setfield(optheap)
                 assert not self.possible_aliasing(optheap, structvalue)
             cached_fieldvalue = self._cached_fields.get(structvalue, None)
    +
    +        # Hack to ensure constants are imported from the preamble
    +        if cached_fieldvalue and fieldvalue.is_constant(): 
    +            optheap.optimizer.ensure_imported(cached_fieldvalue)
    +            cached_fieldvalue = self._cached_fields.get(structvalue, None)
    +
             if cached_fieldvalue is not fieldvalue:
                 # common case: store the 'op' as lazy_setfield, and register
                 # myself in the optheap's _lazy_setfields_and_arrayitems list
    @@ -132,9 +138,7 @@
                             result = newresult
                         getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)],
                                              result, op.getdescr())
    -                    getop = shortboxes.add_potential(getop)
    -                    self._cached_fields_getfield_op[structvalue] = getop
    -                    self._cached_fields[structvalue] = optimizer.getvalue(result)
    +                    shortboxes.add_potential(getop, synthetic=True)
                     elif op.result is not None:
                         shortboxes.add_potential(op)
     
    diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
    --- a/pypy/jit/metainterp/optimizeopt/optimizer.py
    +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
    @@ -10,6 +10,7 @@
     from pypy.jit.metainterp.typesystem import llhelper, oohelper
     from pypy.tool.pairtype import extendabletype
     from pypy.rlib.debug import debug_start, debug_stop, debug_print
    +from pypy.rlib.objectmodel import specialize
     
     LEVEL_UNKNOWN    = '\x00'
     LEVEL_NONNULL    = '\x01'
    @@ -25,6 +26,9 @@
             self.descr = descr
             self.bound = bound
     
    +    def clone(self):
    +        return LenBound(self.mode, self.descr, self.bound.clone())
    +
     class OptValue(object):
         __metaclass__ = extendabletype
         _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', 'lenbound')
    @@ -88,8 +92,27 @@
                         assert False
                     guards.append(op)
                     self.lenbound.bound.make_guards(lenbox, guards)
    +        return guards
     
    -        return guards
    +    def import_from(self, other, optimizer):
    +        assert self.level <= LEVEL_NONNULL
    +        if other.level == LEVEL_CONSTANT:
    +            self.make_constant(other.get_key_box())
    +            optimizer.turned_constant(self)
    +        elif other.level == LEVEL_KNOWNCLASS:
    +            self.make_constant_class(other.known_class, -1)
    +        else:
    +            if other.level == LEVEL_NONNULL:
    +                self.ensure_nonnull()
    +            self.intbound.intersect(other.intbound)
    +            if other.lenbound:
    +                if self.lenbound:
    +                    assert other.lenbound.mode == self.lenbound.mode
    +                    assert other.lenbound.descr == self.lenbound.descr
    +                    self.lenbound.bound.intersect(other.lenbound.bound)
    +                else:
    +                    self.lenbound = other.lenbound.clone()
    +                    
     
         def force_box(self):
             return self.box
    @@ -308,7 +331,6 @@
             self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd)
             self.bool_boxes = {}
             self.pure_operations = args_dict()
    -        self.emitted_pure_operations = {}
             self.producer = {}
             self.pendingfields = []
             self.posponedop = None
    @@ -316,12 +338,11 @@
             self.quasi_immutable_deps = None
             self.opaque_pointers = {}
             self.newoperations = []
    -        self.emitting_dissabled = False
    -        self.emitted_guards = 0        
             if loop is not None:
                 self.call_pure_results = loop.call_pure_results
     
             self.set_optimizations(optimizations)
    +        self.setup()
     
         def set_optimizations(self, optimizations):
             if optimizations:
    @@ -348,23 +369,18 @@
             assert self.posponedop is None
     
         def new(self):
    +        new = Optimizer(self.metainterp_sd, self.loop)
    +        return self._new(new)
    +
    +    def _new(self, new):
             assert self.posponedop is None
    -        new = Optimizer(self.metainterp_sd, self.loop)
             optimizations = [o.new() for o in self.optimizations]
             new.set_optimizations(optimizations)
             new.quasi_immutable_deps = self.quasi_immutable_deps
             return new
             
         def produce_potential_short_preamble_ops(self, sb):
    -        for op in self.emitted_pure_operations:
    -            if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \
    -               op.getopnum() == rop.STRGETITEM or \
    -               op.getopnum() == rop.UNICODEGETITEM:
    -                if not self.getvalue(op.getarg(1)).is_constant():
    -                    continue
    -            sb.add_potential(op)
    -        for opt in self.optimizations:
    -            opt.produce_potential_short_preamble_ops(sb)
    +        raise NotImplementedError('This is implemented in unroll.UnrollableOptimizer')
     
         def turned_constant(self, value):
             for o in self.optimizations:
    @@ -386,19 +402,26 @@
             else:
                 return box
     
    +    @specialize.argtype(0)
         def getvalue(self, box):
             box = self.getinterned(box)
             try:
                 value = self.values[box]
             except KeyError:
                 value = self.values[box] = OptValue(box)
    +        self.ensure_imported(value)
             return value
     
    +    def ensure_imported(self, value):
    +        pass
    +
    +    @specialize.argtype(0)
         def get_constant_box(self, box):
             if isinstance(box, Const):
                 return box
             try:
                 value = self.values[box]
    +            self.ensure_imported(value)
             except KeyError:
                 return None
             if value.is_constant():
    @@ -481,18 +504,22 @@
         def emit_operation(self, op):
             if op.returns_bool_result():
                 self.bool_boxes[self.getvalue(op.result)] = None
    -        if self.emitting_dissabled:
    -            return
    +        self._emit_operation(op)
             
    +    @specialize.argtype(0)
    +    def _emit_operation(self, op):        
             for i in range(op.numargs()):
                 arg = op.getarg(i)
    -            if arg in self.values:
    -                box = self.values[arg].force_box()
    -                op.setarg(i, box)
    +            try:
    +                value = self.values[arg]
    +            except KeyError:
    +                pass
    +            else:
    +                self.ensure_imported(value)
    +                op.setarg(i, value.force_box())
             self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
             if op.is_guard():
                 self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
    -            self.emitted_guards += 1 # FIXME: can we reuse above counter?
                 op = self.store_final_boxes_in_guard(op)
             elif op.can_raise():
                 self.exception_might_have_happened = True
    @@ -544,6 +571,7 @@
             args[n+1] = op.getdescr()
             return args
     
    +    @specialize.argtype(0)
         def optimize_default(self, op):
             canfold = op.is_always_pure()
             if op.is_ovf():
    @@ -579,13 +607,16 @@
                     return
                 else:
                     self.pure_operations[args] = op
    -                self.emitted_pure_operations[op] = True
    +                self.remember_emitting_pure(op)
     
             # otherwise, the operation remains
             self.emit_operation(op)
             if nextop:
                 self.emit_operation(nextop)
     
    +    def remember_emitting_pure(self, op):
    +        pass
    +    
         def constant_fold(self, op):
             argboxes = [self.get_constant_box(op.getarg(i))
                         for i in range(op.numargs())]
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
    @@ -472,7 +472,13 @@
             [i0]
             jump(i0)
             """
    -        self.optimize_loop(ops, expected, preamble)
    +        short = """
    +        [i0]
    +        i1 = int_is_true(i0)
    +        guard_value(i1, 1) []
    +        jump(i0)
    +        """
    +        self.optimize_loop(ops, expected, preamble, expected_short=short)
     
         def test_bound_int_is_true(self):
             ops = """
    @@ -6997,6 +7003,26 @@
             """
             self.optimize_loop(ops, expected)
             
    +    def test_cached_pure_func_of_equal_fields(self):
    +        ops = """
    +        [p5, p6]
    +        i10 = getfield_gc(p5, descr=valuedescr)
    +        i11 = getfield_gc(p6, descr=nextdescr)
    +        i12 = int_add(i10, 7)
    +        i13 = int_add(i11, 7)
    +        call(i12, i13, descr=nonwritedescr)
    +        setfield_gc(p6, i10, descr=nextdescr)        
    +        jump(p5, p6)
    +        """
    +        expected = """
    +        [p5, p6, i14, i12, i10]
    +        i13 = int_add(i14, 7)
    +        call(i12, i13, descr=nonwritedescr)
    +        setfield_gc(p6, i10, descr=nextdescr)        
    +        jump(p5, p6, i10, i12, i10)
    +        """
    +        self.optimize_loop(ops, expected)
    +        
         def test_forced_counter(self):
             # XXX: VIRTUALHEAP (see above)
             py.test.skip("would be fixed by make heap optimizer aware of virtual setfields")
    @@ -7086,8 +7112,84 @@
             """
             self.optimize_loop(ops, expected)
     
    +    def test_import_constants_when_folding_pure_operations(self):
    +        ops = """
    +        [p0]
    +        f1 = getfield_gc(p0, descr=valuedescr)
    +        f2 = float_abs(f1)
    +        call(7.0, descr=nonwritedescr)
    +        setfield_gc(p0, -7.0, descr=valuedescr)
    +        jump(p0)
    +        """
    +        expected = """
    +        [p0]
    +        call(7.0, descr=nonwritedescr)
    +        jump(p0)
    +        """
    +        self.optimize_loop(ops, expected)
    +
    +    def test_exploding_duplicatipon(self):
    +        ops = """
    +        [i1, i2]
    +        i3 = int_add(i1, i1)
    +        i4 = int_add(i3, i3)
    +        i5 = int_add(i4, i4)
    +        i6 = int_add(i5, i5)
    +        call(i6, descr=nonwritedescr)
    +        jump(i1, i3)
    +        """
    +        expected = """
    +        [i1, i2, i6, i3]
    +        call(i6, descr=nonwritedescr)
    +        jump(i1, i3, i6, i3)
    +        """
    +        short = """
    +        [i1, i2]
    +        i3 = int_add(i1, i1)
    +        i4 = int_add(i3, i3)
    +        i5 = int_add(i4, i4)
    +        i6 = int_add(i5, i5)
    +        jump(i1, i2, i6, i3)
    +        """
    +        self.optimize_loop(ops, expected, expected_short=short)
    +
    +    def test_prioritize_getfield1(self):
    +        ops = """
    +        [p1, p2]
    +        i1 = getfield_gc(p1, descr=valuedescr)
    +        setfield_gc(p2, i1, descr=nextdescr)
    +        i2 = int_neg(i1)
    +        call(i2, descr=nonwritedescr)
    +        jump(p1, p2)
    +        """
    +        expected = """
    +        [p1, p2, i2, i1]
    +        call(i2, descr=nonwritedescr)
    +        setfield_gc(p2, i1, descr=nextdescr)        
    +        jump(p1, p2, i2, i1)
    +        """
    +        self.optimize_loop(ops, expected)
    +
    +    def test_prioritize_getfield2(self):
    +        # Same as previous, but with descrs intercahnged which means
    +        # that the getfield is discovered first when looking for
    +        # potential short boxes during tests
    +        ops = """
    +        [p1, p2]
    +        i1 = getfield_gc(p1, descr=nextdescr)
    +        setfield_gc(p2, i1, descr=valuedescr)
    +        i2 = int_neg(i1)
    +        call(i2, descr=nonwritedescr)
    +        jump(p1, p2)
    +        """
    +        expected = """
    +        [p1, p2, i2, i1]
    +        call(i2, descr=nonwritedescr)
    +        setfield_gc(p2, i1, descr=valuedescr)        
    +        jump(p1, p2, i2, i1)
    +        """
    +        self.optimize_loop(ops, expected)
             
    -
     class TestLLtype(OptimizeOptTest, LLtypeMixin):
         pass
             
    diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
    --- a/pypy/jit/metainterp/optimizeopt/unroll.py
    +++ b/pypy/jit/metainterp/optimizeopt/unroll.py
    @@ -70,6 +70,47 @@
             self.snapshot_map[snapshot] = new_snapshot
             return new_snapshot
     
    +class UnrollableOptimizer(Optimizer):
    +    def setup(self):
    +        self.importable_values = {}
    +        self.emitting_dissabled = False
    +        self.emitted_guards = 0
    +        self.emitted_pure_operations = {}
    +
    +    def ensure_imported(self, value):
    +        if not self.emitting_dissabled and value in self.importable_values:
    +            imp = self.importable_values[value]
    +            del self.importable_values[value]
    +            imp.import_value(value)
    +
    +    def emit_operation(self, op):
    +        if op.returns_bool_result():
    +            self.bool_boxes[self.getvalue(op.result)] = None
    +        if self.emitting_dissabled:
    +            return
    +        if op.is_guard():
    +            self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation?
    +        self._emit_operation(op)
    +
    +    def new(self):
    +        new = UnrollableOptimizer(self.metainterp_sd, self.loop)
    +        return self._new(new)
    +
    +    def remember_emitting_pure(self, op):
    +        self.emitted_pure_operations[op] = True
    +
    +    def produce_potential_short_preamble_ops(self, sb):
    +        for op in self.emitted_pure_operations:
    +            if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \
    +               op.getopnum() == rop.STRGETITEM or \
    +               op.getopnum() == rop.UNICODEGETITEM:
    +                if not self.getvalue(op.getarg(1)).is_constant():
    +                    continue
    +            sb.add_potential(op)
    +        for opt in self.optimizations:
    +            opt.produce_potential_short_preamble_ops(sb)
    +
    +
     
     class UnrollOptimizer(Optimization):
         """Unroll the loop into two iterations. The first one will
    @@ -77,7 +118,7 @@
         distinction anymore)"""
     
         def __init__(self, metainterp_sd, loop, optimizations):
    -        self.optimizer = Optimizer(metainterp_sd, loop, optimizations)
    +        self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations)
             self.cloned_operations = []
             for op in self.optimizer.loop.operations:
                 newop = op.clone()
    @@ -150,6 +191,7 @@
                     args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs])
                     debug_print('short inputargs: ' + args)
                     self.short_boxes.debug_print(logops)
    +                
     
                 # Force virtuals amoung the jump_args of the preamble to get the
                 # operations needed to setup the proper state of those virtuals
    @@ -161,8 +203,9 @@
                     if box in seen:
                         continue
                     seen[box] = True
    -                value = preamble_optimizer.getvalue(box)
    -                inputarg_setup_ops.extend(value.make_guards(box))
    +                preamble_value = preamble_optimizer.getvalue(box)
    +                value = self.optimizer.getvalue(box)
    +                value.import_from(preamble_value, self.optimizer)
                 for box in short_inputargs:
                     if box in seen:
                         continue
    @@ -181,23 +224,16 @@
                 for op in self.short_boxes.operations():
                     self.ensure_short_op_emitted(op, self.optimizer, seen)
                     if op and op.result:
    -                    # The order of these guards is not important as 
    -                    # self.optimizer.emitting_dissabled is False
    -                    value = preamble_optimizer.getvalue(op.result)
    -                    for guard in value.make_guards(op.result):
    -                        self.optimizer.send_extra_operation(guard)
    +                    preamble_value = preamble_optimizer.getvalue(op.result)
    +                    value = self.optimizer.getvalue(op.result)
    +                    imp = ValueImporter(self, preamble_value, op)
    +                    self.optimizer.importable_values[value] = imp
                         newresult = self.optimizer.getvalue(op.result).get_key_box()
                         if newresult is not op.result:
                             self.short_boxes.alias(newresult, op.result)
                 self.optimizer.flush()
                 self.optimizer.emitting_dissabled = False
     
    -            # XXX Hack to prevent the arraylen/strlen/unicodelen ops generated
    -            #     by value.make_guards() from ending up in pure_operations
    -            for key, op in self.optimizer.pure_operations.items():
    -                if not self.short_boxes.has_producer(op.result):
    -                    del self.optimizer.pure_operations[key]
    -
                 initial_inputargs_len = len(inputargs)
                 self.inliner = Inliner(loop.inputargs, jump_args)
     
    @@ -276,16 +312,11 @@
     
             short_jumpargs = inputargs[:]
     
    -        short = []
    -        short_seen = {}
    +        short = self.short = []
    +        short_seen = self.short_seen = {}
             for box, const in self.constant_inputargs.items():
                 short_seen[box] = True
     
    -        for op in self.short_boxes.operations():
    -            if op is not None:
    -                if len(self.getvalue(op.result).make_guards(op.result)) > 0:
    -                    self.add_op_to_short(op, short, short_seen, False, True)
    -
             # This loop is equivalent to the main optimization loop in
             # Optimizer.propagate_all_forward
             jumpop = None
    @@ -380,7 +411,7 @@
             if op.is_ovf():
                 guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None)
                 optimizer.send_extra_operation(guard)
    -        
    +
         def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False):
             if op is None:
                 return None
    @@ -536,6 +567,13 @@
                             loop_token.failed_states.append(virtual_state)
             self.emit_operation(op)
     
    +class ValueImporter(object):
    +    def __init__(self, unroll, value, op):
    +        self.unroll = unroll
    +        self.preamble_value = value
    +        self.op = op
     
    -
    -
    +    def import_value(self, value):
    +        value.import_from(self.preamble_value, self.unroll.optimizer)
    +        self.unroll.add_op_to_short(self.op, self.unroll.short, self.unroll.short_seen, False, True)        
    +        
    diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py
    --- a/pypy/jit/metainterp/optimizeopt/virtualize.py
    +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py
    @@ -58,6 +58,9 @@
         def _really_force(self):
             raise NotImplementedError("abstract base")
     
    +    def import_from(self, other, optimizer):
    +        raise NotImplementedError("should not be called at this level")
    +    
     def get_fielddescrlist_cache(cpu):
         if not hasattr(cpu, '_optimizeopt_fielddescrlist_cache'):
             result = descrlist_dict()
    diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py
    --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py
    +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py
    @@ -12,6 +12,7 @@
     from pypy.rlib.objectmodel import we_are_translated
     from pypy.rlib.debug import debug_start, debug_stop, debug_print
     from pypy.rlib.objectmodel import we_are_translated
    +import os
     
     class AbstractVirtualStateInfo(resume.AbstractVirtualInfo):
         position = -1
    @@ -461,8 +462,10 @@
     class ShortBoxes(object):
         def __init__(self, optimizer, surviving_boxes):
             self.potential_ops = {}
    -        self.duplicates = {}
    +        self.alternatives = {}
    +        self.synthetic = {}
             self.aliases = {}
    +        self.rename = {}
             self.optimizer = optimizer
             for box in surviving_boxes:
                 self.potential_ops[box] = None
    @@ -476,33 +479,81 @@
                 except BoxNotProducable:
                     pass
     
    +    def prioritized_alternatives(self, box):
    +        if box not in self.alternatives:
    +            return [self.potential_ops[box]]
    +        alts = self.alternatives[box]
    +        hi, lo = 0, len(alts) - 1
    +        while hi < lo:
    +            if alts[lo] is None: # Inputarg, lowest priority
    +                alts[lo], alts[-1] = alts[-1], alts[lo]
    +                lo -= 1
    +            elif alts[lo] not in self.synthetic: # Hi priority
    +                alts[hi], alts[lo] = alts[lo], alts[hi]
    +                hi += 1
    +            else: # Low priority
    +                lo -= 1
    +        return alts
    +            
    +    def renamed(self, box):
    +        if box in self.rename:
    +            return self.rename[box]
    +        return box
    +    
    +    def add_to_short(self, box, op):
    +        if op:
    +            op = op.clone()
    +            for i in range(op.numargs()):
    +                op.setarg(i, self.renamed(op.getarg(i)))
    +        if box in self.short_boxes:
    +            if op is None:
    +                oldop = self.short_boxes[box].clone()
    +                oldres = oldop.result
    +                newbox = oldop.result = oldres.clonebox()
    +                self.rename[box] = newbox
    +                self.short_boxes[box] = None
    +                self.short_boxes[newbox] = oldop
    +            else:
    +                newop = op.clone()
    +                newbox = newop.result = op.result.clonebox()
    +                self.short_boxes[newop.result] = newop
    +            value = self.optimizer.getvalue(box)
    +            self.optimizer.make_equal_to(newbox, value)
    +        else:
    +            self.short_boxes[box] = op
    +        
         def produce_short_preamble_box(self, box):
             if box in self.short_boxes:
                 return 
             if isinstance(box, Const):
                 return 
             if box in self.potential_ops:
    -            op = self.potential_ops[box]
    -            if op:
    -                for arg in op.getarglist():
    -                    self.produce_short_preamble_box(arg)
    -            self.short_boxes[box] = op
    +            ops = self.prioritized_alternatives(box)
    +            produced_one = False
    +            for op in ops:
    +                try:
    +                    if op:
    +                        for arg in op.getarglist():
    +                            self.produce_short_preamble_box(arg)
    +                except BoxNotProducable:
    +                    pass
    +                else:
    +                    produced_one = True
    +                    self.add_to_short(box, op)
    +            if not produced_one:
    +                raise BoxNotProducable
             else:
                 raise BoxNotProducable
     
    -    def add_potential(self, op):
    +    def add_potential(self, op, synthetic=False):
             if op.result not in self.potential_ops:
                 self.potential_ops[op.result] = op
    -            return op
    -        newop = op.clone()
    -        newop.result = op.result.clonebox()
    -        self.potential_ops[newop.result] = newop
    -        if op.result in self.duplicates:
    -            self.duplicates[op.result].append(newop.result)
             else:
    -            self.duplicates[op.result] = [newop.result]
    -        self.optimizer.make_equal_to(newop.result, self.optimizer.getvalue(op.result))
    -        return newop
    +            if op.result not in self.alternatives:
    +                self.alternatives[op.result] = [self.potential_ops[op.result]]
    +            self.alternatives[op.result].append(op)
    +        if synthetic:
    +            self.synthetic[op] = True
     
         def debug_print(self, logops):
             debug_start('jit-short-boxes')
    diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py
    --- a/pypy/jit/metainterp/test/test_virtualstate.py
    +++ b/pypy/jit/metainterp/test/test_virtualstate.py
    @@ -2,7 +2,7 @@
     import py
     from pypy.jit.metainterp.optimize import InvalidLoop
     from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \
    -     VArrayStateInfo, NotVirtualStateInfo, VirtualState
    +     VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes
     from pypy.jit.metainterp.optimizeopt.optimizer import OptValue
     from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr
     from pypy.rpython.lltypesystem import lltype
    @@ -11,6 +11,7 @@
     from pypy.jit.metainterp.history import TreeLoop, LoopToken
     from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeDescr, FakeMetaInterpStaticData
     from pypy.jit.metainterp.optimize import RetraceLoop
    +from pypy.jit.metainterp.resoperation import ResOperation, rop
     
     class TestBasic:
         someptr1 = LLtypeMixin.myptr
    @@ -129,6 +130,7 @@
                 info.fieldstate = [info]
                 assert info.generalization_of(info, {}, {})
     
    +
     class BaseTestGenerateGuards(BaseTest):
         def guards(self, info1, info2, box, expected):
             info1.position = info2.position = 0
    @@ -910,3 +912,111 @@
     class TestLLtypeBridges(BaseTestBridges, LLtypeMixin):
         pass
     
    +class FakeOptimizer:
    +    def make_equal_to(*args):
    +        pass
    +    def getvalue(*args):
    +        pass
    +
    +class TestShortBoxes:
    +    p1 = BoxPtr()
    +    p2 = BoxPtr()
    +    p3 = BoxPtr()
    +    p4 = BoxPtr()
    +    i1 = BoxInt()
    +    i2 = BoxInt()
    +    i3 = BoxInt()
    +    i4 = BoxInt()
    +    
    +    def test_short_box_duplication_direct(self):
    +        class Optimizer(FakeOptimizer):
    +            def produce_potential_short_preamble_ops(_self, sb):
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1))
    +        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
    +        assert len(sb.short_boxes) == 4
    +        assert self.i1 in sb.short_boxes
    +        assert sum([op.result is self.i1 for op in sb.short_boxes.values() if op]) == 1
    +
    +    def test_dont_duplicate_potential_boxes(self):
    +        class Optimizer(FakeOptimizer):
    +            def produce_potential_short_preamble_ops(_self, sb):
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [BoxPtr()], self.i1))
    +                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
    +                sb.add_potential(ResOperation(rop.INT_ADD, [ConstInt(7), self.i2],
    +                                              self.i3))
    +        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
    +        assert len(sb.short_boxes) == 5
    +
    +    def test_prioritize1(self):
    +        class Optimizer(FakeOptimizer):
    +            def produce_potential_short_preamble_ops(_self, sb):
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1))
    +                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
    +        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
    +        assert len(sb.short_boxes.values()) == 5
    +        int_neg = [op for op in sb.short_boxes.values()
    +                   if op and op.getopnum() == rop.INT_NEG]
    +        assert len(int_neg) == 1
    +        int_neg = int_neg[0]
    +        getfield = [op for op in sb.short_boxes.values()
    +                    if op and op.result == int_neg.getarg(0)]
    +        assert len(getfield) == 1
    +        assert getfield[0].getarg(0) in [self.p1, self.p2]
    +
    +    def test_prioritize1bis(self):
    +        class Optimizer(FakeOptimizer):
    +            def produce_potential_short_preamble_ops(_self, sb):
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1),
    +                                 synthetic=True)
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1),
    +                                 synthetic=True)
    +                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
    +        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
    +        assert len(sb.short_boxes.values()) == 5
    +        int_neg = [op for op in sb.short_boxes.values()
    +                   if op and op.getopnum() == rop.INT_NEG]
    +        assert len(int_neg) == 1
    +        int_neg = int_neg[0]
    +        getfield = [op for op in sb.short_boxes.values()
    +                    if op and op.result == int_neg.getarg(0)]
    +        assert len(getfield) == 1
    +        assert getfield[0].getarg(0) in [self.p1, self.p2]
    +        
    +    def test_prioritize2(self):
    +        class Optimizer(FakeOptimizer):
    +            def produce_potential_short_preamble_ops(_self, sb):
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1),
    +                                 synthetic=True)
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1))
    +                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
    +        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
    +        assert len(sb.short_boxes.values()) == 5
    +        int_neg = [op for op in sb.short_boxes.values()
    +                   if op and op.getopnum() == rop.INT_NEG]
    +        assert len(int_neg) == 1
    +        int_neg = int_neg[0]
    +        getfield = [op for op in sb.short_boxes.values()
    +                    if op and op.result == int_neg.getarg(0)]
    +        assert len(getfield) == 1
    +        assert getfield[0].getarg(0) == self.p2
    +        
    +    def test_prioritize3(self):
    +        class Optimizer(FakeOptimizer):
    +            def produce_potential_short_preamble_ops(_self, sb):
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
    +                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1),
    +                                 synthetic=True)
    +                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
    +        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
    +        assert len(sb.short_boxes.values()) == 5
    +        int_neg = [op for op in sb.short_boxes.values()
    +                   if op and op.getopnum() == rop.INT_NEG]
    +        assert len(int_neg) == 1
    +        int_neg = int_neg[0]
    +        getfield = [op for op in sb.short_boxes.values()
    +                    if op and op.result == int_neg.getarg(0)]
    +        assert len(getfield) == 1
    +        assert getfield[0].getarg(0) == self.p1
    diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py
    --- a/pypy/jit/tl/pypyjit_demo.py
    +++ b/pypy/jit/tl/pypyjit_demo.py
    @@ -2,22 +2,16 @@
     pypyjit.set_param(threshold=200)
     
     
    -def main(a, b):
    -    i = sa = 0
    -    while i < 300:
    -        if a > 0: # Specialises the loop
    -            pass
    -        if b < 2 and b > 0:
    -            pass
    -        if (a >> b) >= 0:
    -            sa += 1
    -        if (a << b) > 2:
    -            sa += 10000
    -        i += 1
    -    return sa
    +def f(n):
    +    pairs = [(0.0, 1.0), (2.0, 3.0)] * n
    +    mag = 0
    +    for (x1, x2) in pairs:
    +        dx = x1 - x2
    +        mag += ((dx * dx ) ** (-1.5))            
    +    return n
     
     try:
    -    print main(2, 1)
    +    print f(301)
     
     except Exception, e:
         print "Exception: ", type(e)
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    @@ -1,5 +1,5 @@
     from __future__ import with_statement
    -import sys
    +import sys, os
     import types
     import subprocess
     import py
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py
    @@ -92,6 +92,43 @@
             """)
     
     
    +    def test_cached_pure_func_of_equal_fields(self):            
    +        def main(n):
    +            class A(object):
    +                def __init__(self, val):
    +                    self.val1 = self.val2 = val
    +            a = A(1)
    +            b = A(1)
    +            sa = 0
    +            while n:
    +                sa += 2*a.val1
    +                sa += 2*b.val2
    +                b.val2 = a.val1
    +                n -= 1
    +            return sa
    +        #
    +        log = self.run(main, [1000])
    +        assert log.result == 4000
    +        loop, = log.loops_by_filename(self.filepath)
    +        assert loop.match("""
    +            i12 = int_is_true(i4)
    +            guard_true(i12, descr=...)
    +            guard_not_invalidated(descr=...)
    +            i13 = int_add_ovf(i8, i9)
    +            guard_no_overflow(descr=...)
    +            i10p = getfield_gc_pure(p10, descr=...)
    +            i10 = int_mul_ovf(2, i10p)
    +            guard_no_overflow(descr=...)
    +            i14 = int_add_ovf(i13, i10)
    +            guard_no_overflow(descr=...)
    +            setfield_gc(p7, p11, descr=...)
    +            i17 = int_sub_ovf(i4, 1)
    +            guard_no_overflow(descr=...)
    +            --TICK--
    +            jump(..., descr=...)
    +            """)
    +
    +
         def test_range_iter(self):
             def main(n):
                 def g(n):
    @@ -115,7 +152,6 @@
                 i21 = force_token()
                 setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>)
                 guard_not_invalidated(descr=...)
    -            i26 = int_sub(i9, 1)
                 i23 = int_lt(i18, 0)
                 guard_false(i23, descr=...)
                 i25 = int_ge(i18, i9)
    
    From noreply at buildbot.pypy.org  Wed Sep  7 23:02:29 2011
    From: noreply at buildbot.pypy.org (alex_gaynor)
    Date: Wed,  7 Sep 2011 23:02:29 +0200 (CEST)
    Subject: [pypy-commit] pypy improve-heap-caching-tracing: Add a special case
     for ll_arraycopy in the tracing heap cache.
    Message-ID: <20110907210229.94C6F82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Alex Gaynor 
    Branch: improve-heap-caching-tracing
    Changeset: r47153:70d999525e7c
    Date: 2011-09-07 14:02 -0700
    http://bitbucket.org/pypy/pypy/changeset/70d999525e7c/
    
    Log:	Add a special case for ll_arraycopy in the tracing heap cache.
    
    diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py
    --- a/pypy/jit/metainterp/heapcache.py
    +++ b/pypy/jit/metainterp/heapcache.py
    @@ -1,5 +1,7 @@
    +from pypy.jit.codewriter.effectinfo import EffectInfo
    +from pypy.jit.metainterp.history import ConstInt
     from pypy.jit.metainterp.resoperation import rop
    -from pypy.jit.metainterp.history import ConstInt
    +
     
     class HeapCache(object):
         def __init__(self):
    @@ -21,7 +23,7 @@
             # cache the length of arrays
             self.length_cache = {}
     
    -    def invalidate_caches(self, opnum, descr):
    +    def invalidate_caches(self, opnum, descr, argboxes):
             if opnum == rop.SETFIELD_GC:
                 return
             if opnum == rop.SETARRAYITEM_GC:
    @@ -41,6 +43,20 @@
                    ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \
                    ef == effectinfo.EF_ELIDABLE_CAN_RAISE:
                     return
    +            # A special case for ll_arraycopy, because it is so common, and its
    +            # effects are so well defined.
    +            elif effectinfo.oopspecindex == EffectInfo.OS_ARRAYCOPY:
    +                # The destination box
    +                if argboxes[2] in self.new_boxes:
    +                    # XXX: no descr here so we invalidate any of them, not just
    +                    # of the correct type
    +                    for descr, cache in self.heap_array_cache.iteritems():
    +                        for idx, cache in cache.iteritems():
    +                            for frombox in list(cache):
    +                                if frombox not in self.new_boxes:
    +                                    del cache[frombox]
    +                    return
    +
             self.heap_cache.clear()
             self.heap_array_cache.clear()
     
    diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
    --- a/pypy/jit/metainterp/pyjitpl.py
    +++ b/pypy/jit/metainterp/pyjitpl.py
    @@ -1676,7 +1676,7 @@
             # record the operation
             profiler = self.staticdata.profiler
             profiler.count_ops(opnum, RECORDED_OPS)
    -        self.heapcache.invalidate_caches(opnum, descr)
    +        self.heapcache.invalidate_caches(opnum, descr, argboxes)
             op = self.history.record(opnum, argboxes, resbox, descr)
             self.attach_debug_info(op)
             return resbox
    diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py
    --- a/pypy/jit/metainterp/test/test_heapcache.py
    +++ b/pypy/jit/metainterp/test/test_heapcache.py
    @@ -25,15 +25,17 @@
         EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables
         EF_RANDOM_EFFECTS                  = 6 #can do whatever
     
    -    def __init__(self, extraeffect):
    +    def __init__(self, extraeffect, oopspecindex):
             self.extraeffect = extraeffect
    +        self.oopspecindex = oopspecindex
     
     class FakeCallDescr(object):
    -    def __init__(self, extraeffect):
    +    def __init__(self, extraeffect, oopspecindex=None):
             self.extraeffect = extraeffect
    +        self.oopspecindex = oopspecindex
     
         def get_extra_info(self):
    -        return FakeEffektinfo(self.extraeffect)
    +        return FakeEffektinfo(self.extraeffect, self.oopspecindex)
     
     class TestHeapCache(object):
         def test_known_class_box(self):
    @@ -237,25 +239,25 @@
             h.setfield(box1, descr1, box2)
             h.setarrayitem(box1, descr1, index1, box2)
             h.setarrayitem(box1, descr1, index2, box4)
    -        h.invalidate_caches(rop.INT_ADD, None)
    -        h.invalidate_caches(rop.INT_ADD_OVF, None)
    -        h.invalidate_caches(rop.SETFIELD_RAW, None)
    -        h.invalidate_caches(rop.SETARRAYITEM_RAW, None)
    +        h.invalidate_caches(rop.INT_ADD, None, [])
    +        h.invalidate_caches(rop.INT_ADD_OVF, None, [])
    +        h.invalidate_caches(rop.SETFIELD_RAW, None, [])
    +        h.invalidate_caches(rop.SETARRAYITEM_RAW, None, [])
             assert h.getfield(box1, descr1) is box2
             assert h.getarrayitem(box1, descr1, index1) is box2
             assert h.getarrayitem(box1, descr1, index2) is box4
     
             h.invalidate_caches(
    -            rop.CALL, FakeCallDescr(FakeEffektinfo.EF_ELIDABLE_CANNOT_RAISE))
    +            rop.CALL, FakeCallDescr(FakeEffektinfo.EF_ELIDABLE_CANNOT_RAISE), [])
             assert h.getfield(box1, descr1) is box2
             assert h.getarrayitem(box1, descr1, index1) is box2
             assert h.getarrayitem(box1, descr1, index2) is box4
     
             h.invalidate_caches(
    -            rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT))
    +            rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), [])
     
             h.invalidate_caches(
    -            rop.CALL, FakeCallDescr(FakeEffektinfo.EF_RANDOM_EFFECTS))
    +            rop.CALL, FakeCallDescr(FakeEffektinfo.EF_RANDOM_EFFECTS), [])
             assert h.getfield(box1, descr1) is None
             assert h.getarrayitem(box1, descr1, index1) is None
             assert h.getarrayitem(box1, descr1, index2) is None
    @@ -294,3 +296,33 @@
     
             h.replace_box(lengthbox1, lengthbox2)
             assert h.arraylen(box4) is lengthbox2
    +
    +    def test_ll_arraycopy(self):
    +        h = HeapCache()
    +        h.new_array(box1, lengthbox1)
    +        h.setarrayitem(box1, descr1, index1, box2)
    +        h.new_array(box2, lengthbox1)
    +        # Just need the destination box for this call
    +        h.invalidate_caches(
    +            rop.CALL,
    +            # XXX: hardcoded oopspecindex
    +            FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, 1),
    +            [None, None, box2, None, None]
    +        )
    +        assert h.getarrayitem(box1, descr1, index1) is box2
    +        h.invalidate_caches(
    +            rop.CALL,
    +            FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, 1),
    +            [None, None, box3, None, None]
    +        )
    +        assert h.getarrayitem(box1, descr1, index1) is None
    +
    +        h.setarrayitem(box4, descr1, index1, box2)
    +        assert h.getarrayitem(box4, descr1, index1) is box2
    +        h.invalidate_caches(
    +            rop.CALL,
    +            # XXX: hardcoded oopspecindex
    +            FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, 1),
    +            [None, None, box2, None, None]
    +        )
    +        assert h.getarrayitem(box4, descr1, index1) is None
    diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
    --- a/pypy/jit/metainterp/test/test_tracingopts.py
    +++ b/pypy/jit/metainterp/test/test_tracingopts.py
    @@ -558,3 +558,18 @@
             res = self.interp_operations(fn, [7])
             assert res == 7 * 3
             self.check_operations_history(arraylen_gc=1)
    +
    +    def test_arraycopy(self):
    +        class Gbl(object):
    +            pass
    +        g = Gbl()
    +        g.a = [0] * 7
    +        def fn(n):
    +            assert n >= 0
    +            a = g.a
    +            x = [0] * n
    +            x[2] = 21
    +            return len(a[:n]) + x[2]
    +        res = self.interp_operations(fn, [3])
    +        assert res == 24
    +        self.check_operations_history(getarrayitem_gc=0)
    
    From noreply at buildbot.pypy.org  Thu Sep  8 05:37:32 2011
    From: noreply at buildbot.pypy.org (wlav)
    Date: Thu,  8 Sep 2011 05:37:32 +0200 (CEST)
    Subject: [pypy-commit] pypy reflex-support: merge default into branch
    Message-ID: <20110908033732.32C2582213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Wim Lavrijsen <wlavrijsen@lbl.gov>
    Branch: reflex-support
    Changeset: r47154:06400362eacb
    Date: 2011-09-06 11:39 -0700
    http://bitbucket.org/pypy/pypy/changeset/06400362eacb/
    
    Log:	merge default into branch
    
    diff too long, truncating to 10000 out of 10169 lines
    
    diff --git a/lib-python/conftest.py b/lib-python/conftest.py
    --- a/lib-python/conftest.py
    +++ b/lib-python/conftest.py
    @@ -359,7 +359,7 @@
         RegrTest('test_property.py', core=True),
         RegrTest('test_pstats.py'),
         RegrTest('test_pty.py', skip="unsupported extension module"),
    -    RegrTest('test_pwd.py', skip=skip_win32),
    +    RegrTest('test_pwd.py', usemodules="pwd", skip=skip_win32),
         RegrTest('test_py3kwarn.py'),
         RegrTest('test_pyclbr.py'),
         RegrTest('test_pydoc.py'),
    diff --git a/lib-python/modified-2.7/ctypes/util.py b/lib-python/modified-2.7/ctypes/util.py
    --- a/lib-python/modified-2.7/ctypes/util.py
    +++ b/lib-python/modified-2.7/ctypes/util.py
    @@ -72,8 +72,8 @@
             return name
     
     if os.name == "posix" and sys.platform == "darwin":
    -    from ctypes.macholib.dyld import dyld_find as _dyld_find
         def find_library(name):
    +        from ctypes.macholib.dyld import dyld_find as _dyld_find
             possible = ['lib%s.dylib' % name,
                         '%s.dylib' % name,
                         '%s.framework/%s' % (name, name)]
    diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py
    new file mode 100644
    --- /dev/null
    +++ b/lib-python/modified-2.7/gzip.py
    @@ -0,0 +1,514 @@
    +"""Functions that read and write gzipped files.
    +
    +The user of the file doesn't have to worry about the compression,
    +but random access is not allowed."""
    +
    +# based on Andrew Kuchling's minigzip.py distributed with the zlib module
    +
    +import struct, sys, time, os
    +import zlib
    +import io
    +import __builtin__
    +
    +__all__ = ["GzipFile","open"]
    +
    +FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
    +
    +READ, WRITE = 1, 2
    +
    +def write32u(output, value):
    +    # The L format writes the bit pattern correctly whether signed
    +    # or unsigned.
    +    output.write(struct.pack("<L", value))
    +
    +def read32(input):
    +    return struct.unpack("<I", input.read(4))[0]
    +
    +class GzipFile(io.BufferedIOBase):
    +    """The GzipFile class simulates most of the methods of a file object with
    +    the exception of the readinto() and truncate() methods.
    +
    +    """
    +
    +    myfileobj = None
    +    max_read_chunk = 10 * 1024 * 1024   # 10Mb
    +
    +    def __init__(self, filename=None, mode=None,
    +                 compresslevel=9, fileobj=None, mtime=None):
    +        """Constructor for the GzipFile class.
    +
    +        At least one of fileobj and filename must be given a
    +        non-trivial value.
    +
    +        The new class instance is based on fileobj, which can be a regular
    +        file, a StringIO object, or any other object which simulates a file.
    +        It defaults to None, in which case filename is opened to provide
    +        a file object.
    +
    +        When fileobj is not None, the filename argument is only used to be
    +        included in the gzip file header, which may include the original
    +        filename of the uncompressed file.  It defaults to the filename of
    +        fileobj, if discernible; otherwise, it defaults to the empty string,
    +        and in this case the original filename is not included in the header.
    +
    +        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
    +        depending on whether the file will be read or written.  The default
    +        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
    +        Be aware that only the 'rb', 'ab', and 'wb' values should be used
    +        for cross-platform portability.
    +
    +        The compresslevel argument is an integer from 1 to 9 controlling the
    +        level of compression; 1 is fastest and produces the least
    +        compression, and 9 is slowest and produces the most compression.
    +        The default is 9.
    +
    +        The mtime argument is an optional numeric timestamp to be written
    +        to the stream when compressing.  All gzip compressed streams
    +        are required to contain a timestamp.  If omitted or None, the
    +        current time is used.  This module ignores the timestamp when
    +        decompressing; but some programs, such as gunzip, make use
    +        of it.  The format of the timestamp is the same as that of the
    +        return value of time.time() and of the st_mtime member of the
    +        object returned by os.stat().
    +
    +        """
    +
    +        # guarantee the file is opened in binary mode on platforms
    +        # that care about that sort of thing
    +        if mode and 'b' not in mode:
    +            mode += 'b'
    +        if fileobj is None:
    +            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
    +        if filename is None:
    +            if hasattr(fileobj, 'name'): filename = fileobj.name
    +            else: filename = ''
    +        if mode is None:
    +            if hasattr(fileobj, 'mode'): mode = fileobj.mode
    +            else: mode = 'rb'
    +
    +        if mode[0:1] == 'r':
    +            self.mode = READ
    +            # Set flag indicating start of a new member
    +            self._new_member = True
    +            # Buffer data read from gzip file. extrastart is offset in
    +            # stream where buffer starts. extrasize is number of
    +            # bytes remaining in buffer from current stream position.
    +            self.extrabuf = ""
    +            self.extrasize = 0
    +            self.extrastart = 0
    +            self.name = filename
    +            # Starts small, scales exponentially
    +            self.min_readsize = 100
    +
    +        elif mode[0:1] == 'w' or mode[0:1] == 'a':
    +            self.mode = WRITE
    +            self._init_write(filename)
    +            self.compress = zlib.compressobj(compresslevel,
    +                                             zlib.DEFLATED,
    +                                             -zlib.MAX_WBITS,
    +                                             zlib.DEF_MEM_LEVEL,
    +                                             0)
    +        else:
    +            raise IOError, "Mode " + mode + " not supported"
    +
    +        self.fileobj = fileobj
    +        self.offset = 0
    +        self.mtime = mtime
    +
    +        if self.mode == WRITE:
    +            self._write_gzip_header()
    +
    +    @property
    +    def filename(self):
    +        import warnings
    +        warnings.warn("use the name attribute", DeprecationWarning, 2)
    +        if self.mode == WRITE and self.name[-3:] != ".gz":
    +            return self.name + ".gz"
    +        return self.name
    +
    +    def __repr__(self):
    +        s = repr(self.fileobj)
    +        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
    +
    +    def _check_closed(self):
    +        """Raises a ValueError if the underlying file object has been closed.
    +
    +        """
    +        if self.closed:
    +            raise ValueError('I/O operation on closed file.')
    +
    +    def _init_write(self, filename):
    +        self.name = filename
    +        self.crc = zlib.crc32("") & 0xffffffffL
    +        self.size = 0
    +        self.writebuf = []
    +        self.bufsize = 0
    +
    +    def _write_gzip_header(self):
    +        self.fileobj.write('\037\213')             # magic header
    +        self.fileobj.write('\010')                 # compression method
    +        fname = os.path.basename(self.name)
    +        if fname.endswith(".gz"):
    +            fname = fname[:-3]
    +        flags = 0
    +        if fname:
    +            flags = FNAME
    +        self.fileobj.write(chr(flags))
    +        mtime = self.mtime
    +        if mtime is None:
    +            mtime = time.time()
    +        write32u(self.fileobj, long(mtime))
    +        self.fileobj.write('\002')
    +        self.fileobj.write('\377')
    +        if fname:
    +            self.fileobj.write(fname + '\000')
    +
    +    def _init_read(self):
    +        self.crc = zlib.crc32("") & 0xffffffffL
    +        self.size = 0
    +
    +    def _read_gzip_header(self):
    +        magic = self.fileobj.read(2)
    +        if magic != '\037\213':
    +            raise IOError, 'Not a gzipped file'
    +        method = ord( self.fileobj.read(1) )
    +        if method != 8:
    +            raise IOError, 'Unknown compression method'
    +        flag = ord( self.fileobj.read(1) )
    +        self.mtime = read32(self.fileobj)
    +        # extraflag = self.fileobj.read(1)
    +        # os = self.fileobj.read(1)
    +        self.fileobj.read(2)
    +
    +        if flag & FEXTRA:
    +            # Read & discard the extra field, if present
    +            xlen = ord(self.fileobj.read(1))
    +            xlen = xlen + 256*ord(self.fileobj.read(1))
    +            self.fileobj.read(xlen)
    +        if flag & FNAME:
    +            # Read and discard a null-terminated string containing the filename
    +            while True:
    +                s = self.fileobj.read(1)
    +                if not s or s=='\000':
    +                    break
    +        if flag & FCOMMENT:
    +            # Read and discard a null-terminated string containing a comment
    +            while True:
    +                s = self.fileobj.read(1)
    +                if not s or s=='\000':
    +                    break
    +        if flag & FHCRC:
    +            self.fileobj.read(2)     # Read & discard the 16-bit header CRC
    +
    +    def write(self,data):
    +        self._check_closed()
    +        if self.mode != WRITE:
    +            import errno
    +            raise IOError(errno.EBADF, "write() on read-only GzipFile object")
    +
    +        if self.fileobj is None:
    +            raise ValueError, "write() on closed GzipFile object"
    +
    +        # Convert data type if called by io.BufferedWriter.
    +        if isinstance(data, memoryview):
    +            data = data.tobytes()
    +
    +        if len(data) > 0:
    +            self.size = self.size + len(data)
    +            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
    +            self.fileobj.write( self.compress.compress(data) )
    +            self.offset += len(data)
    +
    +        return len(data)
    +
    +    def read(self, size=-1):
    +        self._check_closed()
    +        if self.mode != READ:
    +            import errno
    +            raise IOError(errno.EBADF, "read() on write-only GzipFile object")
    +
    +        if self.extrasize <= 0 and self.fileobj is None:
    +            return ''
    +
    +        readsize = 1024
    +        if size < 0:        # get the whole thing
    +            try:
    +                while True:
    +                    self._read(readsize)
    +                    readsize = min(self.max_read_chunk, readsize * 2)
    +            except EOFError:
    +                size = self.extrasize
    +        elif size == 0:
    +            return ""
    +        else:               # just get some more of it
    +            try:
    +                while size > self.extrasize:
    +                    self._read(readsize)
    +                    readsize = min(self.max_read_chunk, readsize * 2)
    +            except EOFError:
    +                if size > self.extrasize:
    +                    size = self.extrasize
    +
    +        offset = self.offset - self.extrastart
    +        chunk = self.extrabuf[offset: offset + size]
    +        self.extrasize = self.extrasize - size
    +
    +        self.offset += size
    +        return chunk
    +
    +    def _unread(self, buf):
    +        self.extrasize = len(buf) + self.extrasize
    +        self.offset -= len(buf)
    +
    +    def _read(self, size=1024):
    +        if self.fileobj is None:
    +            raise EOFError, "Reached EOF"
    +
    +        if self._new_member:
    +            # If the _new_member flag is set, we have to
    +            # jump to the next member, if there is one.
    +            #
    +            # First, check if we're at the end of the file;
    +            # if so, it's time to stop; no more members to read.
    +            pos = self.fileobj.tell()   # Save current position
    +            self.fileobj.seek(0, 2)     # Seek to end of file
    +            if pos == self.fileobj.tell():
    +                raise EOFError, "Reached EOF"
    +            else:
    +                self.fileobj.seek( pos ) # Return to original position
    +
    +            self._init_read()
    +            self._read_gzip_header()
    +            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
    +            self._new_member = False
    +
    +        # Read a chunk of data from the file
    +        buf = self.fileobj.read(size)
    +
    +        # If the EOF has been reached, flush the decompression object
    +        # and mark this object as finished.
    +
    +        if buf == "":
    +            uncompress = self.decompress.flush()
    +            self._read_eof()
    +            self._add_read_data( uncompress )
    +            raise EOFError, 'Reached EOF'
    +
    +        uncompress = self.decompress.decompress(buf)
    +        self._add_read_data( uncompress )
    +
    +        if self.decompress.unused_data != "":
    +            # Ending case: we've come to the end of a member in the file,
    +            # so seek back to the start of the unused data, finish up
    +            # this member, and read a new gzip header.
    +            # (The number of bytes to seek back is the length of the unused
    +            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
    +            self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
    +
    +            # Check the CRC and file size, and set the flag so we read
    +            # a new member on the next call
    +            self._read_eof()
    +            self._new_member = True
    +
    +    def _add_read_data(self, data):
    +        self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
    +        offset = self.offset - self.extrastart
    +        self.extrabuf = self.extrabuf[offset:] + data
    +        self.extrasize = self.extrasize + len(data)
    +        self.extrastart = self.offset
    +        self.size = self.size + len(data)
    +
    +    def _read_eof(self):
    +        # We've read to the end of the file, so we have to rewind in order
    +        # to reread the 8 bytes containing the CRC and the file size.
    +        # We check the that the computed CRC and size of the
    +        # uncompressed data matches the stored values.  Note that the size
    +        # stored is the true file size mod 2**32.
    +        self.fileobj.seek(-8, 1)
    +        crc32 = read32(self.fileobj)
    +        isize = read32(self.fileobj)  # may exceed 2GB
    +        if crc32 != self.crc:
    +            raise IOError("CRC check failed %s != %s" % (hex(crc32),
    +                                                         hex(self.crc)))
    +        elif isize != (self.size & 0xffffffffL):
    +            raise IOError, "Incorrect length of data produced"
    +
    +        # Gzip files can be padded with zeroes and still have archives.
    +        # Consume all zero bytes and set the file position to the first
    +        # non-zero byte. See http://www.gzip.org/#faq8
    +        c = "\x00"
    +        while c == "\x00":
    +            c = self.fileobj.read(1)
    +        if c:
    +            self.fileobj.seek(-1, 1)
    +
    +    @property
    +    def closed(self):
    +        return self.fileobj is None
    +
    +    def close(self):
    +        if self.fileobj is None:
    +            return
    +        if self.mode == WRITE:
    +            self.fileobj.write(self.compress.flush())
    +            write32u(self.fileobj, self.crc)
    +            # self.size may exceed 2GB, or even 4GB
    +            write32u(self.fileobj, self.size & 0xffffffffL)
    +            self.fileobj = None
    +        elif self.mode == READ:
    +            self.fileobj = None
    +        if self.myfileobj:
    +            self.myfileobj.close()
    +            self.myfileobj = None
    +
    +    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
    +        self._check_closed()
    +        if self.mode == WRITE:
    +            # Ensure the compressor's buffer is flushed
    +            self.fileobj.write(self.compress.flush(zlib_mode))
    +            self.fileobj.flush()
    +
    +    def fileno(self):
    +        """Invoke the underlying file object's fileno() method.
    +
    +        This will raise AttributeError if the underlying file object
    +        doesn't support fileno().
    +        """
    +        return self.fileobj.fileno()
    +
    +    def rewind(self):
    +        '''Return the uncompressed stream file position indicator to the
    +        beginning of the file'''
    +        if self.mode != READ:
    +            raise IOError("Can't rewind in write mode")
    +        self.fileobj.seek(0)
    +        self._new_member = True
    +        self.extrabuf = ""
    +        self.extrasize = 0
    +        self.extrastart = 0
    +        self.offset = 0
    +
    +    def readable(self):
    +        return self.mode == READ
    +
    +    def writable(self):
    +        return self.mode == WRITE
    +
    +    def seekable(self):
    +        return True
    +
    +    def seek(self, offset, whence=0):
    +        if whence:
    +            if whence == 1:
    +                offset = self.offset + offset
    +            else:
    +                raise ValueError('Seek from end not supported')
    +        if self.mode == WRITE:
    +            if offset < self.offset:
    +                raise IOError('Negative seek in write mode')
    +            count = offset - self.offset
    +            for i in range(count // 1024):
    +                self.write(1024 * '\0')
    +            self.write((count % 1024) * '\0')
    +        elif self.mode == READ:
    +            if offset == self.offset:
    +                self.read(0) # to make sure that this file is open
    +                return self.offset
    +            if offset < self.offset:
    +                # for negative seek, rewind and do positive seek
    +                self.rewind()
    +            count = offset - self.offset
    +            for i in range(count // 1024):
    +                self.read(1024)
    +            self.read(count % 1024)
    +
    +        return self.offset
    +
    +    def readline(self, size=-1):
    +        if size < 0:
    +            # Shortcut common case - newline found in buffer.
    +            offset = self.offset - self.extrastart
    +            i = self.extrabuf.find('\n', offset) + 1
    +            if i > 0:
    +                self.extrasize -= i - offset
    +                self.offset += i - offset
    +                return self.extrabuf[offset: i]
    +
    +            size = sys.maxint
    +            readsize = self.min_readsize
    +        else:
    +            readsize = size
    +        bufs = []
    +        while size != 0:
    +            c = self.read(readsize)
    +            i = c.find('\n')
    +
    +            # We set i=size to break out of the loop under two
    +            # conditions: 1) there's no newline, and the chunk is
    +            # larger than size, or 2) there is a newline, but the
    +            # resulting line would be longer than 'size'.
    +            if (size <= i) or (i == -1 and len(c) > size):
    +                i = size - 1
    +
    +            if i >= 0 or c == '':
    +                bufs.append(c[:i + 1])    # Add portion of last chunk
    +                self._unread(c[i + 1:])   # Push back rest of chunk
    +                break
    +
    +            # Append chunk to list, decrease 'size',
    +            bufs.append(c)
    +            size = size - len(c)
    +            readsize = min(size, readsize * 2)
    +        if readsize > self.min_readsize:
    +            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
    +        return ''.join(bufs) # Return resulting line
    +
    +
    +def _test():
    +    # Act like gzip; with -d, act like gunzip.
    +    # The input file is not deleted, however, nor are any other gzip
    +    # options or features supported.
    +    args = sys.argv[1:]
    +    decompress = args and args[0] == "-d"
    +    if decompress:
    +        args = args[1:]
    +    if not args:
    +        args = ["-"]
    +    for arg in args:
    +        if decompress:
    +            if arg == "-":
    +                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
    +                g = sys.stdout
    +            else:
    +                if arg[-3:] != ".gz":
    +                    print "filename doesn't end in .gz:", repr(arg)
    +                    continue
    +                f = open(arg, "rb")
    +                g = __builtin__.open(arg[:-3], "wb")
    +        else:
    +            if arg == "-":
    +                f = sys.stdin
    +                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
    +            else:
    +                f = __builtin__.open(arg, "rb")
    +                g = open(arg + ".gz", "wb")
    +        while True:
    +            chunk = f.read(1024)
    +            if not chunk:
    +                break
    +            g.write(chunk)
    +        if g is not sys.stdout:
    +            g.close()
    +        if f is not sys.stdin:
    +            f.close()
    +
    +if __name__ == '__main__':
    +    _test()
    diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py
    --- a/lib-python/modified-2.7/sqlite3/test/regression.py
    +++ b/lib-python/modified-2.7/sqlite3/test/regression.py
    @@ -274,6 +274,18 @@
             cur.execute("UPDATE foo SET id = 3 WHERE id = 1")
             self.assertEqual(cur.description, None)
     
    +    def CheckStatementCache(self):
    +        cur = self.con.cursor()
    +        cur.execute("CREATE TABLE foo (id INTEGER)")
    +        values = [(i,) for i in xrange(5)]
    +        cur.executemany("INSERT INTO foo (id) VALUES (?)", values)
    +
    +        cur.execute("SELECT id FROM foo")
    +        self.assertEqual(list(cur), values)
    +        self.con.commit()
    +        cur.execute("SELECT id FROM foo")
    +        self.assertEqual(list(cur), values)
    +
     def suite():
         regression_suite = unittest.makeSuite(RegressionTests, "Check")
         return unittest.TestSuite((regression_suite,))
    diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py
    --- a/lib-python/modified-2.7/tarfile.py
    +++ b/lib-python/modified-2.7/tarfile.py
    @@ -252,8 +252,8 @@
            the high bit set. So we calculate two checksums, unsigned and
            signed.
         """
    -    unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
    -    signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
    +    unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512]))
    +    signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512]))
         return unsigned_chksum, signed_chksum
     
     def copyfileobj(src, dst, length=None):
    @@ -265,7 +265,6 @@
         if length is None:
             shutil.copyfileobj(src, dst)
             return
    -
         BUFSIZE = 16 * 1024
         blocks, remainder = divmod(length, BUFSIZE)
         for b in xrange(blocks):
    @@ -802,19 +801,19 @@
             if self.closed:
                 raise ValueError("I/O operation on closed file")
     
    -        buf = ""
             if self.buffer:
                 if size is None:
    -                buf = self.buffer
    +                buf = self.buffer + self.fileobj.read()
                     self.buffer = ""
                 else:
                     buf = self.buffer[:size]
                     self.buffer = self.buffer[size:]
    -
    -        if size is None:
    -            buf += self.fileobj.read()
    +                buf += self.fileobj.read(size - len(buf))
             else:
    -            buf += self.fileobj.read(size - len(buf))
    +            if size is None:
    +                buf = self.fileobj.read()
    +            else:
    +                buf = self.fileobj.read(size)
     
             self.position += len(buf)
             return buf
    diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
    --- a/lib_pypy/_ctypes/basics.py
    +++ b/lib_pypy/_ctypes/basics.py
    @@ -54,7 +54,8 @@
         def get_ffi_argtype(self):
             if self._ffiargtype:
                 return self._ffiargtype
    -        return _shape_to_ffi_type(self._ffiargshape)
    +        self._ffiargtype = _shape_to_ffi_type(self._ffiargshape)
    +        return self._ffiargtype
     
         def _CData_output(self, resbuffer, base=None, index=-1):
             #assert isinstance(resbuffer, _rawffi.ArrayInstance)
    @@ -166,7 +167,8 @@
         return tp._alignmentofinstances()
     
     def byref(cdata):
    -    from ctypes import pointer
    +    # "pointer" is imported at the end of this module to avoid circular
    +    # imports
         return pointer(cdata)
     
     def cdata_from_address(self, address):
    @@ -224,5 +226,9 @@
         'Z' : _ffi.types.void_p,
         'X' : _ffi.types.void_p,
         'v' : _ffi.types.sshort,
    +    '?' : _ffi.types.ubyte,
         }
     
    +
    +# used by "byref"
    +from _ctypes.pointer import pointer
    diff --git a/lib_pypy/_elementtree.py b/lib_pypy/_elementtree.py
    new file mode 100644
    --- /dev/null
    +++ b/lib_pypy/_elementtree.py
    @@ -0,0 +1,6 @@
    +# Just use ElementTree.
    +
    +from xml.etree import ElementTree
    +
    +globals().update(ElementTree.__dict__)
    +del __all__
    diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py
    --- a/lib_pypy/_pypy_interact.py
    +++ b/lib_pypy/_pypy_interact.py
    @@ -56,6 +56,10 @@
                     prompt = getattr(sys, 'ps1', '>>> ')
                 try:
                     line = raw_input(prompt)
    +                # Can be None if sys.stdin was redefined
    +                encoding = getattr(sys.stdin, 'encoding', None)
    +                if encoding and not isinstance(line, unicode):
    +                    line = line.decode(encoding)
                 except EOFError:
                     console.write("\n")
                     break
    diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
    --- a/lib_pypy/_sqlite3.py
    +++ b/lib_pypy/_sqlite3.py
    @@ -24,6 +24,7 @@
     from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll
     from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast
     from ctypes import sizeof, c_ssize_t
    +from collections import OrderedDict
     import datetime
     import sys
     import time
    @@ -274,6 +275,28 @@
     def unicode_text_factory(x):
         return unicode(x, 'utf-8')
     
    +
    +class StatementCache(object):
    +    def __init__(self, connection, maxcount):
    +        self.connection = connection
    +        self.maxcount = maxcount
    +        self.cache = OrderedDict()
    +
    +    def get(self, sql, cursor, row_factory):
    +        try:
    +            stat = self.cache[sql]
    +        except KeyError:
    +            stat = Statement(self.connection, sql)
    +            self.cache[sql] = stat
    +            if len(self.cache) > self.maxcount:
    +                self.cache.popitem(0)
    +        #
    +        if stat.in_use:
    +            stat = Statement(self.connection, sql)
    +        stat.set_row_factory(row_factory)
    +        return stat
    +
    +
     class Connection(object):
         def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="",
                      check_same_thread=True, factory=None, cached_statements=100):
    @@ -291,6 +314,7 @@
             self.row_factory = None
             self._isolation_level = isolation_level
             self.detect_types = detect_types
    +        self.statement_cache = StatementCache(self, cached_statements)
     
             self.cursors = []
     
    @@ -399,7 +423,7 @@
             cur = Cursor(self)
             if not isinstance(sql, (str, unicode)):
                 raise Warning("SQL is of wrong type. Must be string or unicode.")
    -        statement = Statement(cur, sql, self.row_factory)
    +        statement = self.statement_cache.get(sql, cur, self.row_factory)
             return statement
     
         def _get_isolation_level(self):
    @@ -681,6 +705,8 @@
             from sqlite3.dump import _iterdump
             return _iterdump(self)
     
    +DML, DQL, DDL = range(3)
    +
     class Cursor(object):
         def __init__(self, con):
             if not isinstance(con, Connection):
    @@ -708,12 +734,12 @@
             if type(sql) is unicode:
                 sql = sql.encode("utf-8")
             self._check_closed()
    -        self.statement = Statement(self, sql, self.row_factory)
    +        self.statement = self.connection.statement_cache.get(sql, self, self.row_factory)
     
             if self.connection._isolation_level is not None:
    -            if self.statement.kind == "DDL":
    +            if self.statement.kind == DDL:
                     self.connection.commit()
    -            elif self.statement.kind == "DML":
    +            elif self.statement.kind == DML:
                     self.connection._begin()
     
             self.statement.set_params(params)
    @@ -724,18 +750,18 @@
                 self.statement.reset()
                 raise self.connection._get_exception(ret)
     
    -        if self.statement.kind == "DQL"and ret == SQLITE_ROW:
    +        if self.statement.kind == DQL and ret == SQLITE_ROW:
                 self.statement._build_row_cast_map()
    -            self.statement._readahead()
    +            self.statement._readahead(self)
             else:
                 self.statement.item = None
                 self.statement.exhausted = True
     
    -        if self.statement.kind in ("DML", "DDL"):
    +        if self.statement.kind == DML or self.statement.kind == DDL:
                 self.statement.reset()
     
             self.rowcount = -1
    -        if self.statement.kind == "DML":
    +        if self.statement.kind == DML:
                 self.rowcount = sqlite.sqlite3_changes(self.connection.db)
     
             return self
    @@ -746,8 +772,9 @@
             if type(sql) is unicode:
                 sql = sql.encode("utf-8")
             self._check_closed()
    -        self.statement = Statement(self, sql, self.row_factory)
    -        if self.statement.kind == "DML":
    +        self.statement = self.connection.statement_cache.get(sql, self, self.row_factory)
    +
    +        if self.statement.kind == DML:
                 self.connection._begin()
             else:
                 raise ProgrammingError, "executemany is only for DML statements"
    @@ -799,7 +826,7 @@
             return self
     
         def __iter__(self):
    -        return self.statement
    +        return iter(self.fetchone, None)
     
         def _check_reset(self):
             if self.reset:
    @@ -816,7 +843,7 @@
                 return None
     
             try:
    -            return self.statement.next()
    +            return self.statement.next(self)
             except StopIteration:
                 return None
     
    @@ -830,7 +857,7 @@
             if size is None:
                 size = self.arraysize
             lst = []
    -        for row in self.statement:
    +        for row in self:
                 lst.append(row)
                 if len(lst) == size:
                     break
    @@ -841,7 +868,7 @@
             self._check_reset()
             if self.statement is None:
                 return []
    -        return list(self.statement)
    +        return list(self)
     
         def _getdescription(self):
             if self._description is None:
    @@ -871,22 +898,24 @@
         lastrowid = property(_getlastrowid)
     
     class Statement(object):
    -    def __init__(self, cur, sql, row_factory):
    +    def __init__(self, connection, sql):
             self.statement = None
             if not isinstance(sql, str):
                 raise ValueError, "sql must be a string"
    -        self.con = cur.connection
    -        self.cur = weakref.ref(cur)
    +        self.con = connection
             self.sql = sql # DEBUG ONLY
    -        self.row_factory = row_factory
             first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper()
             if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"):
    -            self.kind = "DML"
    +            self.kind = DML
             elif first_word in ("SELECT", "PRAGMA"):
    -            self.kind = "DQL"
    +            self.kind = DQL
             else:
    -            self.kind = "DDL"
    +            self.kind = DDL
             self.exhausted = False
    +        self.in_use = False
    +        #
    +        # set by set_row_factory
    +        self.row_factory = None
     
             self.statement = c_void_p()
             next_char = c_char_p()
    @@ -895,7 +924,7 @@
             if ret == SQLITE_OK and self.statement.value is None:
                 # an empty statement, we work around that, as it's the least trouble
                 ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char))
    -            self.kind = "DQL"
    +            self.kind = DQL
     
             if ret != SQLITE_OK:
                 raise self.con._get_exception(ret)
    @@ -907,6 +936,9 @@
     
             self._build_row_cast_map()
     
    +    def set_row_factory(self, row_factory):
    +        self.row_factory = row_factory
    +
         def _build_row_cast_map(self):
             self.row_cast_map = []
             for i in xrange(sqlite.sqlite3_column_count(self.statement)):
    @@ -976,6 +1008,7 @@
             ret = sqlite.sqlite3_reset(self.statement)
             if ret != SQLITE_OK:
                 raise self.con._get_exception(ret)
    +        self.mark_dirty()
     
             if params is None:
                 if sqlite.sqlite3_bind_parameter_count(self.statement) != 0:
    @@ -1006,10 +1039,7 @@
                         raise ProgrammingError("missing parameter '%s'" %param)
                     self.set_param(idx, param)
     
    -    def __iter__(self):
    -        return self
    -
    -    def next(self):
    +    def next(self, cursor):
             self.con._check_closed()
             self.con._check_thread()
             if self.exhausted:
    @@ -1025,10 +1055,10 @@
                 sqlite.sqlite3_reset(self.statement)
                 raise exc
     
    -        self._readahead()
    +        self._readahead(cursor)
             return item
     
    -    def _readahead(self):
    +    def _readahead(self, cursor):
             self.column_count = sqlite.sqlite3_column_count(self.statement)
             row = []
             for i in xrange(self.column_count):
    @@ -1063,23 +1093,30 @@
     
             row = tuple(row)
             if self.row_factory is not None:
    -            row = self.row_factory(self.cur(), row)
    +            row = self.row_factory(cursor, row)
             self.item = row
     
         def reset(self):
             self.row_cast_map = None
    -        return sqlite.sqlite3_reset(self.statement)
    +        ret = sqlite.sqlite3_reset(self.statement)
    +        self.in_use = False
    +        self.exhausted = False
    +        return ret
     
         def finalize(self):
             sqlite.sqlite3_finalize(self.statement)
             self.statement = None
    +        self.in_use = False
    +
    +    def mark_dirty(self):
    +        self.in_use = True
     
         def __del__(self):
             sqlite.sqlite3_finalize(self.statement)
             self.statement = None
     
         def _get_description(self):
    -        if self.kind == "DML":
    +        if self.kind == DML:
                 return None
             desc = []
             for i in xrange(sqlite.sqlite3_column_count(self.statement)):
    diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py
    --- a/lib_pypy/distributed/test/test_distributed.py
    +++ b/lib_pypy/distributed/test/test_distributed.py
    @@ -9,7 +9,7 @@
     class AppTestDistributed(object):
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -            "usemodules":("_stackless",)})
    +            "usemodules":("_continuation",)})
     
         def test_init(self):
             import distributed
    @@ -91,10 +91,8 @@
     
     class AppTestDistributedTasklets(object):
         spaceconfig = {"objspace.std.withtproxy": True,
    -                   "objspace.usemodules._stackless": True}
    +                   "objspace.usemodules._continuation": True}
         def setup_class(cls):
    -        #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -        #    "usemodules":("_stackless",)})
             cls.w_test_env = cls.space.appexec([], """():
             from distributed import test_env
             return test_env
    diff --git a/lib_pypy/distributed/test/test_greensock.py b/lib_pypy/distributed/test/test_greensock.py
    --- a/lib_pypy/distributed/test/test_greensock.py
    +++ b/lib_pypy/distributed/test/test_greensock.py
    @@ -10,7 +10,7 @@
             if not option.runappdirect:
                 py.test.skip("Cannot run this on top of py.py because of PopenGateway")
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless",)})
    +                                       "usemodules":("_continuation",)})
             cls.w_remote_side_code = cls.space.appexec([], """():
             import sys
             sys.path.insert(0, '%s')
    diff --git a/lib_pypy/distributed/test/test_socklayer.py b/lib_pypy/distributed/test/test_socklayer.py
    --- a/lib_pypy/distributed/test/test_socklayer.py
    +++ b/lib_pypy/distributed/test/test_socklayer.py
    @@ -9,7 +9,8 @@
     class AppTestSocklayer:
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless","_socket", "select")})
    +                                       "usemodules":("_continuation",
    +                                                     "_socket", "select")})
         
         def test_socklayer(self):
             class X(object):
    diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
    --- a/lib_pypy/greenlet.py
    +++ b/lib_pypy/greenlet.py
    @@ -59,7 +59,12 @@
             #
             while not target:
                 if not target.__started:
    -                _continulet.__init__(target, _greenlet_start, *args)
    +                if unbound_method != _continulet.throw:
    +                    greenlet_func = _greenlet_start
    +                else:
    +                    greenlet_func = _greenlet_throw
    +                _continulet.__init__(target, greenlet_func, *args)
    +                unbound_method = _continulet.switch
                     args = ()
                     target.__started = True
                     break
    @@ -136,3 +141,11 @@
             if greenlet.parent is not _tls.main:
                 _continuation.permute(greenlet, greenlet.parent)
         return (res,)
    +
    +def _greenlet_throw(greenlet, exc, value, tb):
    +    _tls.current = greenlet
    +    try:
    +        raise exc, value, tb
    +    finally:
    +        if greenlet.parent is not _tls.main:
    +            _continuation.permute(greenlet, greenlet.parent)
    diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py
    --- a/lib_pypy/pypy_test/test_coroutine.py
    +++ b/lib_pypy/pypy_test/test_coroutine.py
    @@ -2,7 +2,7 @@
     from py.test import skip, raises
     
     try:
    -    from lib_pypy.stackless import coroutine, CoroutineExit
    +    from stackless import coroutine, CoroutineExit
     except ImportError, e:
         skip('cannot import stackless: %s' % (e,))
     
    @@ -20,10 +20,6 @@
             assert not co.is_zombie
     
         def test_is_zombie_del_without_frame(self):
    -        try:
    -            import _stackless # are we on pypy with a stackless build?
    -        except ImportError:
    -            skip("only works on pypy-c-stackless")
             import gc
             res = []
             class MyCoroutine(coroutine):
    @@ -45,10 +41,6 @@
             assert res[0], "is_zombie was False in __del__"
     
         def test_is_zombie_del_with_frame(self):
    -        try:
    -            import _stackless # are we on pypy with a stackless build?
    -        except ImportError:
    -            skip("only works on pypy-c-stackless")
             import gc
             res = []
             class MyCoroutine(coroutine):
    diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py
    --- a/lib_pypy/pyrepl/reader.py
    +++ b/lib_pypy/pyrepl/reader.py
    @@ -401,13 +401,19 @@
                 return "(arg: %s) "%self.arg
             if "\n" in self.buffer:
                 if lineno == 0:
    -                return self._ps2
    +                res = self.ps2
                 elif lineno == self.buffer.count("\n"):
    -                return self._ps4
    +                res = self.ps4
                 else:
    -                return self._ps3
    +                res = self.ps3
             else:
    -            return self._ps1
    +            res = self.ps1
    +        # Lazily call str() on self.psN, and cache the results using as key
    +        # the object on which str() was called.  This ensures that even if the
    +        # same object is used e.g. for ps1 and ps2, str() is called only once.
    +        if res not in self._pscache:
    +            self._pscache[res] = str(res)
    +        return self._pscache[res]
     
         def push_input_trans(self, itrans):
             self.input_trans_stack.append(self.input_trans)
    @@ -473,8 +479,7 @@
                 self.pos = 0
                 self.dirty = 1
                 self.last_command = None
    -            self._ps1, self._ps2, self._ps3, self._ps4 = \
    -                           map(str, [self.ps1, self.ps2, self.ps3, self.ps4])
    +            self._pscache = {}
             except:
                 self.restore()
                 raise
    @@ -571,7 +576,7 @@
             self.console.push_char(char)
             self.handle1(0)
         
    -    def readline(self):
    +    def readline(self, returns_unicode=False):
             """Read a line.  The implementation of this method also shows
             how to drive Reader if you want more control over the event
             loop."""
    @@ -580,6 +585,8 @@
                 self.refresh()
                 while not self.finished:
                     self.handle1()
    +            if returns_unicode:
    +                return self.get_unicode()
                 return self.get_buffer()
             finally:
                 self.restore()
    diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py
    --- a/lib_pypy/pyrepl/readline.py
    +++ b/lib_pypy/pyrepl/readline.py
    @@ -198,7 +198,7 @@
             reader.ps1 = prompt
             return reader.readline()
     
    -    def multiline_input(self, more_lines, ps1, ps2):
    +    def multiline_input(self, more_lines, ps1, ps2, returns_unicode=False):
             """Read an input on possibly multiple lines, asking for more
             lines as long as 'more_lines(unicodetext)' returns an object whose
             boolean value is true.
    @@ -209,7 +209,7 @@
                 reader.more_lines = more_lines
                 reader.ps1 = reader.ps2 = ps1
                 reader.ps3 = reader.ps4 = ps2
    -            return reader.readline()
    +            return reader.readline(returns_unicode=returns_unicode)
             finally:
                 reader.more_lines = saved
     
    diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py
    --- a/lib_pypy/pyrepl/simple_interact.py
    +++ b/lib_pypy/pyrepl/simple_interact.py
    @@ -54,7 +54,8 @@
                 ps1 = getattr(sys, 'ps1', '>>> ')
                 ps2 = getattr(sys, 'ps2', '... ')
                 try:
    -                statement = multiline_input(more_lines, ps1, ps2)
    +                statement = multiline_input(more_lines, ps1, ps2,
    +                                            returns_unicode=True)
                 except EOFError:
                     break
                 more = console.push(statement)
    diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py
    --- a/lib_pypy/stackless.py
    +++ b/lib_pypy/stackless.py
    @@ -4,121 +4,124 @@
     Please refer to their documentation.
     """
     
    -DEBUG = True
    -
    -def dprint(*args):
    -    for arg in args:
    -        print arg,
    -    print
     
     import traceback
    -import sys
    +import _continuation
    +from functools import partial
    +
    +class TaskletExit(Exception):
    +    pass
    +
    +CoroutineExit = TaskletExit
    +
    +class GWrap(_continuation.continulet):
    +    """This is just a wrapper around continulet to allow
    +       to stick additional attributes to a continulet.
    +       To be more concrete, we need a backreference to
    +       the coroutine object"""
    +
    +
    +class coroutine(object):
    +    "we can't have continulet as a base, because continulets can't be rebound"
    +
    +    def __init__(self):
    +        self._frame = None
    +        self.is_zombie = False
    +
    +    def __getattr__(self, attr):
    +        return getattr(self._frame, attr)
    +
    +    def __del__(self):
    +        self.is_zombie = True
    +        del self._frame
    +        self._frame = None
    +
    +    def bind(self, func, *argl, **argd):
    +        """coro.bind(f, *argl, **argd) -> None.
    +           binds function f to coro. f will be called with
    +           arguments *argl, **argd
    +        """
    +        if self._frame is None or not self._frame.is_pending():
    +
    +            def _func(c, *args, **kwargs):
    +                return func(*args, **kwargs)
    +            
    +            run = partial(_func, *argl, **argd)
    +            self._frame = frame = GWrap(run)
    +        else:
    +            raise ValueError("cannot bind a bound coroutine")
    +
    +    def switch(self):
    +        """coro.switch() -> returnvalue
    +           switches to coroutine coro. If the bound function
    +           f finishes, the returnvalue is that of f, otherwise
    +           None is returned
    +        """
    +        current = _getcurrent()
    +        current._jump_to(self)
    +
    +    def _jump_to(self, coroutine):
    +        _tls.current_coroutine = coroutine
    +        self._frame.switch(to=coroutine._frame)
    +
    +    def kill(self):
    +        """coro.kill() : kill coroutine coro"""
    +        _tls.current_coroutine = self
    +        self._frame.throw(CoroutineExit)
    +
    +    def _is_alive(self):
    +        if self._frame is None:
    +            return False
    +        return not self._frame.is_pending()
    +    is_alive = property(_is_alive)
    +    del _is_alive
    +
    +    def getcurrent():
    +        """coroutine.getcurrent() -> the currently running coroutine"""
    +        try:
    +            return _getcurrent()
    +        except AttributeError:
    +            return _maincoro
    +    getcurrent = staticmethod(getcurrent)
    +
    +    def __reduce__(self):
    +        raise TypeError, 'pickling is not possible based upon continulets'
    +
    +
    +def _getcurrent():
    +    "Returns the current coroutine (i.e. the one which called this function)."
    +    try:
    +        return _tls.current_coroutine
    +    except AttributeError:
    +        # first call in this thread: current == main
    +        _coroutine_create_main()
    +        return _tls.current_coroutine
    +
     try:
    -    # If _stackless can be imported then TaskletExit and CoroutineExit are 
    -    # automatically added to the builtins.
    -    from _stackless import coroutine, greenlet
    -except ImportError: # we are running from CPython
    -    from greenlet import greenlet, GreenletExit
    -    TaskletExit = CoroutineExit = GreenletExit
    -    del GreenletExit
    -    try:
    -        from functools import partial
    -    except ImportError: # we are not running python 2.5
    -        class partial(object):
    -            # just enough of 'partial' to be usefull
    -            def __init__(self, func, *argl, **argd):
    -                self.func = func
    -                self.argl = argl
    -                self.argd = argd
    +    from thread import _local
    +except ImportError:
    +    class _local(object):    # assume no threads
    +        pass
     
    -            def __call__(self):
    -                return self.func(*self.argl, **self.argd)
    +_tls = _local()
     
    -    class GWrap(greenlet):
    -        """This is just a wrapper around greenlets to allow
    -           to stick additional attributes to a greenlet.
    -           To be more concrete, we need a backreference to
    -           the coroutine object"""
    +def _coroutine_create_main():
    +    # create the main coroutine for this thread
    +    _tls.current_coroutine = None
    +    main_coroutine = coroutine()
    +    main_coroutine.bind(lambda x:x)
    +    _tls.main_coroutine = main_coroutine
    +    _tls.current_coroutine = main_coroutine
    +    return main_coroutine
     
    -    class MWrap(object):
    -        def __init__(self,something):
    -            self.something = something
     
    -        def __getattr__(self, attr):
    -            return getattr(self.something, attr)
    +_maincoro = _coroutine_create_main()
     
    -    class coroutine(object):
    -        "we can't have greenlet as a base, because greenlets can't be rebound"
    -
    -        def __init__(self):
    -            self._frame = None
    -            self.is_zombie = False
    -
    -        def __getattr__(self, attr):
    -            return getattr(self._frame, attr)
    -
    -        def __del__(self):
    -            self.is_zombie = True
    -            del self._frame
    -            self._frame = None
    -
    -        def bind(self, func, *argl, **argd):
    -            """coro.bind(f, *argl, **argd) -> None.
    -               binds function f to coro. f will be called with
    -               arguments *argl, **argd
    -            """
    -            if self._frame is None or self._frame.dead:
    -                self._frame = frame = GWrap()
    -                frame.coro = self
    -            if hasattr(self._frame, 'run') and self._frame.run:
    -                raise ValueError("cannot bind a bound coroutine")
    -            self._frame.run = partial(func, *argl, **argd)
    -
    -        def switch(self):
    -            """coro.switch() -> returnvalue
    -               switches to coroutine coro. If the bound function
    -               f finishes, the returnvalue is that of f, otherwise
    -               None is returned
    -            """
    -            try:
    -                return greenlet.switch(self._frame)
    -            except TypeError, exp: # self._frame is the main coroutine
    -                return greenlet.switch(self._frame.something)
    -
    -        def kill(self):
    -            """coro.kill() : kill coroutine coro"""
    -            self._frame.throw()
    -
    -        def _is_alive(self):
    -            if self._frame is None:
    -                return False
    -            return not self._frame.dead
    -        is_alive = property(_is_alive)
    -        del _is_alive
    -
    -        def getcurrent():
    -            """coroutine.getcurrent() -> the currently running coroutine"""
    -            try:
    -                return greenlet.getcurrent().coro
    -            except AttributeError:
    -                return _maincoro
    -        getcurrent = staticmethod(getcurrent)
    -
    -        def __reduce__(self):
    -            raise TypeError, 'pickling is not possible based upon greenlets'
    -
    -    _maincoro = coroutine()
    -    maingreenlet = greenlet.getcurrent()
    -    _maincoro._frame = frame = MWrap(maingreenlet)
    -    frame.coro = _maincoro
    -    del frame
    -    del maingreenlet
     
     from collections import deque
     
     import operator
    -__all__ = 'run getcurrent getmain schedule tasklet channel coroutine \
    -                greenlet'.split()
    +__all__ = 'run getcurrent getmain schedule tasklet channel coroutine'.split()
     
     _global_task_id = 0
     _squeue = None
    @@ -131,7 +134,8 @@
     def _scheduler_remove(value):
         try:
             del _squeue[operator.indexOf(_squeue, value)]
    -    except ValueError:pass
    +    except ValueError:
    +        pass
     
     def _scheduler_append(value, normal=True):
         if normal:
    diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
    --- a/pypy/config/pypyoption.py
    +++ b/pypy/config/pypyoption.py
    @@ -27,7 +27,7 @@
     # --allworkingmodules
     working_modules = default_modules.copy()
     working_modules.update(dict.fromkeys(
    -    ["_socket", "unicodedata", "mmap", "fcntl", "_locale",
    +    ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd",
          "rctime" , "select", "zipimport", "_lsprof",
          "crypt", "signal", "_rawffi", "termios", "zlib", "bz2",
          "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO",
    @@ -58,6 +58,7 @@
         # unix only modules
         del working_modules["crypt"]
         del working_modules["fcntl"]
    +    del working_modules["pwd"]
         del working_modules["termios"]
         del working_modules["_minimal_curses"]
     
    diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py
    --- a/pypy/config/test/test_config.py
    +++ b/pypy/config/test/test_config.py
    @@ -281,11 +281,11 @@
     
     def test_underscore_in_option_name():
         descr = OptionDescription("opt", "", [
    -        BoolOption("_stackless", "", default=False),
    +        BoolOption("_foobar", "", default=False),
         ])
         config = Config(descr)
         parser = to_optparse(config)
    -    assert parser.has_option("--_stackless")
    +    assert parser.has_option("--_foobar")
     
     def test_none():
         dummy1 = BoolOption('dummy1', 'doc dummy', default=False, cmdline=None)
    diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._stackless.txt
    deleted file mode 100644
    --- a/pypy/doc/config/objspace.usemodules._stackless.txt
    +++ /dev/null
    @@ -1,1 +0,0 @@
    -Deprecated.
    diff --git a/pypy/doc/config/objspace.usemodules.pwd.txt b/pypy/doc/config/objspace.usemodules.pwd.txt
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/config/objspace.usemodules.pwd.txt
    @@ -0,0 +1,2 @@
    +Use the 'pwd' module. 
    +This module is expected to be fully working.
    diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
    --- a/pypy/doc/faq.rst
    +++ b/pypy/doc/faq.rst
    @@ -315,6 +315,28 @@
     
     .. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html
     
    +---------------------------------------------------------
    +Can RPython modules for PyPy be translated independently?
    +---------------------------------------------------------
    +
    +No, you have to rebuild the entire interpreter.  This means two things:
    +
    +* It is imperative to use test-driven development.  You have to test
    +  exhaustively your module in pure Python, before even attempting to
    +  translate it.  Once you translate it, you should have only a few typing
    +  issues left to fix, but otherwise the result should work out of the box.
    +
    +* Second, and perhaps most important: do you have a really good reason
    +  for writing the module in RPython in the first place?  Nowadays you
    +  should really look at alternatives, like writing it in pure Python,
    +  using ctypes if it needs to call C code.  Other alternatives are being
    +  developed too (as of summer 2011), like a Cython binding.
    +
    +In this context it is not that important to be able to translate
    +RPython modules independently of translating the complete interpreter.
    +(It could be done given enough efforts, but it's a really serious
    +undertaking.  Consider it as quite unlikely for now.)
    +
     ----------------------------------------------------------
     Why does PyPy draw a Mandelbrot fractal while translating?
     ----------------------------------------------------------
    diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst
    --- a/pypy/doc/jit/pyjitpl5.rst
    +++ b/pypy/doc/jit/pyjitpl5.rst
    @@ -103,7 +103,7 @@
     
     The meta-interpreter starts interpreting the JIT bytecode.  Each operation is
     executed and then recorded in a list of operations, called the trace.
    -Operations can have a list of boxes that operate on, arguments.  Some operations
    +Operations can have a list of boxes they operate on, arguments.  Some operations
     (like GETFIELD and GETARRAYITEM) also have special objects that describe how
     their arguments are laid out in memory.  All possible operations generated by
     tracing are listed in metainterp/resoperation.py.  When a (interpreter-level)
    diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst
    --- a/pypy/doc/stackless.rst
    +++ b/pypy/doc/stackless.rst
    @@ -199,7 +199,11 @@
     The following features (present in some past Stackless version of PyPy)
     are for the time being not supported any more:
     
    -* Tasklets and channels (needs to be rewritten at app-level)
    +* Tasklets and channels (currently ``stackless.py`` seems to import,
    +  but you have tasklets on top of coroutines on top of greenlets on
    +  top of continulets on top of stacklets, and it's probably not too
    +  hard to cut two of these levels by adapting ``stackless.py`` to
    +  use directly continulets)
     
     * Coroutines (could be rewritten at app-level)
     
    @@ -209,6 +213,13 @@
     
     * Automatic unlimited stack (must be emulated__ so far)
     
    +* Support for other CPUs than x86 and x86-64
    +
    +* The app-level ``f_back`` field of frames crossing continulet boundaries
    +  is None for now, unlike what I explain in the theoretical overview
    +  above.  It mostly means that in a ``pdb.set_trace()`` you cannot go
    +  ``up`` past countinulet boundaries.  This could be fixed.
    +
     .. __: `recursion depth limit`_
     
     (*) Pickling, as well as changing threads, could be implemented by using
    @@ -217,9 +228,8 @@
     "hard" switch (like now) when the C stack contains non-trivial C frames
     to save, and a "soft" switch (like previously) when it contains only
     simple calls from Python to Python.  Soft-switched continulets would
    -also consume a bit less RAM, at the possible expense of making the
    -switch a bit slower (unsure about that; what is the Stackless Python
    -experience?).
    +also consume a bit less RAM, and the switch might be a bit faster too
    +(unsure about that; what is the Stackless Python experience?).
     
     
     Recursion depth limit
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -626,9 +626,9 @@
                 self.default_compiler = compiler
                 return compiler
     
    -    def createframe(self, code, w_globals, closure=None):
    +    def createframe(self, code, w_globals, outer_func=None):
             "Create an empty PyFrame suitable for this code object."
    -        return self.FrameClass(self, code, w_globals, closure)
    +        return self.FrameClass(self, code, w_globals, outer_func)
     
         def allocate_lock(self):
             """Return an interp-level Lock object if threads are enabled,
    diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
    --- a/pypy/interpreter/function.py
    +++ b/pypy/interpreter/function.py
    @@ -30,7 +30,7 @@
         can_change_code = True
         _immutable_fields_ = ['code?',
                               'w_func_globals?',
    -                          'closure?',
    +                          'closure?[*]',
                               'defs_w?[*]',
                               'name?']
     
    @@ -96,7 +96,7 @@
                 assert isinstance(code, PyCode)
                 if nargs < 5:
                     new_frame = self.space.createframe(code, self.w_func_globals,
    -                                                   self.closure)
    +                                                   self)
                     for i in funccallunrolling:
                         if i < nargs:
                             new_frame.locals_stack_w[i] = args_w[i]
    @@ -156,7 +156,7 @@
         def _flat_pycall(self, code, nargs, frame):
             # code is a PyCode
             new_frame = self.space.createframe(code, self.w_func_globals,
    -                                                   self.closure)
    +                                                   self)
             for i in xrange(nargs):
                 w_arg = frame.peekvalue(nargs-1-i)
                 new_frame.locals_stack_w[i] = w_arg
    @@ -167,7 +167,7 @@
         def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load):
             # code is a PyCode
             new_frame = self.space.createframe(code, self.w_func_globals,
    -                                                   self.closure)
    +                                                   self)
             for i in xrange(nargs):
                 w_arg = frame.peekvalue(nargs-1-i)
                 new_frame.locals_stack_w[i] = w_arg
    diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
    --- a/pypy/interpreter/miscutils.py
    +++ b/pypy/interpreter/miscutils.py
    @@ -167,3 +167,7 @@
     
         def getmainthreadvalue(self):
             return self._value
    +
    +    def getallvalues(self):
    +        return {0: self._value}
    +
    diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
    --- a/pypy/interpreter/nestedscope.py
    +++ b/pypy/interpreter/nestedscope.py
    @@ -8,7 +8,7 @@
     
     class Cell(Wrappable):
         "A simple container for a wrapped value."
    -    
    +
         def __init__(self, w_value=None):
             self.w_value = w_value
     
    @@ -90,32 +90,33 @@
         #     variables coming from a parent function in which i'm nested
         # 'closure' is a list of Cell instances: the received free vars.
     
    -    cells = None
    -
         @jit.unroll_safe
    -    def initialize_frame_scopes(self, closure, code):
    -        super_initialize_frame_scopes(self, closure, code)
    +    def initialize_frame_scopes(self, outer_func, code):
    +        super_initialize_frame_scopes(self, outer_func, code)
             ncellvars = len(code.co_cellvars)
             nfreevars = len(code.co_freevars)
             if not nfreevars:
                 if not ncellvars:
    +                self.cells = []
                     return            # no self.cells needed - fast path
    -            if closure is None:
    -                closure = []
    -        elif closure is None:
    +        elif outer_func is None:
                 space = self.space
                 raise OperationError(space.w_TypeError,
                                      space.wrap("directly executed code object "
                                                 "may not contain free variables"))
    -        if len(closure) != nfreevars:
    +        if outer_func and outer_func.closure:
    +            closure_size = len(outer_func.closure)
    +        else:
    +            closure_size = 0
    +        if closure_size != nfreevars:
                 raise ValueError("code object received a closure with "
                                      "an unexpected number of free variables")
             self.cells = [None] * (ncellvars + nfreevars)
             for i in range(ncellvars):
                 self.cells[i] = Cell()
             for i in range(nfreevars):
    -            self.cells[i + ncellvars] = closure[i]
    -    
    +            self.cells[i + ncellvars] = outer_func.closure[i]
    +
         def _getcells(self):
             return self.cells
     
    diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
    --- a/pypy/interpreter/pycode.py
    +++ b/pypy/interpreter/pycode.py
    @@ -198,7 +198,7 @@
     
         def funcrun(self, func, args):
             frame = self.space.createframe(self, func.w_func_globals,
    -                                  func.closure)
    +                                  func)
             sig = self._signature
             # speed hack
             fresh_frame = jit.hint(frame, access_directly=True,
    @@ -211,7 +211,7 @@
     
         def funcrun_obj(self, func, w_obj, args):
             frame = self.space.createframe(self, func.w_func_globals,
    -                                  func.closure)
    +                                  func)
             sig = self._signature
             # speed hack
             fresh_frame = jit.hint(frame, access_directly=True,
    diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
    --- a/pypy/interpreter/pyframe.py
    +++ b/pypy/interpreter/pyframe.py
    @@ -51,7 +51,7 @@
         is_being_profiled        = False
         escaped                  = False  # see mark_as_escaped()
     
    -    def __init__(self, space, code, w_globals, closure):
    +    def __init__(self, space, code, w_globals, outer_func):
             if not we_are_translated():
                 assert type(self) in (space.FrameClass, CPythonFrame), (
                     "use space.FrameClass(), not directly PyFrame()")
    @@ -70,7 +70,7 @@
                 self.builtin = space.builtin.pick_builtin(w_globals)
             # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
             # class bodies only have CO_NEWLOCALS.
    -        self.initialize_frame_scopes(closure, code)
    +        self.initialize_frame_scopes(outer_func, code)
             self.f_lineno = code.co_firstlineno
     
         def mark_as_escaped(self):
    @@ -117,8 +117,8 @@
                 return self.builtin
             else:
                 return self.space.builtin
    -        
    -    def initialize_frame_scopes(self, closure, code): 
    +
    +    def initialize_frame_scopes(self, outer_func, code):
             # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
             # class bodies only have CO_NEWLOCALS.
             # CO_NEWLOCALS: make a locals dict unless optimized is also set
    @@ -385,7 +385,11 @@
             
             # do not use the instance's __init__ but the base's, because we set
             # everything like cells from here
    -        PyFrame.__init__(self, space, pycode, w_globals, closure)
    +        # XXX hack
    +        from pypy.interpreter.function import Function
    +        outer_func = Function(space, None, closure=closure,
    +                             forcename="fake")
    +        PyFrame.__init__(self, space, pycode, w_globals, outer_func)
             f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True)
             new_frame.f_backref = jit.non_virtual_ref(f_back)
     
    diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
    --- a/pypy/interpreter/pyopcode.py
    +++ b/pypy/interpreter/pyopcode.py
    @@ -1523,10 +1523,8 @@
     
             if not isinstance(prog, codetype):
                 filename = ''
    -            if not isinstance(prog, str):
    -                if isinstance(prog, basestring):
    -                    prog = str(prog)
    -                elif isinstance(prog, file):
    +            if not isinstance(prog, basestring):
    +                if isinstance(prog, file):
                         filename = prog.name
                         prog = prog.read()
                     else:
    diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py
    --- a/pypy/interpreter/pyparser/future.py
    +++ b/pypy/interpreter/pyparser/future.py
    @@ -109,25 +109,19 @@
                 self.getc() == self.getc(+2)):
                 self.pos += 3
                 while 1: # Deal with a triple quoted docstring
    -                if self.getc() == '\\':
    -                    self.pos += 2
    +                c = self.getc()
    +                if c == '\\':
    +                    self.pos += 1
    +                    self._skip_next_char_from_docstring()
    +                elif c != endchar:
    +                    self._skip_next_char_from_docstring()
                     else:
    -                    c = self.getc()
    -                    if c != endchar:
    -                        self.pos += 1
    -                        if c == '\n':
    -                            self.atbol()
    -                        elif c == '\r':
    -                            if self.getc() == '\n':
    -                                self.pos += 1
    -                                self.atbol()
    -                    else:
    -                        self.pos += 1
    -                        if (self.getc() == endchar and
    -                            self.getc(+1) == endchar):
    -                            self.pos += 2
    -                            self.consume_empty_line()
    -                            break
    +                    self.pos += 1
    +                    if (self.getc() == endchar and
    +                        self.getc(+1) == endchar):
    +                        self.pos += 2
    +                        self.consume_empty_line()
    +                        break
     
             else: # Deal with a single quoted docstring
                 self.pos += 1
    @@ -138,17 +132,21 @@
                         self.consume_empty_line()
                         return
                     elif c == '\\':
    -                    # Deal with linefeeds
    -                    if self.getc() != '\r':
    -                        self.pos += 1
    -                    else:
    -                        self.pos += 1
    -                        if self.getc() == '\n':
    -                            self.pos += 1
    +                    self._skip_next_char_from_docstring()
                     elif c in '\r\n':
                         # Syntax error
                         return
     
    +    def _skip_next_char_from_docstring(self):
    +        c = self.getc()
    +        self.pos += 1
    +        if c == '\n':
    +            self.atbol()
    +        elif c == '\r':
    +            if self.getc() == '\n':
    +                self.pos += 1
    +            self.atbol()
    +
         def consume_continuation(self):
             c = self.getc()
             if c in '\n\r':
    diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py
    --- a/pypy/interpreter/pyparser/test/test_futureautomaton.py
    +++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py
    @@ -221,6 +221,14 @@
         assert f.lineno == 3
         assert f.col_offset == 0
     
    +def test_lots_of_continuation_lines():
    +    s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n"
    +    f = run(s)
    +    assert f.pos == len(s)
    +    assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
    +    assert f.lineno == 8
    +    assert f.col_offset == 0
    +
     # This looks like a bug in cpython parser
     # and would require extensive modifications
     # to future.py in order to emulate the same behaviour
    @@ -239,3 +247,19 @@
             raise AssertionError('IndentationError not raised')
         assert f.lineno == 2
         assert f.col_offset == 0
    +
    +def test_continuation_lines_in_docstring_single_quoted():
    +    s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom  __future__ import division\n'
    +    f = run(s)
    +    assert f.pos == len(s)
    +    assert f.flags == fut.CO_FUTURE_DIVISION
    +    assert f.lineno == 8
    +    assert f.col_offset == 0
    +
    +def test_continuation_lines_in_docstring_triple_quoted():
    +    s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom  __future__ import division\n'
    +    f = run(s)
    +    assert f.pos == len(s)
    +    assert f.flags == fut.CO_FUTURE_DIVISION
    +    assert f.lineno == 8
    +    assert f.col_offset == 0
    diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
    --- a/pypy/interpreter/test/test_exec.py
    +++ b/pypy/interpreter/test/test_exec.py
    @@ -219,3 +219,30 @@
                 raise e
     
             assert res == 1
    +
    +    def test_exec_unicode(self):
    +        # 's' is a string
    +        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
    +        # 'u' is a unicode
    +        u = s.decode('utf-8')
    +        exec u
    +        assert len(x) == 6
    +        assert ord(x[0]) == 0x0439
    +        assert ord(x[1]) == 0x0446
    +        assert ord(x[2]) == 0x0443
    +        assert ord(x[3]) == 0x043a
    +        assert ord(x[4]) == 0x0435
    +        assert ord(x[5]) == 0x043d
    +
    +    def test_eval_unicode(self):
    +        u = "u'%s'" % unichr(0x1234)
    +        v = eval(u)
    +        assert v == unichr(0x1234)
    +
    +    def test_compile_unicode(self):
    +        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
    +        u = s.decode('utf-8')
    +        c = compile(u, '', 'exec')
    +        exec c
    +        assert len(x) == 6
    +        assert ord(x[0]) == 0x0439
    diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py
    --- a/pypy/jit/backend/llgraph/runner.py
    +++ b/pypy/jit/backend/llgraph/runner.py
    @@ -25,13 +25,14 @@
     class Descr(history.AbstractDescr):
     
         def __init__(self, ofs, typeinfo, extrainfo=None, name=None,
    -                 arg_types=None, count_fields_if_immut=-1):
    +                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0):
             self.ofs = ofs
             self.typeinfo = typeinfo
             self.extrainfo = extrainfo
             self.name = name
             self.arg_types = arg_types
             self.count_fields_if_immut = count_fields_if_immut
    +        self.ffi_flags = ffi_flags
     
         def get_arg_types(self):
             return self.arg_types
    @@ -67,6 +68,9 @@
         def count_fields_if_immutable(self):
             return self.count_fields_if_immut
     
    +    def get_ffi_flags(self):
    +        return self.ffi_flags
    +
         def __lt__(self, other):
             raise TypeError("cannot use comparison on Descrs")
         def __le__(self, other):
    @@ -114,14 +118,14 @@
             return False
     
         def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None,
    -                 arg_types=None, count_fields_if_immut=-1):
    +                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0):
             key = (ofs, typeinfo, extrainfo, name, arg_types,
    -               count_fields_if_immut)
    +               count_fields_if_immut, ffi_flags)
             try:
                 return self._descrs[key]
             except KeyError:
                 descr = Descr(ofs, typeinfo, extrainfo, name, arg_types,
    -                          count_fields_if_immut)
    +                          count_fields_if_immut, ffi_flags)
                 self._descrs[key] = descr
                 return descr
     
    @@ -326,7 +330,7 @@
             return self.getdescr(0, token[0], extrainfo=extrainfo,
                                  arg_types=''.join(arg_types))
     
    -    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo):
    +    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags):
             from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind
             from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind
             arg_types = []
    @@ -339,7 +343,8 @@
             except UnsupportedKind:
                 return None
             return self.getdescr(0, reskind, extrainfo=extrainfo,
    -                             arg_types=''.join(arg_types))
    +                             arg_types=''.join(arg_types),
    +                             ffi_flags=ffi_flags)
     
     
         def grab_exc_value(self):
    diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py
    --- a/pypy/jit/backend/llsupport/descr.py
    +++ b/pypy/jit/backend/llsupport/descr.py
    @@ -260,10 +260,12 @@
         _clsname = ''
         loop_token = None
         arg_classes = ''     # <-- annotation hack
    +    ffi_flags = 0
     
    -    def __init__(self, arg_classes, extrainfo=None):
    +    def __init__(self, arg_classes, extrainfo=None, ffi_flags=0):
             self.arg_classes = arg_classes    # string of "r" and "i" (ref/int)
             self.extrainfo = extrainfo
    +        self.ffi_flags = ffi_flags
     
         def __repr__(self):
             res = '%s(%s)' % (self.__class__.__name__, self.arg_classes)
    @@ -284,6 +286,13 @@
         def get_extra_info(self):
             return self.extrainfo
     
    +    def get_ffi_flags(self):
    +        return self.ffi_flags
    +
    +    def get_call_conv(self):
    +        from pypy.rlib.clibffi import get_call_conv
    +        return get_call_conv(self.ffi_flags, True)
    +
         def get_arg_types(self):
             return self.arg_classes
     
    @@ -391,8 +400,8 @@
         """
         _clsname = 'DynamicIntCallDescr'
     
    -    def __init__(self, arg_classes, result_size, result_sign, extrainfo=None):
    -        BaseIntCallDescr.__init__(self, arg_classes, extrainfo)
    +    def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0):
    +        BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags)
             assert isinstance(result_sign, bool)
             self._result_size = chr(result_size)
             self._result_sign = result_sign
    diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py
    --- a/pypy/jit/backend/llsupport/ffisupport.py
    +++ b/pypy/jit/backend/llsupport/ffisupport.py
    @@ -8,7 +8,7 @@
     class UnsupportedKind(Exception):
         pass
     
    -def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None):
    +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0):
         """Get a call descr: the types of result and args are represented by
         rlib.libffi.types.*"""
         try:
    @@ -20,18 +20,24 @@
         if reskind == history.INT:
             size = intmask(ffi_result.c_size)
             signed = is_ffi_type_signed(ffi_result)
    -        return DynamicIntCallDescr(arg_classes, size, signed, extrainfo)
    +        return DynamicIntCallDescr(arg_classes, size, signed, extrainfo,
    +                                   ffi_flags=ffi_flags)
         elif reskind == history.REF:
    -        return  NonGcPtrCallDescr(arg_classes, extrainfo)
    +        return  NonGcPtrCallDescr(arg_classes, extrainfo,
    +                                  ffi_flags=ffi_flags)
         elif reskind == history.FLOAT:
    -        return FloatCallDescr(arg_classes, extrainfo)
    +        return FloatCallDescr(arg_classes, extrainfo,
    +                              ffi_flags=ffi_flags)
         elif reskind == history.VOID:
    -        return VoidCallDescr(arg_classes, extrainfo)
    +        return VoidCallDescr(arg_classes, extrainfo,
    +                             ffi_flags=ffi_flags)
         elif reskind == 'L':
    -        return LongLongCallDescr(arg_classes, extrainfo)
    +        return LongLongCallDescr(arg_classes, extrainfo,
    +                                 ffi_flags=ffi_flags)
         elif reskind == 'S':
             SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT)
    -        return SingleFloatCallDescr(arg_classes, extrainfo)
    +        return SingleFloatCallDescr(arg_classes, extrainfo,
    +                                    ffi_flags=ffi_flags)
         assert False
     
     def get_ffi_type_kind(cpu, ffi_type):
    diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py
    --- a/pypy/jit/backend/llsupport/llmodel.py
    +++ b/pypy/jit/backend/llsupport/llmodel.py
    @@ -257,10 +257,10 @@
         def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
             return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo)
     
    -    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo):
    +    def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags):
             from pypy.jit.backend.llsupport import ffisupport
             return ffisupport.get_call_descr_dynamic(self, ffi_args, ffi_result,
    -                                                 extrainfo)
    +                                                 extrainfo, ffi_flags)
     
         def get_overflow_error(self):
             ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable)
    diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
    --- a/pypy/jit/backend/llsupport/regalloc.py
    +++ b/pypy/jit/backend/llsupport/regalloc.py
    @@ -57,11 +57,13 @@
         all_regs              = []
         no_lower_byte_regs    = []
         save_around_call_regs = []
    -    
    +    frame_reg             = None
    +
         def __init__(self, longevity, frame_manager=None, assembler=None):
             self.free_regs = self.all_regs[:]
             self.longevity = longevity
             self.reg_bindings = {}
    +        self.bindings_to_frame_reg = {}
             self.position = -1
             self.frame_manager = frame_manager
             self.assembler = assembler
    @@ -218,6 +220,10 @@
             self.reg_bindings[v] = loc
             return loc
     
    +    def force_allocate_frame_reg(self, v):
    +        """ Allocate the new variable v in the frame register."""
    +        self.bindings_to_frame_reg[v] = None
    +
         def force_spill_var(self, var):
             self._sync_var(var)
             try:
    @@ -236,6 +242,8 @@
             try:
                 return self.reg_bindings[box]
             except KeyError:
    +            if box in self.bindings_to_frame_reg:
    +                return self.frame_reg
                 return self.frame_manager.loc(box)
     
         def return_constant(self, v, forbidden_vars=[], selected_reg=None):
    @@ -264,8 +272,9 @@
             self._check_type(v)
             if isinstance(v, Const):
                 return self.return_constant(v, forbidden_vars, selected_reg)
    -        
             prev_loc = self.loc(v)
    +        if prev_loc is self.frame_reg and selected_reg is None:
    +            return prev_loc
             loc = self.force_allocate_reg(v, forbidden_vars, selected_reg,
                                           need_lower_byte=need_lower_byte)
             if prev_loc is not loc:
    diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py
    --- a/pypy/jit/backend/llsupport/test/test_ffisupport.py
    +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py
    @@ -13,17 +13,19 @@
     
     def test_call_descr_dynamic():
         args = [types.sint, types.pointer]
    -    descr = get_call_descr_dynamic(FakeCPU(), args, types.sint)
    +    descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42)
         assert isinstance(descr, DynamicIntCallDescr)
         assert descr.arg_classes == 'ii'
    +    assert descr.get_ffi_flags() == 42
     
         args = [types.sint, types.double, types.pointer]
         descr = get_call_descr_dynamic(FakeCPU(), args, types.void)
         assert descr is None    # missing floats
         descr = get_call_descr_dynamic(FakeCPU(supports_floats=True),
    -                                   args, types.void)
    +                                   args, types.void, ffi_flags=43)
         assert isinstance(descr, VoidCallDescr)
         assert descr.arg_classes == 'ifi'
    +    assert descr.get_ffi_flags() == 43
     
         descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8)
         assert isinstance(descr, DynamicIntCallDescr)
    @@ -39,14 +41,16 @@
             descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong)
             assert descr is None   # missing longlongs
             descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True),
    -                                       [], types.slonglong)
    +                                       [], types.slonglong, ffi_flags=43)
             assert isinstance(descr, LongLongCallDescr)
    +        assert descr.get_ffi_flags() == 43
         else:
             assert types.slonglong is types.slong
     
         descr = get_call_descr_dynamic(FakeCPU(), [], types.float)
         assert descr is None   # missing singlefloats
         descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True),
    -                                   [], types.float)
    +                                   [], types.float, ffi_flags=44)
         SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT)
         assert isinstance(descr, SingleFloatCallDescr)
    +    assert descr.get_ffi_flags() == 44
    diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
    --- a/pypy/jit/backend/test/runner_test.py
    +++ b/pypy/jit/backend/test/runner_test.py
    @@ -468,7 +468,7 @@
                 assert longlong.getrealfloat(x) == 3.5 - 42
     
         def test_call(self):
    -        from pypy.rlib.libffi import types
    +        from pypy.rlib.libffi import types, FUNCFLAG_CDECL
     
             def func_int(a, b):
                 return a + b
    @@ -497,7 +497,8 @@
                 assert res.value == 2 * num
                 # then, try it with the dynamic calldescr
                 dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type,
    -                                                    EffectInfo.MOST_GENERAL)
    +                                                    EffectInfo.MOST_GENERAL,
    +                                                    ffi_flags=FUNCFLAG_CDECL)
                 res = self.execute_operation(rop.CALL,
                                              [funcbox, BoxInt(num), BoxInt(num)],
                                              'int', descr=dyn_calldescr)
    @@ -1944,7 +1945,7 @@
             assert values == [1, 10]
     
         def test_call_to_c_function(self):
    -        from pypy.rlib.libffi import CDLL, types, ArgChain
    +        from pypy.rlib.libffi import CDLL, types, ArgChain, FUNCFLAG_CDECL
             from pypy.rpython.lltypesystem.ll2ctypes import libc_name
             libc = CDLL(libc_name)
             c_tolower = libc.getpointer('tolower', [types.uchar], types.sint)
    @@ -1955,7 +1956,8 @@
             func_adr = llmemory.cast_ptr_to_adr(c_tolower.funcsym)
             funcbox = ConstInt(heaptracker.adr2int(func_adr))
             calldescr = cpu.calldescrof_dynamic([types.uchar], types.sint,
    -                                            EffectInfo.MOST_GENERAL)
    +                                            EffectInfo.MOST_GENERAL,
    +                                            ffi_flags=FUNCFLAG_CDECL)
             i1 = BoxInt()
             i2 = BoxInt()
             tok = BoxInt()
    @@ -2012,7 +2014,8 @@
             calldescr = cpu.calldescrof_dynamic([types.pointer, types_size_t,
                                                  types_size_t, types.pointer],
                                                 types.void,
    -                                            EffectInfo.MOST_GENERAL)
    +                                            EffectInfo.MOST_GENERAL,
    +                                            ffi_flags=clibffi.FUNCFLAG_CDECL)
             i0 = BoxInt()
             i1 = BoxInt()
             i2 = BoxInt()
    @@ -2038,6 +2041,62 @@
             assert len(glob.lst) > 0
             lltype.free(raw, flavor='raw')
     
    +    def test_call_to_winapi_function(self):
    +        from pypy.rlib.clibffi import _WIN32, FUNCFLAG_STDCALL
    +        if not _WIN32:
    +            py.test.skip("Windows test only")
    +        from pypy.rlib.libffi import CDLL, types, ArgChain
    +        from pypy.rlib.rwin32 import DWORD
    +        libc = CDLL('KERNEL32')
    +        c_GetCurrentDir = libc.getpointer('GetCurrentDirectoryA',
    +                                          [types.ulong, types.pointer],
    +                                          types.ulong)
    +
    +        cwd = os.getcwd()
    +        buflen = len(cwd) + 10
    +        buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
    +        argchain = ArgChain().arg(rffi.cast(DWORD, buflen)).arg(buffer)
    +        res = c_GetCurrentDir.call(argchain, DWORD)
    +        assert rffi.cast(lltype.Signed, res) == len(cwd)
    +        assert rffi.charp2strn(buffer, buflen) == cwd
    +        lltype.free(buffer, flavor='raw')
    +
    +        cpu = self.cpu
    +        func_adr = llmemory.cast_ptr_to_adr(c_GetCurrentDir.funcsym)
    +        funcbox = ConstInt(heaptracker.adr2int(func_adr))
    +        calldescr = cpu.calldescrof_dynamic([types.ulong, types.pointer],
    +                                            types.ulong,
    +                                            EffectInfo.MOST_GENERAL,
    +                                            ffi_flags=FUNCFLAG_STDCALL)
    +        i1 = BoxInt()
    +        i2 = BoxInt()
    +        faildescr = BasicFailDescr(1)
    +        # if the stdcall convention is ignored, then ESP is wrong after the
    +        # call: 8 bytes too much.  If we repeat the call often enough, crash.
    +        ops = []
    +        for i in range(50):
    +            i3 = BoxInt()
    +            ops += [
    +                ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3,
    +                             descr=calldescr),
    +                ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
    +                ]
    +            ops[-1].setfailargs([])
    +        ops += [
    +            ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0))
    +        ]
    +        looptoken = LoopToken()
    +        self.cpu.compile_loop([i1, i2], ops, looptoken)
    +
    +        buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
    +        self.cpu.set_future_value_int(0, buflen)
    +        self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer))
    +        fail = self.cpu.execute_token(looptoken)
    +        assert fail.identifier == 0
    +        assert self.cpu.get_latest_value_int(0) == len(cwd)
    +        assert rffi.charp2strn(buffer, buflen) == cwd
    +        lltype.free(buffer, flavor='raw')
    +
         def test_guard_not_invalidated(self):
             cpu = self.cpu
             i0 = BoxInt()
    diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
    --- a/pypy/jit/backend/x86/assembler.py
    +++ b/pypy/jit/backend/x86/assembler.py
    @@ -34,6 +34,7 @@
     from pypy.rlib.debug import (debug_print, debug_start, debug_stop,
                                  have_debug_prints)
     from pypy.rlib import rgc
    +from pypy.rlib.clibffi import FFI_DEFAULT_ABI
     from pypy.jit.backend.x86.jump import remap_frame_layout
     from pypy.jit.metainterp.history import ConstInt, BoxInt
     from pypy.jit.codewriter.effectinfo import EffectInfo
    @@ -956,6 +957,7 @@
             if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm):
                 self.mc.MOVSD(to_loc, from_loc)
             else:
    +            assert to_loc is not ebp
                 self.mc.MOV(to_loc, from_loc)
     
         regalloc_mov = mov # legacy interface
    @@ -1120,7 +1122,7 @@
             return genop_cmp_guard_float
     
         def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax,
    -                   argtypes=None):
    +                   argtypes=None, callconv=FFI_DEFAULT_ABI):
             if IS_X86_64:
                 return self._emit_call_64(force_index, x, arglocs, start, argtypes)
     
    @@ -1149,6 +1151,16 @@
             # x is a location
             self.mc.CALL(x)
             self.mark_gc_roots(force_index)
    +        #
    +        if callconv != FFI_DEFAULT_ABI:
    +            self._fix_stdcall(callconv, p)
    +
    +    def _fix_stdcall(self, callconv, p):
    +        from pypy.rlib.clibffi import FFI_STDCALL
    +        assert callconv == FFI_STDCALL
    +        # it's a bit stupid, but we're just going to cancel the fact that
    +        # the called function just added 'p' to ESP, by subtracting it again.
    +        self.mc.SUB_ri(esp.value, p)
     
         def _emit_call_64(self, force_index, x, arglocs, start, argtypes):
             src_locs = []
    @@ -2127,7 +2139,8 @@
                 tmp = eax
     
             self._emit_call(force_index, x, arglocs, 3, tmp=tmp,
    -                        argtypes=op.getdescr().get_arg_types())
    +                        argtypes=op.getdescr().get_arg_types(),
    +                        callconv=op.getdescr().get_call_conv())
     
             if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8:
                 # a float or a long long return
    @@ -2498,11 +2511,6 @@
     
         genop_discard_cond_call_gc_wb_array = genop_discard_cond_call_gc_wb
     
    -    def genop_force_token(self, op, arglocs, resloc):
    -        # RegAlloc.consider_force_token ensures this:
    -        assert isinstance(resloc, RegLoc)
    -        self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS)
    -
         def not_implemented_op_discard(self, op, arglocs):
             not_implemented("not implemented operation: %s" % op.getopname())
     
    diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
    --- a/pypy/jit/backend/x86/regalloc.py
    +++ b/pypy/jit/backend/x86/regalloc.py
    @@ -29,6 +29,7 @@
         all_regs = [eax, ecx, edx, ebx, esi, edi]
         no_lower_byte_regs = [esi, edi]
         save_around_call_regs = [eax, edx, ecx]
    +    frame_reg = ebp
     
         REGLOC_TO_GCROOTMAP_REG_INDEX = {
             ebx: 1,
    @@ -312,8 +313,11 @@
                         self.fm.frame_bindings[arg] = loc
                 else:
                     if isinstance(loc, RegLoc):
    -                    self.rm.reg_bindings[arg] = loc
    -                    used[loc] = None
    +                    if loc is ebp:
    +                        self.rm.bindings_to_frame_reg[arg] = None
    +                    else:
    +                        self.rm.reg_bindings[arg] = loc
    +                        used[loc] = None
                     else:
                         self.fm.frame_bindings[arg] = loc
             self.rm.free_regs = []
    @@ -1358,8 +1362,8 @@
                                                 self.assembler.datablockwrapper)
     
         def consider_force_token(self, op):
    -        loc = self.rm.force_allocate_reg(op.result)
    -        self.Perform(op, [], loc)
    +        # the FORCE_TOKEN operation returns directly 'ebp'
    +        self.rm.force_allocate_frame_reg(op.result)
     
         def not_implemented_op(self, op):
             not_implemented("not implemented operation: %s" % op.getopname())
    diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py
    --- a/pypy/jit/backend/x86/runner.py
    +++ b/pypy/jit/backend/x86/runner.py
    @@ -119,7 +119,8 @@
                 setitem(index, null)
     
         def get_latest_force_token(self):
    -        return self.assembler.fail_ebp + FORCE_INDEX_OFS
    +        # the FORCE_TOKEN operation and this helper both return 'ebp'.
    +        return self.assembler.fail_ebp
     
         def execute_token(self, executable_token):
             addr = executable_token._x86_bootstrap_code
    @@ -153,8 +154,9 @@
                                            flavor='raw', zero=True,
                                            immortal=True)
     
    -    def force(self, addr_of_force_index):
    +    def force(self, addr_of_force_token):
             TP = rffi.CArrayPtr(lltype.Signed)
    +        addr_of_force_index = addr_of_force_token + FORCE_INDEX_OFS
             fail_index = rffi.cast(TP, addr_of_force_index)[0]
             assert fail_index >= 0, "already forced!"
             faildescr = self.get_fail_descr_from_number(fail_index)
    @@ -164,7 +166,7 @@
             # start of "no gc operation!" block
             fail_index_2 = self.assembler.grab_frame_values(
                 bytecode,
    -            addr_of_force_index - FORCE_INDEX_OFS,
    +            addr_of_force_token,
                 self.all_null_registers)
             self.assembler.leave_jitted_hook()
             # end of "no gc operation!" block
    diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
    --- a/pypy/jit/backend/x86/rx86.py
    +++ b/pypy/jit/backend/x86/rx86.py
    @@ -527,6 +527,7 @@
     
         NOP = insn('\x90')
         RET = insn('\xC3')
    +    RET16_i = insn('\xC2', immediate(1, 'h'))
     
         PUSH_r = insn(rex_nw, register(1), '\x50')
         PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1))
    diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py
    --- a/pypy/jit/backend/x86/test/test_runner.py
    +++ b/pypy/jit/backend/x86/test/test_runner.py
    @@ -433,6 +433,88 @@
                     ops_offset[operations[2]] <=
                     ops_offset[None])
     
    +    def test_calling_convention(self, monkeypatch):
    +        if WORD != 4:
    +            py.test.skip("32-bit only test")
    +        from pypy.jit.backend.x86.regloc import eax, edx
    +        from pypy.jit.backend.x86 import codebuf
    +        from pypy.jit.codewriter.effectinfo import EffectInfo
    +        from pypy.rlib.libffi import types, clibffi
    +        had_stdcall = hasattr(clibffi, 'FFI_STDCALL')
    +        if not had_stdcall:    # not running on Windows, but we can still test
    +            monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False)
    +        #
    +        for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]:
    +            cpu = self.cpu
    +            mc = codebuf.MachineCodeBlockWrapper()
    +            mc.MOV_rs(eax.value, 4)      # argument 1
    +            mc.MOV_rs(edx.value, 40)     # argument 10
    +            mc.SUB_rr(eax.value, edx.value)     # return arg1 - arg10
    +            if ffi == clibffi.FFI_DEFAULT_ABI:
    +                mc.RET()
    +            else:
    +                mc.RET16_i(40)
    +            rawstart = mc.materialize(cpu.asmmemmgr, [])
    +            #
    +            calldescr = cpu.calldescrof_dynamic([types.slong] * 10,
    +                                                types.slong,
    +                                                EffectInfo.MOST_GENERAL,
    +                                                ffi_flags=-1)
    +            calldescr.get_call_conv = lambda: ffi      # <==== hack
    +            funcbox = ConstInt(rawstart)
    +            i1 = BoxInt()
    +            i2 = BoxInt()
    +            i3 = BoxInt()
    +            i4 = BoxInt()
    +            i5 = BoxInt()
    +            i6 = BoxInt()
    +            c = ConstInt(-1)
    +            faildescr = BasicFailDescr(1)
    +            # we must call it repeatedly: if the stack pointer gets increased
    +            # by 40 bytes by the STDCALL call, and if we don't expect it,
    +            # then we are going to get our stack emptied unexpectedly by
    +            # several repeated calls
    +            ops = [
    +            ResOperation(rop.CALL_RELEASE_GIL,
    +                         [funcbox, i1, c, c, c, c, c, c, c, c, i2],
    +                         i3, descr=calldescr),
    +            ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
    +
    +            ResOperation(rop.CALL_RELEASE_GIL,
    +                         [funcbox, i1, c, c, c, c, c, c, c, c, i2],
    +                         i4, descr=calldescr),
    +            ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
    +
    +            ResOperation(rop.CALL_RELEASE_GIL,
    +                         [funcbox, i1, c, c, c, c, c, c, c, c, i2],
    +                         i5, descr=calldescr),
    +            ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
    +
    +            ResOperation(rop.CALL_RELEASE_GIL,
    +                         [funcbox, i1, c, c, c, c, c, c, c, c, i2],
    +                         i6, descr=calldescr),
    +            ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
    +
    +            ResOperation(rop.FINISH, [i3, i4, i5, i6], None,
    +                         descr=BasicFailDescr(0))
    +            ]
    +            ops[1].setfailargs([])
    +            ops[3].setfailargs([])
    +            ops[5].setfailargs([])
    +            ops[7].setfailargs([])
    +            looptoken = LoopToken()
    +            self.cpu.compile_loop([i1, i2], ops, looptoken)
    +
    +            self.cpu.set_future_value_int(0, 123450)
    +            self.cpu.set_future_value_int(1, 123408)
    +            fail = self.cpu.execute_token(looptoken)
    +            assert fail.identifier == 0
    +            assert self.cpu.get_latest_value_int(0) == 42
    +            assert self.cpu.get_latest_value_int(1) == 42
    +            assert self.cpu.get_latest_value_int(2) == 42
    +            assert self.cpu.get_latest_value_int(3) == 42
    +
    +
     class TestDebuggingAssembler(object):
         def setup_method(self, meth):
             self.cpu = CPU(rtyper=None, stats=FakeStats())
    diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py
    --- a/pypy/jit/codewriter/support.py
    +++ b/pypy/jit/codewriter/support.py
    @@ -91,9 +91,12 @@
         reds_v = op.args[2+numgreens:]
         assert len(reds_v) == numreds
         #
    -    def _sort(args_v):
    +    def _sort(args_v, is_green):
             from pypy.jit.metainterp.history import getkind
             lst = [v for v in args_v if v.concretetype is not lltype.Void]
    +        if is_green:
    +            assert len(lst) == len(args_v), (
    +                "not supported so far: 'greens' variables contain Void")
             _kind2count = {'int': 1, 'ref': 2, 'float': 3}
             lst2 = sorted(lst, key=lambda v: _kind2count[getkind(v.concretetype)])
             # a crash here means that you have to reorder the variable named in
    @@ -102,7 +105,7 @@
             assert lst == lst2
             return lst
         #
    -    return (_sort(greens_v), _sort(reds_v))
    +    return (_sort(greens_v, True), _sort(reds_v, False))
     
     def maybe_on_top_of_llinterp(rtyper, fnptr):
         # Run a generated graph on top of the llinterp for testing.
    diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py
    --- a/pypy/jit/metainterp/optimizeopt/fficall.py
    +++ b/pypy/jit/metainterp/optimizeopt/fficall.py
    @@ -18,26 +18,27 @@
         def __init__(self, funcval, cpu, prepare_op):
             self.funcval = funcval
             self.opargs = []
    -        argtypes, restype = self._get_signature(funcval)
    +        argtypes, restype, flags = self._get_signature(funcval)
             self.descr = cpu.calldescrof_dynamic(argtypes, restype,
    -                                             EffectInfo.MOST_GENERAL)
    +                                             EffectInfo.MOST_GENERAL,
    +                                             ffi_flags=flags)
             # ^^^ may be None if unsupported
             self.prepare_op = prepare_op
             self.delayed_ops = []
     
         def _get_signature(self, funcval):
             """
    -        given the funcval, return a tuple (argtypes, restype), where the
    -        actuall types are libffi.types.*
    +        given the funcval, return a tuple (argtypes, restype, flags), where
    +        the actual types are libffi.types.*
     
             The implementation is tricky because we have three possible cases:
     
             - translated: the easiest case, we can just cast back the pointer to
    -          the original Func instance and read .argtypes and .restype
    +          the original Func instance and read .argtypes, .restype and .flags
     
             - completely untranslated: this is what we get from test_optimizeopt
               tests. funcval contains a FakeLLObject whose _fake_class is Func,
    -          and we can just get .argtypes and .restype
    +          and we can just get .argtypes, .restype and .flags
     
             - partially translated: this happens when running metainterp tests:
               funcval contains the low-level equivalent of a Func, and thus we
    @@ -49,10 +50,10 @@
             llfunc = funcval.box.getref_base()
             if we_are_translated():
                 func = cast_base_ptr_to_instance(Func, llfunc)
    -            return func.argtypes, func.restype
    +            return func.argtypes, func.restype, func.flags
             elif getattr(llfunc, '_fake_class', None) is Func:
                 # untranslated
    -            return llfunc.argtypes, llfunc.restype
    +            return llfunc.argtypes, llfunc.restype, llfunc.flags
             else:
                 # partially translated
                 # llfunc contains an opaque pointer to something like the following:
    @@ -63,7 +64,7 @@
                 # because we don't have the exact TYPE to cast to.  Instead, we
                 # just fish it manually :-(
                 f = llfunc._obj.container
    -            return f.inst_argtypes, f.inst_restype
    +            return f.inst_argtypes, f.inst_restype, f.inst_flags
     
     
     class OptFfiCall(Optimization):
    diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
    --- a/pypy/jit/metainterp/optimizeopt/heap.py
    +++ b/pypy/jit/metainterp/optimizeopt/heap.py
    @@ -25,7 +25,7 @@
             #      'cached_fields'.
             #
             self._cached_fields = {}
    -        self._cached_fields_getfield_op = {}        
    +        self._cached_fields_getfield_op = {}
             self._lazy_setfield = None
             self._lazy_setfield_registered = False
     
    @@ -75,7 +75,7 @@
         def remember_field_value(self, structvalue, fieldvalue, getfield_op=None):
             assert self._lazy_setfield is None
             self._cached_fields[structvalue] = fieldvalue
    -        self._cached_fields_getfield_op[structvalue] = getfield_op        
    +        self._cached_fields_getfield_op[structvalue] = getfield_op
     
         def force_lazy_setfield(self, optheap, can_cache=True):
             op = self._lazy_setfield
    @@ -163,7 +163,7 @@
     
         def new(self):
             return OptHeap()
    -        
    +
         def produce_potential_short_preamble_ops(self, sb):
             descrkeys = self.cached_fields.keys()
             if not we_are_translated():
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    @@ -4711,6 +4711,33 @@
             """
             self.optimize_loop(ops, expected)
     
    +    def test_forced_virtuals_aliasing(self):
    +        ops = """
    +        [i0, i1]
    +        p0 = new(descr=ssize)
    +        p1 = new(descr=ssize)
    +        escape(p0)
    +        escape(p1)
    +        setfield_gc(p0, i0, descr=adescr)
    +        setfield_gc(p1, i1, descr=adescr)
    +        i2 = getfield_gc(p0, descr=adescr)
    +        jump(i2, i2)
    +        """
    +        expected = """
    +        [i0, i1]
    +        p0 = new(descr=ssize)
    +        escape(p0)
    +        p1 = new(descr=ssize)
    +        escape(p1)
    +        setfield_gc(p0, i0, descr=adescr)
    +        setfield_gc(p1, i1, descr=adescr)
    +        jump(i0, i0)
    +        """
    +        py.test.skip("not implemented")
    +        # setfields on things that used to be virtual still can't alias each
    +        # other
    +        self.optimize_loop(ops, expected)
    +
     
     class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
         pass
    diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
    --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
    +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
    @@ -14,12 +14,15 @@
         can check that the signature of a call is really what you want.
         """
     
    -    def __init__(self, arg_types, typeinfo):
    +    def __init__(self, arg_types, typeinfo, flags):
             self.arg_types = arg_types
             self.typeinfo = typeinfo   # return type
    +        self.flags = flags
     
         def __eq__(self, other):
    -        return self.arg_types == other.arg_types and self.typeinfo == other.typeinfo
    +        return (self.arg_types == other.arg_types and
    +                self.typeinfo == other.typeinfo and
    +                self.flags == other.get_ffi_flags())
     
     class FakeLLObject(object):
     
    @@ -41,14 +44,17 @@
             vable_token_descr = LLtypeMixin.valuedescr
             valuedescr = LLtypeMixin.valuedescr
     
    -        int_float__int = MyCallDescr('if', 'i')
    +        int_float__int_42 = MyCallDescr('if', 'i', 42)
    +        int_float__int_43 = MyCallDescr('if', 'i', 43)
             funcptr = FakeLLObject()
             func = FakeLLObject(_fake_class=Func,
                                 argtypes=[types.sint, types.double],
    -                            restype=types.sint)
    +                            restype=types.sint,
    +                            flags=42)
             func2 = FakeLLObject(_fake_class=Func,
                                  argtypes=[types.sint, types.double],
    -                             restype=types.sint)
    +                             restype=types.sint,
    +                             flags=43)
             #
             def calldescr(cpu, FUNC, oopspecindex, extraeffect=None):
                 if extraeffect == EffectInfo.EF_RANDOM_EFFECTS:
    @@ -83,7 +89,7 @@
             """
             expected = """
             [i0, f1]
    -        i3 = call_release_gil(12345, i0, f1, descr=int_float__int)
    +        i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
             guard_not_forced() []
             guard_no_exception() []
             jump(i3, f1)
    @@ -123,7 +129,7 @@
             [i0, f1, p2]
             i4 = force_token()
             setfield_gc(p2, i4, descr=vable_token_descr)
    -        i3 = call_release_gil(12345, i0, f1, descr=int_float__int)
    +        i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
             guard_not_forced() [p2]
             guard_no_exception() [p2]
             jump(i3, f1, p2)
    @@ -220,7 +226,7 @@
             call(0, ConstPtr(func),                        descr=libffi_prepare)
             #
             # this "nested" call is nicely optimized
    -        i4 = call_release_gil(67890, i0, f1, descr=int_float__int)
    +        i4 = call_release_gil(67890, i0, f1, descr=int_float__int_43)
             guard_not_forced() []
             guard_no_exception() []
             #
    @@ -265,7 +271,7 @@
             expected = """
             [i0, f1, p2]
             setfield_gc(p2, i0, descr=valuedescr)
    -        i3 = call_release_gil(12345, i0, f1, descr=int_float__int)
    +        i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
             guard_not_forced() []
             guard_no_exception() []
             jump(i3, f1, p2)
    diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py
    --- a/pypy/jit/metainterp/test/test_warmspot.py
    +++ b/pypy/jit/metainterp/test/test_warmspot.py
    @@ -252,6 +252,41 @@
             self.check_loops({'int_sub': 1, 'int_gt': 1, 'guard_true': 1,
                               'jump': 1})
     
    +    def test_void_red_variable(self):
    +        mydriver = JitDriver(greens=[], reds=['a', 'm'])
    +        def f1(m):
    +            a = None
    +            while m > 0:
    +                mydriver.jit_merge_point(a=a, m=m)
    +                m = m - 1
    +                if m == 10:
    +                    pass   # other case
    +        self.meta_interp(f1, [18])
    +
    +    def test_bug_constant_rawptrs(self):
    +        py.test.skip("crashes because a is a constant")
    +        from pypy.rpython.lltypesystem import lltype, rffi
    +        mydriver = JitDriver(greens=['a'], reds=['m'])
    +        def f1(m):
    +            a = lltype.nullptr(rffi.VOIDP.TO)
    +            while m > 0:
    +                mydriver.jit_merge_point(a=a, m=m)
    +                m = m - 1
    +        self.meta_interp(f1, [18])
    +
    +    def test_bug_rawptrs(self):
    +        from pypy.rpython.lltypesystem import lltype, rffi
    +        mydriver = JitDriver(greens=['a'], reds=['m'])
    +        def f1(m):
    +            a = lltype.malloc(rffi.VOIDP.TO, 5, flavor='raw')
    +            while m > 0:
    +                mydriver.jit_merge_point(a=a, m=m)
    +                m = m - 1
    +                if m == 10:
    +                    pass
    +            lltype.free(a, flavor='raw')
    +        self.meta_interp(f1, [18])
    +
     
     class TestLLWarmspot(WarmspotTests, LLJitMixin):
         CPUClass = runner.LLtypeCPU
    diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py
    --- a/pypy/jit/metainterp/warmspot.py
    +++ b/pypy/jit/metainterp/warmspot.py
    @@ -130,8 +130,15 @@
         results = _find_jit_marker(graphs, 'jit_merge_point')
         if not results:
             raise Exception("no jit_merge_point found!")
    +    seen = set([graph for graph, block, pos in results])
    +    assert len(seen) == len(results), (
    +        "found several jit_merge_points in the same graph")
         return results
     
    +def locate_jit_merge_point(graph):
    +    [(graph, block, pos)] = find_jit_merge_points([graph])
    +    return block, pos, block.operations[pos]
    +
     def find_set_param(graphs):
         return _find_jit_marker(graphs, 'set_param')
     
    @@ -235,7 +242,7 @@
         def split_graph_and_record_jitdriver(self, graph, block, pos):
             op = block.operations[pos]
             jd = JitDriverStaticData()
    -        jd._jit_merge_point_pos = (graph, op)
    +        jd._jit_merge_point_in = graph
             args = op.args[2:]
             s_binding = self.translator.annotator.binding
             jd._portal_args_s = [s_binding(v) for v in args]
    @@ -245,7 +252,8 @@
             graph.startblock = support.split_before_jit_merge_point(*jmpp)
             graph.startblock.isstartblock = True
             # a crash in the following checkgraph() means that you forgot
    -        # to list some variable in greens=[] or reds=[] in JitDriver.
    +        # to list some variable in greens=[] or reds=[] in JitDriver,
    +        # or that a jit_merge_point() takes a constant as an argument.
             checkgraph(graph)
             for v in graph.getargs():
                 assert isinstance(v, Variable)
    @@ -503,7 +511,8 @@
                 self.make_args_specification(jd)
     
         def make_args_specification(self, jd):
    -        graph, op = jd._jit_merge_point_pos
    +        graph = jd._jit_merge_point_in
    +        _, _, op = locate_jit_merge_point(graph)
             greens_v, reds_v = support.decode_hp_hint_args(op)
             ALLARGS = [v.concretetype for v in (greens_v + reds_v)]
             jd._green_args_spec = [v.concretetype for v in greens_v]
    @@ -551,7 +560,7 @@
                 assert jitdriver in sublists, \
                        "can_enter_jit with no matching jit_merge_point"
                 jd, sublist = sublists[jitdriver]
    -            origportalgraph = jd._jit_merge_point_pos[0]
    +            origportalgraph = jd._jit_merge_point_in
                 if graph is not origportalgraph:
                     sublist.append((graph, block, index))
                     jd.no_loop_header = False
    @@ -581,7 +590,7 @@
                 can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)]
     
             for graph, block, index in can_enter_jits:
    -            if graph is jd._jit_merge_point_pos[0]:
    +            if graph is jd._jit_merge_point_in:
                     continue
     
                 op = block.operations[index]
    @@ -639,7 +648,7 @@
             #           while 1:
             #               more stuff
             #
    -        origportalgraph = jd._jit_merge_point_pos[0]
    +        origportalgraph = jd._jit_merge_point_in
             portalgraph = jd.portal_graph
             PORTALFUNC = jd._PORTAL_FUNCTYPE
     
    @@ -655,11 +664,13 @@
             portalfunc_ARGS = []
             nums = {}
             for i, ARG in enumerate(PORTALFUNC.ARGS):
    +            kind = history.getkind(ARG)
    +            assert kind != 'void'
                 if i < len(jd.jitdriver.greens):
                     color = 'green'
                 else:
                     color = 'red'
    -            attrname = '%s_%s' % (color, history.getkind(ARG))
    +            attrname = '%s_%s' % (color, kind)
                 count = nums.get(attrname, 0)
                 nums[attrname] = count + 1
                 portalfunc_ARGS.append((ARG, attrname, count))
    @@ -791,14 +802,7 @@
             # ____________________________________________________________
             # Now mutate origportalgraph to end with a call to portal_runner_ptr
             #
    -        _, op = jd._jit_merge_point_pos
    -        for origblock in origportalgraph.iterblocks():
    -            if op in origblock.operations:
    -                break
    -        else:
    -            assert False, "lost the operation %r in the graph %r" % (
    -                op, origportalgraph)
    -        origindex = origblock.operations.index(op)
    +        origblock, origindex, op = locate_jit_merge_point(origportalgraph)
             assert op.opname == 'jit_marker'
             assert op.args[0].value == 'jit_merge_point'
             greens_v, reds_v = support.decode_hp_hint_args(op)
    diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py
    --- a/pypy/jit/metainterp/warmstate.py
    +++ b/pypy/jit/metainterp/warmstate.py
    @@ -124,7 +124,7 @@
         # Hash of lltype or ootype object.
         # Only supports strings, unicodes and regular instances,
         # as well as primitives that can meaningfully be cast to Signed.
    -    if isinstance(TYPE, lltype.Ptr):
    +    if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc':
             if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE:
                 return rstr.LLHelpers.ll_strhash(x)    # assumed not null
             else:
    @@ -140,7 +140,7 @@
             else:
                 return 0
         else:
    -        return lltype.cast_primitive(lltype.Signed, x)
    +        return rffi.cast(lltype.Signed, x)
     
     @specialize.ll_and_arg(3)
     def set_future_value(cpu, j, value, typecode):
    diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
    --- a/pypy/module/__builtin__/functional.py
    +++ b/pypy/module/__builtin__/functional.py
    @@ -292,7 +292,7 @@
                     raise
                 break
             new_frame = space.createframe(code, w_func.w_func_globals,
    -                                      w_func.closure)
    +                                      w_func)
             new_frame.locals_stack_w[0] = w_item
             w_res = new_frame.run()
             result_w.append(w_res)
    diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
    --- a/pypy/module/_codecs/interp_codecs.py
    +++ b/pypy/module/_codecs/interp_codecs.py
    @@ -687,11 +687,15 @@
     # support for the "string escape" codec
     # This is a bytes-to bytes transformation
     
    - at unwrap_spec(errors='str_or_None')
    -def escape_encode(space, w_string, errors='strict'):
    -    w_repr = space.repr(w_string)
    -    w_result = space.getslice(w_repr, space.wrap(1), space.wrap(-1))
    -    return space.newtuple([w_result, space.len(w_string)])
    + at unwrap_spec(data=str, errors='str_or_None')
    +def escape_encode(space, data, errors='strict'):
    +    from pypy.objspace.std.stringobject import string_escape_encode
    +    result = string_escape_encode(data, quote="'")
    +    start = 1
    +    end = len(result) - 1
    +    assert end >= 0
    +    w_result = space.wrap(result[start:end])
    +    return space.newtuple([w_result, space.wrap(len(data))])
     
     @unwrap_spec(data=str, errors='str_or_None')
     def escape_decode(space, data, errors='strict'):
    diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
    --- a/pypy/module/_codecs/test/test_codecs.py
    +++ b/pypy/module/_codecs/test/test_codecs.py
    @@ -102,7 +102,6 @@
         
         def test_indexerror(self):
             test =   "\\"     # trailing backslash
    -             
             raises (ValueError, test.decode,'string-escape')
     
         def test_charmap_decode(self):
    @@ -292,6 +291,10 @@
             assert '\\0f'.decode('string_escape') == chr(0) + 'f'
             assert '\\08'.decode('string_escape') == chr(0) + '8'
     
    +    def test_escape_encode(self):
    +        assert '"'.encode('string_escape') == '"'
    +        assert "'".encode('string_escape') == "\\'"
    +
         def test_decode_utf8_different_case(self):
             constant = u"a"
             assert constant.encode("utf-8") == constant.encode("UTF-8")
    diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py
    --- a/pypy/module/_continuation/interp_continuation.py
    +++ b/pypy/module/_continuation/interp_continuation.py
    @@ -43,11 +43,11 @@
         def switch(self, w_to):
             to = self.space.interp_w(W_Continulet, w_to, can_be_None=True)
             if to is not None:
    -            if self is to:    # double-switch to myself: no-op
    -                return get_result()
                 if to.sthread is None:
                     start_state.clear()
                     raise geterror(self.space, "continulet not initialized yet")
    +            if self is to:    # double-switch to myself: no-op
    +                return get_result()
             if self.sthread is None:
                 start_state.clear()
                 raise geterror(self.space, "continulet not initialized yet")
    diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py
    --- a/pypy/module/_rawffi/test/test__rawffi.py
    +++ b/pypy/module/_rawffi/test/test__rawffi.py
    @@ -639,33 +639,6 @@
             a1.free()
             cb.free()
     
    -    def test_another_callback_in_stackless(self):
    -        try:
    -            import _stackless
    -        except ImportError:
    -            skip("only valid in a stackless pypy-c")
    -
    -        import _rawffi
    -        lib = _rawffi.CDLL(self.lib_name)
    -        runcallback = lib.ptr('runcallback', ['P'], 'q')
    -        def callback():
    -            co = _stackless.coroutine()
    -            def f():
    -                pass
    -            try:
    -                co.bind(f)
    -                co.switch()
    -            except RuntimeError:
    -                return 1<<42
    -            return -5
    -
    -        cb = _rawffi.CallbackPtr(callback, [], 'q')
    -        a1 = cb.byptr()
    -        res = runcallback(a1)
    -        assert res[0] == 1<<42
    -        a1.free()
    -        cb.free()
    -
         def test_raising_callback(self):
             import _rawffi, sys
             import StringIO
    diff --git a/pypy/module/_stackless/__init__.py b/pypy/module/_stackless/__init__.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/__init__.py
    +++ /dev/null
    @@ -1,36 +0,0 @@
    -# Package initialisation
    -from pypy.interpreter.mixedmodule import MixedModule
    -
    -class Module(MixedModule):
    -    """
    -    This module implements Stackless for applications.
    -    """
    -
    -    appleveldefs = {
    -        'GreenletExit' : 'app_greenlet.GreenletExit',
    -        'GreenletError' : 'app_greenlet.GreenletError',
    -    }
    -
    -    interpleveldefs = {
    -        'tasklet'    : 'interp_stackless.tasklet',
    -        'coroutine'  : 'interp_coroutine.AppCoroutine',
    -        'greenlet'   : 'interp_greenlet.AppGreenlet',
    -        'usercostate': 'interp_composable_coroutine.W_UserCoState',
    -        '_return_main' : 'interp_coroutine.return_main',
    -        'get_stack_depth_limit': 'interp_coroutine.get_stack_depth_limit',
    -        'set_stack_depth_limit': 'interp_coroutine.set_stack_depth_limit',
    -    }
    -
    -    def setup_after_space_initialization(self):
    -        # post-installing classmethods/staticmethods which
    -        # are not yet directly supported
    -        from pypy.module._stackless.interp_coroutine import post_install as post_install_coro
    -        post_install_coro(self)
    -        from pypy.module._stackless.interp_greenlet import post_install as post_install_greenlet
    -        post_install_greenlet(self)
    -
    -        if self.space.config.translation.gc == 'marksweep':
    -            from pypy.module._stackless.interp_clonable import post_install as post_install_clonable
    -            self.extra_interpdef('clonable', 'interp_clonable.AppClonableCoroutine')
    -            self.extra_interpdef('fork',     'interp_clonable.fork')
    -            post_install_clonable(self)
    diff --git a/pypy/module/_stackless/app_greenlet.py b/pypy/module/_stackless/app_greenlet.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/app_greenlet.py
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -class GreenletExit(Exception):
    -    pass
    -
    -class GreenletError(Exception):
    -    pass
    diff --git a/pypy/module/_stackless/interp_clonable.py b/pypy/module/_stackless/interp_clonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_clonable.py
    +++ /dev/null
    @@ -1,106 +0,0 @@
    -from pypy.interpreter.error import OperationError
    -from pypy.interpreter.typedef import TypeDef
    -from pypy.interpreter.gateway import interp2app
    -from pypy.module._stackless.interp_coroutine import AppCoroutine, AppCoState
    -from pypy.module._stackless.interp_coroutine import makeStaticMethod
    -from pypy.module._stackless.rcoroutine import AbstractThunk
    -from pypy.module._stackless.rclonable import InterpClonableMixin
    -
    -
    -class AppClonableCoroutine(AppCoroutine, InterpClonableMixin):
    -
    -    def newsubctx(self):
    -        self.hello_local_pool()
    -        AppCoroutine.newsubctx(self)
    -        self.goodbye_local_pool()
    -
    -    def hello(self):
    -        self.hello_local_pool()
    -        AppCoroutine.hello(self)
    -
    -    def goodbye(self):
    -        AppCoroutine.goodbye(self)
    -        self.goodbye_local_pool()
    -
    -    def descr_method__new__(space, w_subtype):
    -        co = space.allocate_instance(AppClonableCoroutine, w_subtype)
    -        costate = AppClonableCoroutine._get_state(space)
    -        AppClonableCoroutine.__init__(co, space, state=costate)
    -        return space.wrap(co)
    -
    -    def _get_state(space):
    -        return space.fromcache(AppClonableCoState)
    -    _get_state = staticmethod(_get_state)
    -
    -    def w_getcurrent(space):
    -        return space.wrap(AppClonableCoroutine._get_state(space).current)
    -    w_getcurrent = staticmethod(w_getcurrent)
    -
    -    def w_clone(self):
    -        space = self.space
    -        costate = self.costate
    -        if costate.current is self:
    -            raise OperationError(space.w_RuntimeError,
    -                                 space.wrap("clone() cannot clone the "
    -                                            "current coroutine"
    -                                            "; use fork() instead"))
    -        copy = AppClonableCoroutine(space, state=costate)
    -        copy.subctx = self.clone_into(copy, self.subctx)
    -        return space.wrap(copy)
    -
    -    def descr__reduce__(self, space):
    -        raise OperationError(space.w_TypeError,
    -                             space.wrap("_stackless.clonable instances are "
    -                                        "not picklable"))
    -
    -
    -AppClonableCoroutine.typedef = TypeDef("clonable", AppCoroutine.typedef,
    -    __new__    = interp2app(AppClonableCoroutine.descr_method__new__.im_func),
    -    getcurrent = interp2app(AppClonableCoroutine.w_getcurrent),
    -    clone      = interp2app(AppClonableCoroutine.w_clone),
    -    __reduce__ = interp2app(AppClonableCoroutine.descr__reduce__),
    -)
    -
    -class AppClonableCoState(AppCoState):
    -    def post_install(self):
    -        self.current = self.main = AppClonableCoroutine(self.space, state=self)
    -        self.main.subctx.clear_framestack()      # wack
    -
    -def post_install(module):
    -    makeStaticMethod(module, 'clonable', 'getcurrent')
    -    space = module.space
    -    AppClonableCoroutine._get_state(space).post_install()
    -
    -# ____________________________________________________________
    -
    -class ForkThunk(AbstractThunk):
    -    def __init__(self, coroutine):
    -        self.coroutine = coroutine
    -        self.newcoroutine = None
    -    def call(self):
    -        oldcoro = self.coroutine
    -        self.coroutine = None
    -        newcoro = AppClonableCoroutine(oldcoro.space, state=oldcoro.costate)
    -        newcoro.subctx = oldcoro.clone_into(newcoro, oldcoro.subctx)
    -        newcoro.parent = oldcoro
    -        self.newcoroutine = newcoro
    -
    -def fork(space):
    -    """Fork, as in the Unix fork(): the call returns twice, and the return
    -    value of the call is either the new 'child' coroutine object (if returning
    -    into the parent), or None (if returning into the child).  This returns
    -    into the parent first, which can switch to the child later.
    -    """
    -    costate = AppClonableCoroutine._get_state(space)
    -    current = costate.current
    -    if current is costate.main:
    -        raise OperationError(space.w_RuntimeError,
    -                             space.wrap("cannot fork() in the main "
    -                                        "clonable coroutine"))
    -    thunk = ForkThunk(current)
    -    coro_fork = AppClonableCoroutine(space, state=costate)
    -    coro_fork.bind(thunk)
    -    coro_fork.switch()
    -    # we resume here twice.  The following would need explanations about
    -    # why it returns the correct thing in both the parent and the child...
    -    return space.wrap(thunk.newcoroutine)
    diff --git a/pypy/module/_stackless/interp_composable_coroutine b/pypy/module/_stackless/interp_composable_coroutine
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_composable_coroutine
    +++ /dev/null
    @@ -1,33 +0,0 @@
    -from pypy.interpreter.baseobjspace import Wrappable
    -from pypy.interpreter.typedef import TypeDef, interp2app
    -from pypy.module._stackless.coroutine import AppCoState, AppCoroutine
    -
    -
    -class W_UserCoState(Wrappable):
    -    def __init__(self, space):
    -        self.costate = AppCoState(space)
    -        self.costate.post_install()
    -
    -    def descr_method__new__(space, w_subtype):
    -        costate = space.allocate_instance(W_UserCoState, w_subtype)
    -        W_UserCoState.__init__(costate, space)
    -        return space.wrap(costate)
    -
    -    def w_getcurrent(self):
    -        space = self.costate.space
    -        return space.wrap(self.costate.current)
    -
    -    def w_spawn(self, w_subtype=None):
    -        space = self.costate.space
    -        if space.is_w(w_subtype, space.w_None):
    -            w_subtype = space.gettypeobject(AppCoroutine.typedef)
    -        co = space.allocate_instance(AppCoroutine, w_subtype)
    -        AppCoroutine.__init__(co, space, state=self.costate)
    -        return space.wrap(co)
    -
    -W_UserCoState.typedef = TypeDef("usercostate",
    -    __new__ = interp2app(W_UserCoState.descr_method__new__.im_func),
    -    __module__ = '_stackless',
    -    getcurrent = interp2app(W_UserCoState.w_getcurrent),
    -    spawn      = interp2app(W_UserCoState.w_spawn),
    -)
    diff --git a/pypy/module/_stackless/interp_composable_coroutine.py b/pypy/module/_stackless/interp_composable_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_composable_coroutine.py
    +++ /dev/null
    @@ -1,34 +0,0 @@
    -from pypy.interpreter.baseobjspace import Wrappable
    -from pypy.interpreter.typedef import TypeDef, interp2app
    -from pypy.module._stackless.interp_coroutine import AppCoState, AppCoroutine
    -
    -
    -class W_UserCoState(Wrappable):
    -    def __init__(self, space):
    -        self.costate = AppCoState(space)
    -        self.costate.post_install()
    -
    -    def descr_method__new__(space, w_subtype):
    -        costate = space.allocate_instance(W_UserCoState, w_subtype)
    -        W_UserCoState.__init__(costate, space)
    -        return space.wrap(costate)
    -
    -    def w_getcurrent(self):
    -        space = self.costate.space
    -        return space.wrap(self.costate.current)
    -
    -    def w_spawn(self, w_subtype=None):
    -        space = self.costate.space
    -        if space.is_w(w_subtype, space.w_None):
    -            w_subtype = space.gettypeobject(AppCoroutine.typedef)
    -        co = space.allocate_instance(AppCoroutine, w_subtype)
    -        AppCoroutine.__init__(co, space, state=self.costate)
    -        return space.wrap(co)
    -
    -W_UserCoState.typedef = TypeDef("usercostate",
    -    __new__ = interp2app(W_UserCoState.descr_method__new__.im_func),
    -    __module__ = '_stackless',
    -    getcurrent = interp2app(W_UserCoState.w_getcurrent),
    -    spawn      = interp2app(W_UserCoState.w_spawn),
    -)
    -W_UserCoState.acceptable_as_base_class = False
    diff --git a/pypy/module/_stackless/interp_coroutine.py b/pypy/module/_stackless/interp_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_coroutine.py
    +++ /dev/null
    @@ -1,403 +0,0 @@
    -"""
    -Coroutine implementation for application level on top
    -of the internal coroutines.
    -This is an extensible concept. Multiple implementations
    -of concurrency can exist together, if they follow the
    -basic concept of maintaining their own costate.
    -
    -There is also some diversification possible by using
    -multiple costates for the same type. This leads to
    -disjoint switchable sets within the same type.
    -
    -I'm not so sure to what extent the opposite is possible, too.
    -I.e., merging the costate of tasklets and greenlets would
    -allow them to be parents of each other. Needs a bit more
    -experience to decide where to set the limits.
    -"""
    -
    -from pypy.interpreter.argument import Arguments
    -from pypy.interpreter.typedef import GetSetProperty, TypeDef
    -from pypy.interpreter.gateway import interp2app, unwrap_spec
    -from pypy.interpreter.error import OperationError, operationerrfmt
    -
    -from pypy.module._stackless.stackless_flags import StacklessFlags
    -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState, AbstractThunk, CoroutineExit
    -
    -from pypy.module.exceptions.interp_exceptions import W_SystemExit, _new_exception
    -
    -from pypy.rlib import rstack, jit # for resume points
    -from pypy.tool import stdlib_opcode as pythonopcode
    -
    -class _AppThunk(AbstractThunk):
    -
    -    def __init__(self, space, costate, w_obj, args):
    -        self.space = space
    -        self.costate = costate
    -        if not space.is_true(space.callable(w_obj)):
    -            raise operationerrfmt(
    -                space.w_TypeError,
    -                "'%s' object is not callable",
    -                space.type(w_obj).getname(space))
    -        self.w_func = w_obj
    -        self.args = args
    -
    -    def call(self):
    -        costate = self.costate
    -        w_result = self.space.call_args(self.w_func, self.args)
    -        costate.w_tempval = w_result
    -
    -class _ResumeThunk(AbstractThunk):
    -    def __init__(self, space, costate, w_frame):
    -        self.space = space
    -        self.costate = costate
    -        self.w_frame = w_frame
    -
    -    def call(self):
    -        w_result = resume_frame(self.space, self.w_frame)
    -        # costate.w_tempval = w_result #XXX?
    -
    -
    -W_CoroutineExit = _new_exception('CoroutineExit', W_SystemExit,
    -                        """Coroutine killed manually.""")
    -
    -# Should be moved to interp_stackless.py if it's ever implemented... Currently
    -# used by pypy/lib/stackless.py.
    -W_TaskletExit = _new_exception('TaskletExit', W_SystemExit,
    -            """Tasklet killed manually.""")
    -
    -class AppCoroutine(Coroutine): # XXX, StacklessFlags):
    -
    -    def __init__(self, space, state=None):
    -        self.space = space
    -        if state is None:
    -            state = AppCoroutine._get_state(space)
    -        Coroutine.__init__(self, state)
    -        self.flags = 0
    -        self.newsubctx()
    -
    -    def newsubctx(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx = ec.Subcontext()
    -
    -    def descr_method__new__(space, w_subtype):
    -        co = space.allocate_instance(AppCoroutine, w_subtype)
    -        AppCoroutine.__init__(co, space)
    -        return space.wrap(co)
    -
    -    def _get_state(space):
    -        return space.fromcache(AppCoState)
    -    _get_state = staticmethod(_get_state)
    -
    -    def w_bind(self, w_func, __args__):
    -        space = self.space
    -        if self.frame is not None:
    -            raise OperationError(space.w_ValueError, space.wrap(
    -                "cannot bind a bound Coroutine"))
    -        state = self.costate
    -        thunk = _AppThunk(space, state, w_func, __args__)
    -        self.bind(thunk)
    -
    -    def w_switch(self):
    -        space = self.space
    -        if self.frame is None:
    -            raise OperationError(space.w_ValueError, space.wrap(
    -                "cannot switch to an unbound Coroutine"))
    -        state = self.costate
    -        self.switch()
    -        w_ret, state.w_tempval = state.w_tempval, space.w_None
    -        return w_ret
    -
    -    def switch(self):
    -        space = self.space
    -        try:
    -            Coroutine.switch(self)
    -        except CoroutineExit:
    -            raise OperationError(self.costate.w_CoroutineExit, space.w_None)
    -
    -    def w_finished(self, w_excinfo):
    -        pass
    -
    -    def finish(self, operror=None):
    -        space = self.space
    -        if isinstance(operror, OperationError):
    -            w_exctype = operror.w_type
    -            w_excvalue = operror.get_w_value(space)
    -            w_exctraceback = operror.get_traceback()
    -            w_excinfo = space.newtuple([w_exctype, w_excvalue, w_exctraceback])
    -
    -            if w_exctype is self.costate.w_CoroutineExit:
    -                self.coroutine_exit = True
    -        else:
    -            w_N = space.w_None
    -            w_excinfo = space.newtuple([w_N, w_N, w_N])
    -
    -        return space.call_method(space.wrap(self),'finished', w_excinfo)
    -
    -    def hello(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.enter(ec)
    -
    -    def goodbye(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.leave(ec)
    -
    -    def w_kill(self):
    -        self.kill()
    -
    -    def w_throw(self, w_type, w_value=None, w_traceback=None):
    -        space = self.space
    -
    -        operror = OperationError(w_type, w_value)
    -        operror.normalize_exception(space)
    -
    -        if not space.is_w(w_traceback, space.w_None):
    -            from pypy.interpreter import pytraceback
    -            tb = space.interpclass_w(w_traceback)
    -            if tb is None or not space.is_true(space.isinstance(tb,
    -                space.gettypeobject(pytraceback.PyTraceback.typedef))):
    -                raise OperationError(space.w_TypeError,
    -                      space.wrap("throw: arg 3 must be a traceback or None"))
    -            operror.set_traceback(tb)
    -
    -        self._kill(operror)
    -
    -    def _userdel(self):
    -        if self.get_is_zombie():
    -            return
    -        self.set_is_zombie(True)
    -        self.space.userdel(self.space.wrap(self))
    -
    -    def w_getcurrent(space):
    -        return space.wrap(AppCoroutine._get_state(space).current)
    -    w_getcurrent = staticmethod(w_getcurrent)
    -
    -    def w_getmain(space):
    -        return space.wrap(AppCoroutine._get_state(space).main)
    -    w_getmain = staticmethod(w_getmain)
    -
    -    # pickling interface
    -    def descr__reduce__(self, space):
    -        # this is trying to be simplistic at the moment.
    -        # we neither allow to pickle main (which can become a mess
    -        # since it has some deep anchestor frames)
    -        # nor we allow to pickle the current coroutine.
    -        # rule: switch before pickling.
    -        # you cannot construct the tree that you are climbing.
    -        from pypy.interpreter.mixedmodule import MixedModule
    -        w_mod    = space.getbuiltinmodule('_stackless')
    -        mod      = space.interp_w(MixedModule, w_mod)
    -        w_mod2    = space.getbuiltinmodule('_pickle_support')
    -        mod2      = space.interp_w(MixedModule, w_mod2)
    -        w_new_inst = mod.get('coroutine')
    -        w        = space.wrap
    -        nt = space.newtuple
    -        ec = self.space.getexecutioncontext()
    -
    -        if self is self.costate.main:
    -            return nt([mod.get('_return_main'), nt([])])
    -
    -        thunk = self.thunk
    -        if isinstance(thunk, _AppThunk):
    -            w_args, w_kwds = thunk.args.topacked()
    -            w_thunk = nt([thunk.w_func, w_args, w_kwds])
    -        else:
    -            w_thunk = space.w_None
    -
    -        tup_base = [
    -            ]
    -        tup_state = [
    -            w(self.flags),
    -            self.subctx.getstate(space),
    -            w_thunk,
    -            w(self.parent),
    -            ]
    -
    -        return nt([w_new_inst, nt(tup_base), nt(tup_state)])
    -
    -    def descr__setstate__(self, space, w_args):
    -        w_flags, w_state, w_thunk, w_parent = space.unpackiterable(w_args,
    -                                                        expected_length=4)
    -        self.flags = space.int_w(w_flags)
    -        if space.is_w(w_parent, space.w_None):
    -            w_parent = self.w_getmain(space)
    -        self.parent = space.interp_w(AppCoroutine, w_parent)
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.setstate(space, w_state)
    -        if space.is_w(w_thunk, space.w_None):
    -            if space.is_w(w_state, space.w_None):
    -                self.thunk = None
    -            else:
    -                self.bind(_ResumeThunk(space, self.costate, self.subctx.topframe))
    -        else:
    -            w_func, w_args, w_kwds = space.unpackiterable(w_thunk,
    -                                                          expected_length=3)
    -            args = Arguments.frompacked(space, w_args, w_kwds)
    -            self.bind(_AppThunk(space, self.costate, w_func, args))
    -
    -
    -# _mixin_ did not work
    -for methname in StacklessFlags.__dict__:
    -    meth = getattr(StacklessFlags, methname)
    -    if hasattr(meth, 'im_func'):
    -        setattr(AppCoroutine, meth.__name__, meth.im_func)
    -del meth, methname
    -
    -def w_get_is_zombie(self, space):
    -    return space.wrap(self.get_is_zombie())
    -AppCoroutine.w_get_is_zombie = w_get_is_zombie
    -
    -def w_get_is_alive(self, space):
    -    return space.wrap(self.is_alive())
    -AppCoroutine.w_get_is_alive = w_get_is_alive
    -
    -def w_descr__framestack(self, space):
    -    assert isinstance(self, AppCoroutine)
    -    counter = 0
    -    f = self.subctx.topframe
    -    while f is not None:
    -        counter += 1
    -        f = f.f_backref()
    -    items = [None] * counter
    -    f = self.subctx.topframe
    -    while f is not None:
    -        counter -= 1
    -        assert counter >= 0
    -        items[counter] = space.wrap(f)
    -        f = f.f_backref()
    -    assert counter == 0
    -    return space.newtuple(items)
    -
    -def makeStaticMethod(module, classname, funcname):
    -    "NOT_RPYTHON"
    -    space = module.space
    -    w_klass = space.getattr(space.wrap(module), space.wrap(classname))
    -    # HACK HACK HACK
    -    # make the typeobject mutable for a while
    -    from pypy.objspace.std.typeobject import W_TypeObject
    -    assert isinstance(w_klass, W_TypeObject)
    -    old_flag = w_klass.flag_heaptype
    -    w_klass.flag_heaptype = True
    -
    -    space.appexec([w_klass, space.wrap(funcname)], """
    -        (klass, funcname):
    -            func = getattr(klass, funcname)
    -            setattr(klass, funcname, staticmethod(func.im_func))
    -    """)
    -    w_klass.flag_heaptype = old_flag
    -
    -def post_install(module):
    -    makeStaticMethod(module, 'coroutine', 'getcurrent')
    -    makeStaticMethod(module, 'coroutine', 'getmain')
    -    space = module.space
    -    AppCoroutine._get_state(space).post_install()
    -
    -# space.appexec("""() :
    -
    -# maybe use __spacebind__ for postprocessing
    -
    -AppCoroutine.typedef = TypeDef("coroutine",
    -    __new__ = interp2app(AppCoroutine.descr_method__new__.im_func),
    -    bind = interp2app(AppCoroutine.w_bind),
    -    switch = interp2app(AppCoroutine.w_switch),
    -    kill = interp2app(AppCoroutine.w_kill),
    -    throw = interp2app(AppCoroutine.w_throw),
    -    finished = interp2app(AppCoroutine.w_finished),
    -    is_alive = GetSetProperty(AppCoroutine.w_get_is_alive),
    -    is_zombie = GetSetProperty(AppCoroutine.w_get_is_zombie,
    -      doc=AppCoroutine.get_is_zombie.__doc__), #--- this flag is a bit obscure
    -      # and not useful (it's totally different from Coroutine.is_zombie(), too)
    -      # but lib/stackless.py uses it
    -    _framestack = GetSetProperty(w_descr__framestack),
    -    getcurrent = interp2app(AppCoroutine.w_getcurrent),
    -    getmain = interp2app(AppCoroutine.w_getmain),
    -    __reduce__   = interp2app(AppCoroutine.descr__reduce__),
    -    __setstate__ = interp2app(AppCoroutine.descr__setstate__),
    -    __module__ = '_stackless',
    -)
    -
    -class AppCoState(BaseCoState):
    -    def __init__(self, space):
    -        BaseCoState.__init__(self)
    -        self.w_tempval = space.w_None
    -        self.space = space
    -
    -        # XXX Workaround: for now we need to instantiate these classes
    -        # explicitly for translation to work
    -        W_CoroutineExit(space)
    -        W_TaskletExit(space)
    -
    -        # Exporting new exception to space
    -        self.w_CoroutineExit = space.gettypefor(W_CoroutineExit)
    -        space.setitem(
    -                      space.exceptions_module.w_dict,
    -                      space.new_interned_str('CoroutineExit'),
    -                      self.w_CoroutineExit)
    -        space.setitem(space.builtin.w_dict,
    -                      space.new_interned_str('CoroutineExit'),
    -                      self.w_CoroutineExit)
    -
    -        # Should be moved to interp_stackless.py if it's ever implemented...
    -        self.w_TaskletExit = space.gettypefor(W_TaskletExit)
    -        space.setitem(
    -                      space.exceptions_module.w_dict,
    -                      space.new_interned_str('TaskletExit'),
    -                      self.w_TaskletExit)
    -        space.setitem(space.builtin.w_dict,
    -                      space.new_interned_str('TaskletExit'),
    -                      self.w_TaskletExit)
    -
    -    def post_install(self):
    -        self.current = self.main = AppCoroutine(self.space, state=self)
    -        self.main.subctx.clear_framestack()      # wack
    -
    -def return_main(space):
    -    return AppCoroutine._get_state(space).main
    -
    -def get_stack_depth_limit(space):
    -    return space.wrap(rstack.get_stack_depth_limit())
    -
     -@unwrap_spec(limit=int)
    -def set_stack_depth_limit(space, limit):
    -    rstack.set_stack_depth_limit(limit)
    -
    -
    -# ___________________________________________________________________
    -# unpickling trampoline
    -
    -def resume_frame(space, w_frame):
    -    from pypy.interpreter.pyframe import PyFrame
    -    frame = space.interp_w(PyFrame, w_frame, can_be_None=True)
    -    w_result = space.w_None
    -    operr = None
    -    executioncontext = frame.space.getexecutioncontext()
    -    while frame is not None:
    -        code = frame.pycode.co_code
    -        instr = frame.last_instr
    -        opcode = ord(code[instr])
    -        map = pythonopcode.opmap
    -        call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], map['CALL_FUNCTION_VAR'],
    -                    map['CALL_FUNCTION_VAR_KW'], map['CALL_METHOD']]
    -        assert opcode in call_ops
    -        instr += 1
    -        oparg = ord(code[instr]) | ord(code[instr + 1]) << 8
    -        nargs = oparg & 0xff
    -        nkwds = (oparg >> 8) & 0xff
    -        if nkwds == 0:     # only positional arguments
    -            # fast paths leaves things on the stack, pop them
    -            if space.config.objspace.opcodes.CALL_METHOD and opcode == map['CALL_METHOD']:
    -                frame.dropvalues(nargs + 2)
    -            elif opcode == map['CALL_FUNCTION']:
    -                frame.dropvalues(nargs + 1)
    -
    -        # small hack: unlink frame out of the execution context, because
    -        # execute_frame will add it there again
    -        executioncontext.topframeref = jit.non_virtual_ref(frame.f_backref())
    -        frame.last_instr = instr + 1 # continue after the call
    -        try:
    -            w_result = frame.execute_frame(w_result, operr)
    -        except OperationError, operr:
    -            pass
    -        frame = frame.f_backref()
    -    if operr:
    -        raise operr
    -    return w_result
    diff --git a/pypy/module/_stackless/interp_greenlet.py b/pypy/module/_stackless/interp_greenlet.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_greenlet.py
    +++ /dev/null
    @@ -1,238 +0,0 @@
    -from pypy.interpreter.argument import Arguments
    -from pypy.interpreter.typedef import GetSetProperty, TypeDef
    -from pypy.interpreter.gateway import interp2app
    -from pypy.interpreter.gateway import NoneNotWrapped
    -from pypy.interpreter.error import OperationError
    -
    -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState
    -from pypy.module._stackless.rcoroutine import AbstractThunk, syncstate
    -from pypy.module._stackless.interp_coroutine import makeStaticMethod
    -
    -
    -class GreenletThunk(AbstractThunk):
    -
    -    def __init__(self, greenlet):
    -        self.greenlet = greenlet
    -
    -    def call(self):
    -        greenlet = self.greenlet
    -        greenlet.active = True
    -        try:
    -            space = greenlet.space
    -            args_w = greenlet.costate.args_w
    -            __args__ = Arguments(space, args_w)
    -            try:
    -                w_run = space.getattr(space.wrap(greenlet), space.wrap('run'))
    -                greenlet.w_callable = None
    -                w_result = space.call_args(w_run, __args__)
    -            except OperationError, operror:
    -                if not operror.match(space, greenlet.costate.w_GreenletExit):
    -                    raise
    -                w_result = operror.get_w_value(space)
    -        finally:
    -            greenlet.active = False
    -        greenlet.costate.args_w = [w_result]
    -
    -class AppGreenletCoState(BaseCoState):
    -    def __init__(self, space):
    -        BaseCoState.__init__(self)
    -        self.args_w = None
    -        self.space = space
    -        self.w_GreenletExit  = get(space, "GreenletExit")
    -        self.w_GreenletError = get(space, "GreenletError")
    -
    -    def post_install(self):
    -        self.current = self.main = AppGreenlet(self.space, is_main=True)
    -
    -class AppGreenlet(Coroutine):
    -    def __init__(self, space, w_callable=None, is_main=False):
    -        Coroutine.__init__(self, self._get_state(space))
    -        self.space = space
    -        self.w_callable = w_callable
    -        self.active = is_main
    -        self.subctx = space.getexecutioncontext().Subcontext()
    -        if is_main:
    -            self.subctx.clear_framestack()      # wack
    -        else:
    -            self.bind(GreenletThunk(self))
    -
    -    def descr_method__new__(space, w_subtype, __args__):
    -        co = space.allocate_instance(AppGreenlet, w_subtype)
    -        AppGreenlet.__init__(co, space)
    -        return space.wrap(co)
    -
    -    def descr_method__init__(self, w_run=NoneNotWrapped,
    -                                   w_parent=NoneNotWrapped):
    -        if w_run is not None:
    -            self.set_run(w_run)
    -        if w_parent is not None:
    -            self.set_parent(w_parent)
    -
    -    def _get_state(space):
    -        return space.fromcache(AppGreenletCoState)
    -    _get_state = staticmethod(_get_state)
    -
    -    def hello(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.enter(ec)
    -
    -    def goodbye(self):
    -        ec = self.space.getexecutioncontext()
    -        self.subctx.leave(ec)
    -
    -    def w_getcurrent(space):
    -        return space.wrap(AppGreenlet._get_state(space).current)
    -    w_getcurrent = staticmethod(w_getcurrent)
    -
    -    def w_switch(self, args_w):
    -        # Find the switch target - it might be a parent greenlet
    -        space = self.space
    -        costate = self.costate
    -        target = self
    -        while target.isdead():
    -            target = target.parent
    -            assert isinstance(target, AppGreenlet)
    -        # Switch to it
    -        costate.args_w = args_w
    -        if target is not costate.current:
    -            target.switch()
    -        else:
    -            # case not handled in Coroutine.switch()
    -            syncstate._do_things_to_do()
    -        result_w = costate.args_w
    -        costate.args_w = None
    -        # costate.args_w can be set to None above for throw(), but then
    -        # switch() should have raised.  At this point cosstate.args_w != None.
    -        assert result_w is not None
    -        # Return the result of a switch, packaging it in a tuple if
    -        # there is more than one value.
    -        if len(result_w) == 1:
    -            return result_w[0]
    -        return space.newtuple(result_w)
    -
    -    def w_throw(self, w_type=None, w_value=None, w_traceback=None):
    -        space = self.space
    -        if space.is_w(w_type, space.w_None):
    -            w_type = self.costate.w_GreenletExit
    -        # Code copied from RAISE_VARARGS but slightly modified.  Not too nice.
    -        operror = OperationError(w_type, w_value)
    -        operror.normalize_exception(space)
    -        if not space.is_w(w_traceback, space.w_None):
    -            from pypy.interpreter import pytraceback
    -            tb = space.interpclass_w(w_traceback)
    -            if tb is None or not space.is_true(space.isinstance(tb, 
    -                space.gettypeobject(pytraceback.PyTraceback.typedef))):
    -                raise OperationError(space.w_TypeError,
    -                      space.wrap("throw: arg 3 must be a traceback or None"))
    -            operror.set_traceback(tb)
    -        # Dead greenlet: turn GreenletExit into a regular return
    -        if self.isdead() and operror.match(space, self.costate.w_GreenletExit):
    -            args_w = [operror.get_w_value(space)]
    -        else:
    -            syncstate.push_exception(operror)
    -            args_w = None
    -        return self.w_switch(args_w)
    -
    -    def _userdel(self):
    -        self.space.userdel(self.space.wrap(self))
    -
    -    def isdead(self):
    -        return self.thunk is None and not self.active
    -
    -    def w_get_is_dead(self, space):
    -        return space.newbool(self.isdead())
    -
    -    def descr__nonzero__(self):
    -        return self.space.newbool(self.active)
    -
    -    def w_get_run(self, space):
    -        w_run = self.w_callable
    -        if w_run is None:
    -            raise OperationError(space.w_AttributeError, space.wrap("run"))
    -        return w_run
    -
    -    def set_run(self, w_run):
    -        space = self.space
    -        if self.thunk is None:
    -            raise OperationError(space.w_AttributeError,
    -                                 space.wrap("run cannot be set "
    -                                            "after the start of the greenlet"))
    -        self.w_callable = w_run
    -
    -    def w_set_run(self, space, w_run):
    -        self.set_run(w_run)
    -
    -    def w_del_run(self, space):
    -        if self.w_callable is None:
    -            raise OperationError(space.w_AttributeError, space.wrap("run"))
    -        self.w_callable = None
    -
    -    def w_get_parent(self, space):
    -        return space.wrap(self.parent)
    -
    -    def set_parent(self, w_parent):
    -        space = self.space
    -        newparent = space.interp_w(AppGreenlet, w_parent)
    -        if newparent.costate is not self.costate:
    -            raise OperationError(self.costate.w_GreenletError,
    -                                 space.wrap("invalid foreign parent"))
    -        curr = newparent
    -        while curr:
    -            if curr is self:
    -                raise OperationError(space.w_ValueError,
    -                                     space.wrap("cyclic parent chain"))
    -            curr = curr.parent
    -        self.parent = newparent
    -
    -    def w_set_parent(self, space, w_parent):
    -        self.set_parent(w_parent)
    -
    -    def w_get_frame(self, space):
    -        if not self.active or self.costate.current is self:
    -            f = None
    -        else:
    -            f = self.subctx.topframe
    -        return space.wrap(f)
    -
    -def get(space, name):
    -    w_module = space.getbuiltinmodule('_stackless')
    -    return space.getattr(w_module, space.wrap(name))
    -
    -def post_install(module):
    -    "NOT_RPYTHON"
    -    makeStaticMethod(module, 'greenlet', 'getcurrent')
    -    space = module.space
    -    state = AppGreenlet._get_state(space)
    -    state.post_install()
    -    w_greenlet = get(space, 'greenlet')
    -    # HACK HACK HACK
    -    # make the typeobject mutable for a while
    -    from pypy.objspace.std.typeobject import W_TypeObject
    -    assert isinstance(w_greenlet, W_TypeObject)
    -    old_flag = w_greenlet.flag_heaptype
    -    w_greenlet.flag_heaptype = True
    -    space.appexec([w_greenlet,
    -                   state.w_GreenletExit,
    -                   state.w_GreenletError], """
    -    (greenlet, exit, error):
    -        greenlet.GreenletExit = exit
    -        greenlet.error = error
    -    """)
    -    w_greenlet.flag_heaptype = old_flag
    -
    -AppGreenlet.typedef = TypeDef("greenlet",
    -    __new__ = interp2app(AppGreenlet.descr_method__new__.im_func),
    -    __init__ = interp2app(AppGreenlet.descr_method__init__),
    -    switch = interp2app(AppGreenlet.w_switch),
    -    dead = GetSetProperty(AppGreenlet.w_get_is_dead),
    -    run = GetSetProperty(AppGreenlet.w_get_run,
    -                         AppGreenlet.w_set_run,
    -                         AppGreenlet.w_del_run),
    -    parent = GetSetProperty(AppGreenlet.w_get_parent,
    -                            AppGreenlet.w_set_parent),
    -    getcurrent = interp2app(AppGreenlet.w_getcurrent),
    -    throw = interp2app(AppGreenlet.w_throw),
    -    gr_frame = GetSetProperty(AppGreenlet.w_get_frame),
    -    __nonzero__ = interp2app(AppGreenlet.descr__nonzero__),
    -    __module__ = '_stackless',
    -)
    diff --git a/pypy/module/_stackless/interp_stackless.py b/pypy/module/_stackless/interp_stackless.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/interp_stackless.py
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -from pypy.interpreter.baseobjspace import Wrappable
    -from pypy.interpreter.typedef import TypeDef
    -from pypy.interpreter.gateway import interp2app
    -import os
    -
    -
    -class tasklet(Wrappable):
    -
    -    def __init__(self, space):
    -        self.space = space
    -        self.flags = 0
    -        self.state = None
    -
    -    def descr_method__new__(space, w_subtype):
    -        t = space.allocate_instance(tasklet, w_subtype)
    -        tasklet.__init__(t, space)
    -        return space.wrap(t)
    -
    -    def w_demo(self):
    -        output("42")
    -
    -tasklet.typedef = TypeDef("tasklet",
    -    __new__ = interp2app(tasklet.descr_method__new__.im_func),
    -    demo = interp2app(tasklet.w_demo),
    -)
    -
    -def output(stuff):
    -    os.write(2, stuff + '\n')
    diff --git a/pypy/module/_stackless/rclonable.py b/pypy/module/_stackless/rclonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/rclonable.py
    +++ /dev/null
    @@ -1,87 +0,0 @@
    -from pypy.module._stackless.interp_coroutine import AbstractThunk, Coroutine
    -from pypy.rlib.rgc import gc_swap_pool, gc_clone
    -from pypy.rlib.objectmodel import we_are_translated
    -
    -
    -class InterpClonableMixin:
    -    local_pool = None
    -    _mixin_ = True
    -
    -    def hello_local_pool(self):
    -        if we_are_translated():
    -            self.saved_pool = gc_swap_pool(self.local_pool)
    -
    -    def goodbye_local_pool(self):
    -        if we_are_translated():
    -            self.local_pool = gc_swap_pool(self.saved_pool)
    -            self.saved_pool = None
    -
    -    def clone_into(self, copy, extradata=None):
    -        if not we_are_translated():
    -            raise NotImplementedError
    -        # cannot gc_clone() directly self, because it is not in its own
    -        # local_pool.  Moreover, it has a __del__, which cloning doesn't
    -        # support properly at the moment.
    -        copy.parent = self.parent
    -        # the hello/goodbye pair has two purposes: it forces
    -        # self.local_pool to be computed even if it was None up to now,
    -        # and it puts the 'data' tuple in the correct pool to be cloned.
    -        self.hello_local_pool()
    -        data = (self.frame, extradata)
    -        self.goodbye_local_pool()
    -        # clone!
    -        data, copy.local_pool = gc_clone(data, self.local_pool)
    -        copy.frame, extradata = data
    -        copy.thunk = self.thunk # in case we haven't switched to self yet
    -        return extradata
    -
    -
    -class InterpClonableCoroutine(Coroutine, InterpClonableMixin):
    -
    -    def hello(self):
    -        self.hello_local_pool()
    -
    -    def goodbye(self):
    -        self.goodbye_local_pool()
    -
    -    def clone(self):
    -        # hack, this is overridden in AppClonableCoroutine
    -        if self.getcurrent() is self:
    -            raise RuntimeError("clone() cannot clone the current coroutine; "
    -                               "use fork() instead")
    -        copy = InterpClonableCoroutine(self.costate)
    -        self.clone_into(copy)
    -        return copy
    -
    -
    -class ForkThunk(AbstractThunk):
    -    def __init__(self, coroutine):
    -        self.coroutine = coroutine
    -        self.newcoroutine = None
    -    def call(self):
    -        oldcoro = self.coroutine
    -        self.coroutine = None
    -        newcoro = oldcoro.clone()
    -        newcoro.parent = oldcoro
    -        self.newcoroutine = newcoro
    -
    -def fork():
    -    """Fork, as in the Unix fork(): the call returns twice, and the return
    -    value of the call is either the new 'child' coroutine object (if returning
    -    into the parent), or None (if returning into the child).  This returns
    -    into the parent first, which can switch to the child later.
    -    """
    -    current = InterpClonableCoroutine.getcurrent()
    -    if not isinstance(current, InterpClonableCoroutine):
    -        raise RuntimeError("fork() in a non-clonable coroutine")
    -    thunk = ForkThunk(current)
    -    coro_fork = InterpClonableCoroutine()
    -    coro_fork.bind(thunk)
    -    coro_fork.switch()
    -    # we resume here twice.  The following would need explanations about
    -    # why it returns the correct thing in both the parent and the child...
    -    return thunk.newcoroutine
    -
    -##    from pypy.rpython.lltypesystem import lltype, lloperation
    -##    lloperation.llop.debug_view(lltype.Void, current, thunk,
    -##        lloperation.llop.gc_x_size_header(lltype.Signed))
    diff --git a/pypy/module/_stackless/rcoroutine.py b/pypy/module/_stackless/rcoroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/rcoroutine.py
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -from pypy.rlib.rcoroutine import make_coroutine_classes
    -from pypy.interpreter.baseobjspace import Wrappable
    -
    -d = make_coroutine_classes(Wrappable)
    -
    -Coroutine = d['Coroutine']
    -BaseCoState = d['BaseCoState']
    -AbstractThunk = d['AbstractThunk']
    -syncstate = d['syncstate']
    -CoroutineExit = d['CoroutineExit']
    diff --git a/pypy/module/_stackless/stackless_flags.py b/pypy/module/_stackless/stackless_flags.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/stackless_flags.py
    +++ /dev/null
    @@ -1,201 +0,0 @@
    -"""
    -basic definitions for tasklet flags.
    -For simplicity and compatibility,
    -they are defined the same for coroutines,
    -even if they are not used.
    -
    -taken from tasklet_structs.h
    -----------------------------
    -
    -/***************************************************************************
    -
    -    Tasklet Flag Definition
    -    -----------------------
    -
    -    blocked:        The tasklet is either waiting in a channel for
    -                    writing (1) or reading (-1) or not blocked (0).
    -                    Maintained by the channel logic. Do not change.
    -
    -    atomic:         If true, schedulers will never switch. Driven by
    -                    the code object or dynamically, see below.
    -
    -    ignore_nesting: Allows auto-scheduling, even if nesting_level
    -                    is not zero.
    -
    -    autoschedule:   The tasklet likes to be auto-scheduled. User driven.
    -
    -    block_trap:     Debugging aid. Whenever the tasklet would be
    -                    blocked by a channel, an exception is raised.
    -
    -    is_zombie:      This tasklet is almost dead, its deallocation has
    -                    started. The tasklet *must* die at some time, or the
    -                    process can never end.
    -
    -    pending_irq:    If set, an interrupt was issued during an atomic
    -                    operation, and should be handled when possible.
    -
    -
    -    Policy for atomic/autoschedule and switching:
    -    ---------------------------------------------
    -    A tasklet switch can always be done explicitly by calling schedule().
    -    Atomic and schedule are concerned with automatic features.
    -
    -    atomic  autoschedule
    -
    -        1       any     Neither a scheduler nor a watchdog will
    -                        try to switch this tasklet.
    -
    -        0       0       The tasklet can be stopped on desire, or it
    -                        can be killed by an exception.
    -
    -        0       1       Like above, plus auto-scheduling is enabled.
    -
    -    Default settings:
    -    -----------------
    -    All flags are zero by default.
    -
    - ***************************************************************************/
    -
    -typedef struct _tasklet_flags {
    -        int blocked: 2;
    -        unsigned int atomic: 1;
    -        unsigned int ignore_nesting: 1;
    -        unsigned int autoschedule: 1;
    -        unsigned int block_trap: 1;
    -        unsigned int is_zombie: 1;
    -        unsigned int pending_irq: 1;
    -} PyTaskletFlagStruc;
    -"""
    -
    -from pypy.rlib.rarithmetic import LONG_BIT, intmask
    -
    -class BitSetDef(object):
    -    __slots__ = "_names __dict__ _attrname".split()
    -
    -    def __init__(self, _attrname):
    -        self._names = []
    -        self._attrname = _attrname
    -        
    -    def __setattr__(self, key, value):
    -        if key not in self.__slots__:
    -            assert key not in self.__dict__
    -            self._names.append(key)
    -        object.__setattr__(self, key, value)
    -
    -    def __iter__(self):
    -        return self._enum_objects()
    -    
    -    def _enum_objects(self):
    -        for name in self._names:
    -            yield name, getattr(self, name)
    -
    -# negative values are user-writable
    -flags = BitSetDef("flags")
    -flags.blocked           =   2, """writing (1) or reading (-1) or not blocked (0)"""
    -flags.atomic            =  -1, """If true, schedulers will never switch"""
    -flags.ignore_nesting    =  -1, """allow auto-scheduling in nested interpreters"""
    -flags.autoschedule      =  -1, """enable auto-scheduling"""
    -flags.block_trap        =  -1, """raise an exception instead of blocking"""
    -flags.is_zombie         =   1, """__del__ is in progress"""
    -flags.pending_irq       =   1, """an interrupt occured while being atomic"""
    -
    -def make_get_bits(name, bits, shift):
    -    """ return a bool for single bits, signed int otherwise """
    -    signmask = 1 << (bits - 1 + shift)
    -    lshift = bits + shift
    -    rshift = bits
    -    if bits == 1:
    -        return "bool(%s & 0x%x)" % (name, signmask)
    -    else:
    -        return "intmask(%s << (LONG_BIT-%d)) >> (LONG_BIT-%d)" % (name, lshift, rshift)
    -
    -def make_set_bits(name, bits, shift):
    -    datamask = int('1' * bits, 2)
    -    clearmask = datamask << shift
    -    return "%s & ~0x%x | (value & 0x%x) << %d" % (name, clearmask, datamask, shift)
    -
    -def gen_code():
    -    from cStringIO import StringIO
    -    f = StringIO()
    -    print >> f, "class StacklessFlags(object):"
    -    print >> f, "    _mixin_ = True"
    -    shift = 0
    -    field = "self.%s" % flags._attrname
    -    for name, (bits, doc) in flags:
    -        write, bits = bits < 0, abs(bits)
    -        print >> f
    -        print >> f, '    def get_%s(self):' % name
    -        print >> f, '        """%s"""' % doc
    -        print >> f, '        return %s' % make_get_bits(field, bits, shift)
    -        print >> f, '    def set_%s(self, value):' % name
    -        print >> f, '        """%s"""' % doc
    -        print >> f, '        %s = %s' % (field, make_set_bits(field, bits, shift))
    -        print >> f, '    set_%s._public = %s' % (name, write)
    -        shift += bits
    -    return f.getvalue()
    -
    -# BEGIN generated code
    -class StacklessFlags(object):
    -    _mixin_ = True
    -
    -    def get_blocked(self):
    -        """writing (1) or reading (-1) or not blocked (0)"""
    -        return intmask(self.flags << (LONG_BIT-2)) >> (LONG_BIT-2)
    -    def set_blocked(self, value):
    -        """writing (1) or reading (-1) or not blocked (0)"""
    -        self.flags = self.flags & ~0x3 | (value & 0x3) << 0
    -    set_blocked._public = False
    -
    -    def get_atomic(self):
    -        """If true, schedulers will never switch"""
    -        return bool(self.flags & 0x4)
    -    def set_atomic(self, value):
    -        """If true, schedulers will never switch"""
    -        self.flags = self.flags & ~0x4 | (value & 0x1) << 2
    -    set_atomic._public = True
    -
    -    def get_ignore_nesting(self):
    -        """allow auto-scheduling in nested interpreters"""
    -        return bool(self.flags & 0x8)
    -    def set_ignore_nesting(self, value):
    -        """allow auto-scheduling in nested interpreters"""
    -        self.flags = self.flags & ~0x8 | (value & 0x1) << 3
    -    set_ignore_nesting._public = True
    -
    -    def get_autoschedule(self):
    -        """enable auto-scheduling"""
    -        return bool(self.flags & 0x10)
    -    def set_autoschedule(self, value):
    -        """enable auto-scheduling"""
    -        self.flags = self.flags & ~0x10 | (value & 0x1) << 4
    -    set_autoschedule._public = True
    -
    -    def get_block_trap(self):
    -        """raise an exception instead of blocking"""
    -        return bool(self.flags & 0x20)
    -    def set_block_trap(self, value):
    -        """raise an exception instead of blocking"""
    -        self.flags = self.flags & ~0x20 | (value & 0x1) << 5
    -    set_block_trap._public = True
    -
    -    def get_is_zombie(self):
    -        """__del__ is in progress"""
    -        return bool(self.flags & 0x40)
    -    def set_is_zombie(self, value):
    -        """__del__ is in progress"""
    -        self.flags = self.flags & ~0x40 | (value & 0x1) << 6
    -    set_is_zombie._public = False
    -
    -    def get_pending_irq(self):
    -        """an interrupt occured while being atomic"""
    -        return bool(self.flags & 0x80)
    -    def set_pending_irq(self, value):
    -        """an interrupt occured while being atomic"""
    -        self.flags = self.flags & ~0x80 | (value & 0x1) << 7
    -    set_pending_irq._public = False
    -
    -# END generated code
    -
    -if __name__ == '__main__':
    -    # paste this into the file
    -    print gen_code()
    diff --git a/pypy/module/_stackless/test/__init__.py b/pypy/module/_stackless/test/__init__.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/__init__.py
    +++ /dev/null
    @@ -1,1 +0,0 @@
    -#
    \ No newline at end of file
    diff --git a/pypy/module/_stackless/test/conftest.py b/pypy/module/_stackless/test/conftest.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/conftest.py
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -import sys
    -import py.test
    -
    -def pytest_runtest_setup(item):
    -    py.test.importorskip('greenlet')
    -    if sys.platform == 'win32':
    -        py.test.skip("stackless tests segfault on Windows")
    -
    diff --git a/pypy/module/_stackless/test/slp_test_pickle.py b/pypy/module/_stackless/test/slp_test_pickle.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/slp_test_pickle.py
    +++ /dev/null
    @@ -1,35 +0,0 @@
    -from pypy.conftest import gettestobjspace
    -
    -# app-level testing of coroutine pickling
    -
    -class AppTest_Pickle:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_simple_ish(self):
    -
    -        output = []
    -        import _stackless
    -        def f(coro, n, x):
    -            if n == 0:
    -                coro.switch()
    -                return
    -            f(coro, n-1, 2*x)
    -            output.append(x)
    -
    -        def example():
    -            main_coro = _stackless.coroutine.getcurrent()
    -            sub_coro = _stackless.coroutine()
    -            sub_coro.bind(f, main_coro, 5, 1)
    -            sub_coro.switch()
    -
    -            import pickle
    -            pckl = pickle.dumps(sub_coro)
    -            new_coro = pickle.loads(pckl)
    -
    -            new_coro.switch()
    -
    -        example()
    -        assert output == [16, 8, 4, 2, 1]
    diff --git a/pypy/module/_stackless/test/test_choicepoint.py b/pypy/module/_stackless/test/test_choicepoint.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_choicepoint.py
    +++ /dev/null
    @@ -1,85 +0,0 @@
    -import py; py.test.skip("clonable coroutines not really maintained any more")
    -
    -from pypy.rlib.rcoroutine import AbstractThunk
    -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine
    -
    -class ChoicePointHolder(object):
    -    def __init__(self):
    -        self.choicepoints = []
    -        self.clone_me = False
    -        self.answer = 0
    -        self.solutions_count = 0
    -
    -    def next_choice(self):
    -        return self.choicepoints.pop()
    -
    -    def add(self, choice, answer=0):
    -        self.choicepoints.append((choice, answer))
    -
    -    def more_choices(self):
    -        return bool(self.choicepoints)
    -
    -    def choice(self):
    -        #os.write(1, "choice\n")
    -        self.clone_me = True
    -        self.g_main.switch()
    -        #os.write(1, "answer: %d\n" % (self.answer,))
    -        return self.answer
    -
    -    def fail(self):
    -        self.g_main.switch()
    -        assert False
    -
    -choicepoints = ChoicePointHolder()
    -
    -# ____________________________________________________________
    -
    -class SearchTask(AbstractThunk):
    -    def call(self):
    -        path = []
    -        for i in range(10):
    -            res = choicepoints.choice()
    -            assert len(path) == i
    -            path.append(res)
    -            #os.write(1, "{%x} trying: %s\n" % (id(path), path))
    -            if i == 3:
    -                import gc; gc.collect()
    -        #os.write(1, "{%x} found a solution: %s\n" % (id(path), path))
    -        choicepoints.solutions_count += 1
    -
    -# ____________________________________________________________
    -
    -
    -class SearchAllTask(AbstractThunk):
    -    def call(self):
    -        search_coro = ClonableCoroutine()
    -        search_coro.bind(SearchTask())
    -        choicepoints.add(search_coro)
    -
    -        #os.write(1, "starting\n")
    -        while choicepoints.more_choices():
    -            searcher, nextvalue = choicepoints.next_choice()
    -            choicepoints.clone_me = False
    -            choicepoints.answer = nextvalue
    -            #os.write(1, '<<< {%x} %d\n' % (id(searcher), nextvalue))
    -            searcher.switch()
    -            #os.write(1, '>>> %d\n' % (choicepoints.clone_me,))
    -            if choicepoints.clone_me:
    -                searcher2 = searcher.clone()
    -                #os.write(1, 'searcher = {%x}, searcher2 = {%x}\n' % (
    -                #    id(searcher), id(searcher2)))
    -                choicepoints.add(searcher, 5)
    -                choicepoints.add(searcher2, 4)
    -
    -def entry_point():
    -    choicepoints.g_main = ClonableCoroutine()
    -    choicepoints.g_main.bind(SearchAllTask())
    -    choicepoints.g_main.switch()
    -    return choicepoints.solutions_count
    -
    -def test_choicepoint():
    -    from pypy.translator.c.test import test_newgc
    -    tester = test_newgc.TestUsingStacklessFramework()
    -    fn = tester.getcompiled(entry_point)
    -    res = fn()
    -    assert res == 2 ** 10
    diff --git a/pypy/module/_stackless/test/test_clonable.py b/pypy/module/_stackless/test/test_clonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_clonable.py
    +++ /dev/null
    @@ -1,187 +0,0 @@
    -import py; py.test.skip("clonable coroutines not really maintained any more")
    -
    -from pypy.conftest import gettestobjspace, option
    -import py, sys
    -
    -# app-level testing of coroutine cloning
    -
    -class AppTestClonable:
    -
    -    def setup_class(cls):
    -        if not option.runappdirect:
    -            py.test.skip('pure appdirect test (run with -A)')
    -        cls.space = space = gettestobjspace(usemodules=('_stackless',))
    -        if not space.is_true(space.appexec([], """():
    -            import _stackless
    -            return hasattr(_stackless, 'clonable')
    -        """)):
    -            py.test.skip('no _stackless.clonable')
    -
    -
    -    def test_solver(self):
    -        import _stackless
    -
    -        class Fail(Exception):
    -            pass
    -
    -        class Success(Exception):
    -            pass
    -
    -        def first_solution(func):
    -            global next_answer
    -            co = _stackless.clonable()
    -            co.bind(func)
    -            pending = [(co, None)]
    -            while pending:
    -                co, next_answer = pending.pop()
    -                try:
    -                    co.switch()
    -                except Fail:
    -                    pass
    -                except Success, e:
    -                    return e.args[0]
    -                else:
    -                    # zero_or_one() called, clone the coroutine
    -                    co2 = co.clone()
    -                    pending.append((co2, 1))
    -                    pending.append((co, 0))
    -            raise Fail("no solution")
    -
    -        pending = []
    -        main = _stackless.clonable.getcurrent()
    -
    -        def zero_or_one():
    -            main.switch()
    -            return next_answer
    -
    -        # ____________________________________________________________
    -
    -        invalid_prefixes = {
    -            (0, 0): True,
    -            (0, 1, 0): True,
    -            (0, 1, 1): True,
    -            (1, 0): True,
    -            (1, 1, 0, 0): True,
    -            }
    -
    -        def example():
    -            test = []
    -            for n in range(5):
    -                test.append(zero_or_one())
    -                if tuple(test) in invalid_prefixes:
    -                    raise Fail
    -            raise Success(test)
    -
    -        res = first_solution(example)
    -        assert res == [1, 1, 0, 1, 0]
    -
    -
    -    def test_myself_may_not_be_me_any_more(self):
    -        import gc
    -        from _stackless import clonable
    -
    -        counter = [0]
    -
    -        def runner():
    -            while 1:
    -                assert clonable.getcurrent() is coro
    -                counter[0] += 1
    -                main.switch()
    -
    -        main = clonable.getcurrent()
    -        coro = clonable()
    -        coro.bind(runner)
    -
    -        coro.switch()
    -        assert counter == [1]
    -
    -        assert clonable.getcurrent() is main
    -        coro1 = coro.clone()
    -        assert counter == [1]
    -        assert clonable.getcurrent() is main
    -        coro.switch()
    -        assert counter == [2]
    -        coro.switch()
    -        assert counter == [3]
    -        assert clonable.getcurrent() is main
    -        del coro1
    -        gc.collect()
    -        #print "collected!"
    -        assert clonable.getcurrent() is main
    -        assert counter == [3]
    -        coro.switch()
    -        assert clonable.getcurrent() is main
    -        assert counter == [4]
    -
    -
    -    def test_fork(self):
    -        import _stackless
    -
    -        class Fail(Exception):
    -            pass
    -
    -        class Success(Exception):
    -            pass
    -
    -        def first_solution(func):
    -            global next_answer
    -            co = _stackless.clonable()
    -            co.bind(func)
    -            try:
    -                co.switch()
    -            except Success, e:
    -                return e.args[0]
    -
    -        def zero_or_one():
    -            sub = _stackless.fork()
    -            if sub is not None:
    -                # in the parent: run the child first
    -                try:
    -                    sub.switch()
    -                except Fail:
    -                    pass
    -                # then proceed with answer '1'
    -                return 1
    -            else:
    -                # in the child: answer '0'
    -                return 0
    -
    -        # ____________________________________________________________
    -
    -        invalid_prefixes = {
    -            (0, 0): True,
    -            (0, 1, 0): True,
    -            (0, 1, 1): True,
    -            (1, 0): True,
    -            (1, 1, 0, 0): True,
    -            }
    -
    -        def example():
    -            test = []
    -            for n in range(5):
    -                test.append(zero_or_one())
    -                if tuple(test) in invalid_prefixes:
    -                    raise Fail
    -            raise Success(test)
    -
    -        res = first_solution(example)
    -        assert res == [1, 1, 0, 1, 0]
    -
    -    def test_clone_before_start(self):
    -        """Tests that a clonable coroutine can be
    -        cloned before it is started
    -        (this used to fail with a segmentation fault)
    -        """
    -        import _stackless
    -
    -        counter = [0]
    -        def simple_coro():
    -            print "hello"
    -            counter[0] += 1
    -
    -        s = _stackless.clonable()
    -        s.bind(simple_coro)
    -        t = s.clone()
    -        s.switch()
    -        t.switch()
    -        assert counter[0] == 2
    diff --git a/pypy/module/_stackless/test/test_composable_coroutine.py b/pypy/module/_stackless/test/test_composable_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_composable_coroutine.py
    +++ /dev/null
    @@ -1,133 +0,0 @@
    -""" a faith is the connection between past and future that divides the
    -    application into switch-compatible chunks.
    -    -- stakkars
    -"""
    -from pypy.conftest import gettestobjspace
    -from py.test import skip
    -
    -class AppTest_ComposableCoroutine:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -        cls.w_generator_ = space.appexec([], """():
    -            import _stackless
    -
    -            generators_costate = _stackless.usercostate()
    -            main = generators_costate.getcurrent()
    -
    -            class generator_iterator(_stackless.coroutine):
    -
    -                def __iter__(self):
    -                    return self
    -
    -                def next(self):
    -                    if self.gi_answer is not None:
    -                        raise ValueError('stackless-generator'
    -                                         ' already executing')
    -                    self.gi_answer = []
    -                    self.gi_caller = generators_costate.getcurrent()
    -                    self.switch()
    -                    answer = self.gi_answer
    -                    self.gi_answer = None
    -                    if answer:
    -                        return answer[0]
    -                    else:
    -                        raise StopIteration
    -
    -            def generator(f):
    -                def myfunc(*args, **kwds):
    -                    g = generators_costate.spawn(generator_iterator)
    -                    g.gi_answer = None
    -                    g.bind(f, *args, **kwds)
    -                    return g
    -                return myfunc
    -
    -            def Yield(value):
    -                g = generators_costate.getcurrent()
    -                if g is main:
    -                    raise ValueError('Yield() outside any stackless-generator')
    -                assert isinstance(g, generator_iterator)
    -                assert g.gi_answer == []
    -                g.gi_answer.append(value)
    -                g.gi_caller.switch()
    -
    -            generator.Yield = Yield
    -            generator._costate = generators_costate
    -            return (generator,)
    -        """)
    -
    -    def test_simple_costate(self):
    -        import _stackless
    -        costate = _stackless.usercostate()
    -        main = costate.getcurrent()
    -
    -        result = []
    -        def f():
    -            result.append(costate.getcurrent())
    -        co = costate.spawn()
    -        co.bind(f)
    -        co.switch()
    -        assert result == [co]
    -
    -    def test_generator(self):
    -        generator, = self.generator_
    -
    -        def squares(n):
    -            for i in range(n):
    -                generator.Yield(i*i)
    -        squares = generator(squares)
    -
    -        lst1 = [i*i for i in range(10)]
    -        for got in squares(10):
    -            expected = lst1.pop(0)
    -            assert got == expected
    -        assert lst1 == []
    -
    -    def test_multiple_costates(self):
    -        """Test that two independent costates mix transparently:
    -
    -        - compute_costate, used for a coroutine that fills a list with
    -                           some more items each time it is switched to
    -
    -        - generators_costate, used interally by self.generator (see above)
    -        """
    -
    -        import _stackless
    -        generator, = self.generator_
    -
    -        # you can see how it fails if we don't have two different costates
    -        # by setting compute_costate to generator._costate instead
    -        compute_costate = _stackless.usercostate()
    -        compute_main = compute_costate.getcurrent()
    -        lst = []
    -
    -        def filler():     # -> 0, 1, 2, 100, 101, 102, 200, 201, 202, 300 ...
    -            for k in range(5):
    -                for j in range(3):
    -                    lst.append(100 * k + j)
    -                compute_main.switch()
    -
    -        filler_co = compute_costate.spawn()
    -        filler_co.bind(filler)
    -
    -        def grab_next_value():
    -            while not lst:
    -                #print 'filling more...'
    -                filler_co.switch()
    -                #print 'now lst =', lst
    -            #print 'grabbing', lst[0]
    -            return lst.pop(0)
    -
    -        def squares(n):
    -            for i in range(n):
    -                #print 'square:', i
    -                generator.Yield(i*grab_next_value())
    -        squares = generator(squares)
    -
    -        lst1 = [0, 1, 4,  300, 404, 510,  1200, 1407, 1616,  2700]
    -        for got in squares(10):
    -            expected = lst1.pop(0)
    -            assert got == expected
    -        assert lst1 == []
    diff --git a/pypy/module/_stackless/test/test_coroutine.py b/pypy/module/_stackless/test/test_coroutine.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_coroutine.py
    +++ /dev/null
    @@ -1,168 +0,0 @@
    -from pypy.conftest import gettestobjspace, option
    -from py.test import skip
    -
    -
    -class AppTest_Coroutine:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_raise_propagate(self):
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        def f():
    -            return 1/0
    -        co.bind(f)
    -        try:
    -            co.switch()
    -        except ZeroDivisionError:
    -            pass
    -        else:
    -            raise AssertionError("exception not propagated")
    -
    -    def test_strange_test(self):
    -        from _stackless import coroutine
    -        def f():
    -            print "in new coro"
    -            return 42
    -        def create():
    -            b = coroutine()
    -            b.bind(f)
    -            print "bound"
    -            b.switch()
    -            print "switched"
    -            return b
    -        a = coroutine()
    -        a.bind(create)
    -        b = a.switch()
    -        # now b.parent = a
    -        def nothing():
    -            pass
    -        a.bind(nothing)
    -        def kill():
    -            # this sets a.parent = b
    -            a.kill()
    -        b.bind(kill)
    -        b.switch()
    -
    -    def test_kill(self):
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        def f():
    -            pass
    -        co.bind(f)
    -        assert co.is_alive
    -        co.kill()
    -        assert not co.is_alive
    -
    -    def test_kill_running(self):
    -        coroutineexit = []
    -        import _stackless as stackless
    -        main = stackless.coroutine.getcurrent()
    -        result = []
    -        co = stackless.coroutine()
    -        def f():
    -            x = 2
    -            try:
    -                result.append(1)
    -                main.switch()
    -                x = 3
    -            except CoroutineExit:
    -                coroutineexit.append(True)
    -                raise
    -            finally:
    -                result.append(x)
    -            result.append(4)
    -        co.bind(f)
    -        assert co.is_alive
    -        co.switch()
    -        assert co.is_alive
    -        assert result == [1]
    -        co.kill()
    -        assert not co.is_alive
    -        assert result == [1, 2]
    -        assert coroutineexit == [True]
    -
    -    def test_bogus_bind(self):
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        def f():
    -            pass
    -        co.bind(f)
    -        raises(ValueError, co.bind, f)
    -
    -    def test__framestack(self):
    -        import _stackless as stackless
    -        main = stackless.coroutine.getmain()
    -        co = stackless.coroutine()
    -        def g():
    -            return co._framestack
    -        def f():
    -            return g()
    -
    -        co.bind(f)
    -        stack = co.switch()
    -        assert stack == () # running corountine, _framestack is empty
    -
    -        co = stackless.coroutine()
    -        def g():
    -            return main.switch()
    -        def f():
    -            return g()
    -
    -        co.bind(f)
    -        co.switch()
    -        stack = co._framestack
    -        assert len(stack) == 2
    -        assert stack[0].f_code is f.func_code
    -        assert stack[1].f_code is g.func_code
    -
    -        co = stackless.coroutine()
    -
    -
    -
    -class AppTestDirect:
    -    def setup_class(cls):
    -        if not option.runappdirect:
    -            skip('pure appdirect test (run with -A)')
    -        cls.space = gettestobjspace(usemodules=('_stackless',))
    -
    -    def test_stack_depth_limit(self):
    -        import sys
    -        import _stackless as stackless
    -        st = stackless.get_stack_depth_limit()
    -        try:
    -            stackless.set_stack_depth_limit(1)
    -            assert stackless.get_stack_depth_limit() == 1
    -            try:
    -                co = stackless.coroutine()
    -                def f():
    -                    pass
    -                co.bind(f)
    -                co.switch()
    -            except RuntimeError:
    -                pass
    -        finally:
    -            stackless.set_stack_depth_limit(st)
    -
    -class TestRandomThings:
    -    def setup_class(cls):
    -        cls.space = gettestobjspace(usemodules=('_stackless',))
    -
    -    def test___del___handling(self):
    -        space = self.space
    -        w_l = space.newlist([])
    -        coro = space.appexec([w_l], """(l):
    -            from _stackless import coroutine
    -            class MyCoroutine(coroutine):
    -                def __del__(self):
    -                    l.append(self.is_zombie)
    -            return MyCoroutine()
    -        """)
    -        coro.__del__()
    -        space.user_del_action.perform(space.getexecutioncontext(), None)
    -        coro._kill_finally()
    -        assert space.len_w(w_l) == 1
    -        res = space.is_true(space.getitem(w_l, space.wrap(0)))
    -        assert res
    diff --git a/pypy/module/_stackless/test/test_greenlet.py b/pypy/module/_stackless/test/test_greenlet.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_greenlet.py
    +++ /dev/null
    @@ -1,643 +0,0 @@
    -from pypy.conftest import gettestobjspace, skip_on_missing_buildoption
    -
    -class AppTest_Greenlet:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_very_simple(self):
    -        from _stackless import greenlet
    -        lst = []
    -        def f(x):
    -            lst.append(x)
    -            return x + 10
    -        g = greenlet(f)
    -        assert not g
    -        res = g.switch(20)
    -        assert res == 30
    -        assert lst == [20]
    -        assert g.dead
    -        assert not g
    -
    -    def test_switch_back_to_main(self):
    -        from _stackless import greenlet
    -        lst = []
    -        main = greenlet.getcurrent()
    -        def f(x):
    -            lst.append(x)
    -            x = main.switch(x + 10)
    -            return 40 + x 
    -        g = greenlet(f)
    -        res = g.switch(20)
    -        assert res == 30
    -        assert lst == [20]
    -        assert not g.dead
    -        res = g.switch(2)
    -        assert res == 42
    -        assert g.dead
    -
    -    def test_simple(self):
    -        from _stackless import greenlet
    -        lst = []
    -        gs = []
    -        def f():
    -            lst.append(1)
    -            greenlet.getcurrent().parent.switch()
    -            lst.append(3)
    -        g = greenlet(f)
    -        lst.append(0)
    -        g.switch()
    -        lst.append(2)
    -        g.switch()
    -        lst.append(4)
    -        assert lst == range(5)
    -
    -    def test_exception_simple(self):
    -        from _stackless import greenlet
    -        def f():
    -            raise ValueError
    -        g1 = greenlet(f)
    -        raises(ValueError, g1.switch)
    -
    -    def test_exception_propagate(self):
    -        from _stackless import greenlet
    -        def f():
    -            raise ValueError
    -        def g():
    -            return g1.switch()
    -        g1 = greenlet(f)
    -        g2 = greenlet(g)
    -        raises(ValueError, g1.switch)
    -        g1 = greenlet(f)
    -        raises(ValueError, g2.switch)
    -
    -
    -    def test_exc_info_save_restore(self):
    -        from _stackless import greenlet
    -        import sys
    -        def f():
    -            try:
    -                raise ValueError('fun')
    -            except:
    -                exc_info = sys.exc_info()
    -                greenlet(h).switch()
    -                assert exc_info == sys.exc_info()
    -
    -        def h():
    -            assert sys.exc_info() == (None, None, None)
    -
    -        greenlet(f).switch()
    -
    -    def test_exception(self):
    -        from _stackless import greenlet
    -        import sys
    -        def fmain(seen):
    -            try:
    -                greenlet.getcurrent().parent.switch()
    -            except:
    -                seen.append(sys.exc_info()[0])
    -                raise
    -            raise ValueError
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        raises(TypeError, "g2.parent = 1")
    -        g2.parent = g1
    -        assert seen == []
    -        raises(ValueError, g2.switch)
    -        assert seen == [ValueError]
    -        g2.switch()
    -        assert seen == [ValueError]
    -
    -    def test_send_exception(self):
    -        from _stackless import greenlet
    -        import sys
    -        def send_exception(g, exc):
    -            # note: send_exception(g, exc)  can be now done with  g.throw(exc).
    -            # the purpose of this test is to explicitely check the propagation rules.
    -            def crasher(exc):
    -                raise exc
    -            g1 = greenlet(crasher)
    -            g1.parent = g
    -            g1.switch(exc)
    -        def fmain(seen):
    -            try:
    -                greenlet.getcurrent().parent.switch()
    -            except:
    -                seen.append(sys.exc_info()[0])
    -                raise
    -            raise ValueError
    -
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g1.switch(seen)
    -        raises(KeyError, "send_exception(g1, KeyError)")
    -        assert seen == [KeyError]
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g1.switch(seen)
    -        raises(KeyError, "g1.throw(KeyError)")
    -        assert seen == [KeyError]
    -        assert g1.dead
    -
    -    def test_frame(self):
    -        from _stackless import greenlet
    -        import sys
    -        def f1():
    -            f = sys._getframe(0)
    -            assert f.f_back is None
    -            greenlet.getcurrent().parent.switch(f)
    -            return "meaning of life"
    -        g = greenlet(f1)
    -        frame = g.switch()
    -        assert frame is g.gr_frame
    -        assert g
    -        next = g.switch()
    -        assert not g
    -        assert next == "meaning of life"
    -        assert g.gr_frame is None
    -
    -    def test_mixing_greenlet_coroutine(self):
    -        from _stackless import greenlet, coroutine
    -        lst = []
    -        def f():
    -            lst.append(1)
    -            greenlet.getcurrent().parent.switch()
    -            lst.append(3)
    -        def make_h(c):
    -            def h():
    -                g = greenlet(f)
    -                lst.append(0)
    -                g.switch()
    -                c.switch()
    -                lst.append(2)
    -                g.switch()
    -                c.switch()
    -                lst.append(4)
    -                c.switch()
    -            return h
    -        c1 = coroutine.getcurrent()
    -        c2 = coroutine()
    -        c3 = coroutine()
    -        c2.bind(make_h(c3))
    -        c3.bind(make_h(c2))
    -        c2.switch()
    -        assert lst == [0, 1, 0, 1, 2, 3, 2, 3, 4, 4]
    -
    -    def test_dealloc(self):
    -        skip("not working yet")
    -        from _stackless import greenlet
    -        import sys
    -        def fmain(seen):
    -            try:
    -                greenlet.getcurrent().parent.switch()
    -            except:
    -                seen.append(sys.exc_info()[0])
    -                raise
    -            raise ValueError
    -        seen = []
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        assert seen == []
    -        del g1
    -        assert seen == [greenlet.GreenletExit]
    -        del g2
    -        assert seen == [greenlet.GreenletExit, greenlet.GreenletExit]
    -
    -
    -# ____________________________________________________________
    -#
    -# The tests from greenlets.
    -# For now, without the ones that involve threads
    -#
    -class AppTest_PyMagicTestGreenlet:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -        cls.w_glob = space.appexec([], """():
    -            import sys
    -            from _stackless import greenlet
    -
    -            class SomeError(Exception):
    -                pass
    -
    -            def fmain(seen):
    -                try:
    -                    greenlet.getcurrent().parent.switch()
    -                except:
    -                    seen.append(sys.exc_info()[0])
    -                    raise
    -                raise SomeError
    -
    -            class Glob: pass
    -            glob = Glob()
    -            glob.__dict__.update(locals())
    -            return glob
    -        """)
    -
    -    def test_simple(self):
    -        greenlet = self.glob.greenlet
    -        lst = []
    -        def f():
    -            lst.append(1)
    -            greenlet.getcurrent().parent.switch()
    -            lst.append(3)
    -        g = greenlet(f)
    -        lst.append(0)
    -        g.switch()
    -        lst.append(2)
    -        g.switch()
    -        lst.append(4)
    -        assert lst == range(5)
    -
    -    def test_exception(self):
    -        greenlet  = self.glob.greenlet
    -        fmain     = self.glob.fmain
    -        SomeError = self.glob.SomeError
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        g2.parent = g1
    -        assert seen == []
    -        raises(SomeError, g2.switch)
    -        assert seen == [SomeError]
    -        g2.switch()
    -        assert seen == [SomeError]
    -
    -    def test_send_exception(self):
    -        greenlet  = self.glob.greenlet
    -        fmain     = self.glob.fmain
    -        def send_exception(g, exc):
    -            # note: send_exception(g, exc)  can be now done with  g.throw(exc).
    -            # the purpose of this test is to explicitely check the
    -            # propagation rules.
    -            def crasher(exc):
    -                raise exc
    -            g1 = greenlet(crasher, parent=g)
    -            g1.switch(exc)
    -
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g1.switch(seen)
    -        raises(KeyError, "send_exception(g1, KeyError)")
    -        assert seen == [KeyError]
    -
    -    def test_dealloc(self):
    -        skip("XXX in-progress: GC handling of greenlets")
    -        import gc
    -        greenlet = self.glob.greenlet
    -        fmain    = self.glob.fmain
    -        seen = []
    -        g1 = greenlet(fmain)
    -        g2 = greenlet(fmain)
    -        g1.switch(seen)
    -        g2.switch(seen)
    -        assert seen == []
    -        del g1
    -        gc.collect()
    -        assert seen == [greenlet.GreenletExit]
    -        del g2
    -        gc.collect()
    -        assert seen == [greenlet.GreenletExit, greenlet.GreenletExit]
    -
    -    def test_frame(self):
    -        import sys
    -        greenlet = self.glob.greenlet
    -        def f1():
    -            f = sys._getframe(0)
    -            assert f.f_back is None
    -            greenlet.getcurrent().parent.switch(f)
    -            return "meaning of life"
    -        g = greenlet(f1)
    -        frame = g.switch()
    -        assert frame is g.gr_frame
    -        assert g
    -        next = g.switch()
    -        assert not g
    -        assert next == "meaning of life"
    -        assert g.gr_frame is None
    -
    -
    -class AppTest_PyMagicTestThrow:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_class(self):
    -        from _stackless import greenlet
    -        def switch(*args):
    -            return greenlet.getcurrent().parent.switch(*args)
    -
    -        def f():
    -            try:
    -                switch("ok")
    -            except RuntimeError:
    -                switch("ok")
    -                return
    -            switch("fail")
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw(RuntimeError)
    -        assert res == "ok"
    -
    -    def test_val(self):
    -        from _stackless import greenlet
    -        def switch(*args):
    -            return greenlet.getcurrent().parent.switch(*args)
    -
    -        def f():
    -            try:
    -                switch("ok")
    -            except RuntimeError, val:
    -                if str(val) == "ciao":
    -                    switch("ok")
    -                    return
    -            switch("fail")
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw(RuntimeError("ciao"))
    -        assert res == "ok"
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw(RuntimeError, "ciao")
    -        assert res == "ok"
    -
    -    def test_kill(self):
    -        from _stackless import greenlet
    -        def switch(*args):
    -            return greenlet.getcurrent().parent.switch(*args)
    -
    -        def f():
    -            switch("ok")
    -            switch("fail")
    -
    -        g = greenlet(f)
    -        res = g.switch()
    -        assert res == "ok"
    -        res = g.throw()
    -        assert isinstance(res, greenlet.GreenletExit)
    -        assert g.dead
    -        res = g.throw()    # immediately eaten by the already-dead greenlet
    -        assert isinstance(res, greenlet.GreenletExit)
    -
    -    def test_throw_goes_to_original_parent(self):
    -        from _stackless import greenlet
    -        main = greenlet.getcurrent()
    -        def f1():
    -            try:
    -                main.switch("f1 ready to catch")
    -            except IndexError:
    -                return "caught"
    -            else:
    -                return "normal exit"
    -        def f2():
    -            main.switch("from f2")
    -
    -        g1 = greenlet(f1)
    -        g2 = greenlet(f2, parent=g1)
    -        raises(IndexError, g2.throw, IndexError)
    -        assert g2.dead
    -        assert g1.dead
    -
    -        g1 = greenlet(f1)
    -        g2 = greenlet(f2, parent=g1)
    -        res = g1.switch()
    -        assert res == "f1 ready to catch"
    -        res = g2.throw(IndexError)
    -        assert res == "caught"
    -        assert g2.dead
    -        assert g1.dead
    -
    -        g1 = greenlet(f1)
    -        g2 = greenlet(f2, parent=g1)
    -        res = g1.switch()
    -        assert res == "f1 ready to catch"
    -        res = g2.switch()
    -        assert res == "from f2"
    -        res = g2.throw(IndexError)
    -        assert res == "caught"
    -        assert g2.dead
    -        assert g1.dead
    -            
    -
    -class AppTest_PyMagicTestGenerator:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -
    -    def test_generator(self):
    -        from _stackless import greenlet
    -
    -        class genlet(greenlet):
    -
    -            def __init__(self, *args, **kwds):
    -                self.args = args
    -                self.kwds = kwds
    -
    -            def run(self):
    -                fn, = self.fn
    -                fn(*self.args, **self.kwds)
    -
    -            def __iter__(self):
    -                return self
    -
    -            def next(self):
    -                self.parent = greenlet.getcurrent()
    -                result = self.switch()
    -                if self:
    -                    return result
    -                else:
    -                    raise StopIteration
    -
    -        def Yield(value):
    -            g = greenlet.getcurrent()
    -            while not isinstance(g, genlet):
    -                if g is None:
    -                    raise RuntimeError, 'yield outside a genlet'
    -                g = g.parent
    -            g.parent.switch(value)
    -
    -        def generator(func):
    -            class generator(genlet):
    -                fn = (func,)
    -            return generator
    -
    -        # ___ test starts here ___
    -        seen = []
    -        def g(n):
    -            for i in range(n):
    -                seen.append(i)
    -                Yield(i)
    -        g = generator(g)
    -        for k in range(3):
    -            for j in g(5):
    -                seen.append(j)
    -        assert seen == 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
    -
    -
    -class AppTest_PyMagicTestGeneratorNested:
    -
    -    def setup_class(cls):
    -        space = gettestobjspace(usemodules=('_stackless',))
    -        cls.space = space
    -        cls.w_glob = space.appexec([], """():
    -            from _stackless import greenlet
    -
    -            class genlet(greenlet):
    -
    -                def __init__(self, *args, **kwds):
    -                    self.args = args
    -                    self.kwds = kwds
    -                    self.child = None
    -
    -                def run(self):
    -                    fn, = self.fn
    -                    fn(*self.args, **self.kwds)
    -
    -                def __iter__(self):
    -                    return self
    -
    -                def set_child(self, child):
    -                    self.child = child
    -
    -                def next(self):
    -                    if self.child:
    -                        child = self.child
    -                        while child.child:
    -                            tmp = child
    -                            child = child.child
    -                            tmp.child = None
    -
    -                        result = child.switch()
    -                    else:
    -                        self.parent = greenlet.getcurrent()            
    -                        result = self.switch()
    -
    -                    if self:
    -                        return result
    -                    else:
    -                        raise StopIteration
    -
    -            def Yield(value, level = 1):
    -                g = greenlet.getcurrent()
    -
    -                while level != 0:
    -                    if not isinstance(g, genlet):
    -                        raise RuntimeError, 'yield outside a genlet'
    -                    if level > 1:
    -                        g.parent.set_child(g)
    -                    g = g.parent
    -                    level -= 1
    -
    -                g.switch(value)
    -
    -            def Genlet(func):
    -                class Genlet(genlet):
    -                    fn = (func,)
    -                return Genlet
    -
    -            class Glob: pass
    -            glob = Glob()
    -            glob.__dict__.update(locals())
    -            return glob
    -        """)
    -
    -    def test_genlet_1(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -
    -        def g1(n, seen):
    -            for i in range(n):
    -                seen.append(i+1)
    -                yield i
    -
    -        def g2(n, seen):
    -            for i in range(n):
    -                seen.append(i+1)
    -                Yield(i)
    -
    -        g2 = Genlet(g2)
    -
    -        def nested(i):
    -            Yield(i)
    -
    -        def g3(n, seen):
    -            for i in range(n):
    -                seen.append(i+1)
    -                nested(i)
    -        g3 = Genlet(g3)
    -
    -        raises(RuntimeError, Yield, 10)
    -        for g in [g1, g2, g3]:
    -            seen = []
    -            for k in range(3):
    -                for j in g(5, seen):
    -                    seen.append(j)
    -            assert seen == 3 * [1, 0, 2, 1, 3, 2, 4, 3, 5, 4]
    -        raises(RuntimeError, Yield, 10)
    -
    -    def test_nested_genlets(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -        def a(n):
    -            if n == 0:
    -                return
    -            for ii in ax(n-1):
    -                Yield(ii)
    -            Yield(n)
    -        ax = Genlet(a)
    -        seen = []
    -        for ii in ax(5):
    -            seen.append(ii)
    -        assert seen == [1, 2, 3, 4, 5]
    -
    -    def test_perms(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -        def perms(l):
    -            if len(l) > 1:
    -                for e in l:
    -                    # No syntactical sugar for generator expressions
    -                    [Yield([e] + p) for p in perms([x for x in l if x!=e])]
    -            else:
    -                Yield(l)
    -        perms = Genlet(perms)
    -        gen_perms = perms(range(4))
    -        permutations = list(gen_perms)
    -        assert len(permutations) == 4*3*2*1
    -        assert [0,1,2,3] in permutations
    -        assert [3,2,1,0] in permutations
    -
    -    def test_layered_genlets(self):
    -        Genlet = self.glob.Genlet
    -        Yield  = self.glob.Yield
    -        def gr1(n):
    -            for ii in range(1, n):
    -                Yield(ii)
    -                Yield(ii * ii, 2)
    -        gr1 = Genlet(gr1)
    -        def gr2(n, seen):
    -            for ii in gr1(n):
    -                seen.append(ii)
    -        gr2 = Genlet(gr2)
    -        seen = []
    -        for ii in gr2(5, seen):
    -            seen.append(ii)
    -        assert seen == [1, 1, 2, 4, 3, 9, 4, 16]
    diff --git a/pypy/module/_stackless/test/test_interp_clonable.py b/pypy/module/_stackless/test/test_interp_clonable.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_interp_clonable.py
    +++ /dev/null
    @@ -1,118 +0,0 @@
    -"""
    -testing cloning
    -"""
    -import py; py.test.skip("clonable coroutines not really maintained any more")
    -
    -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect()
    -from pypy.translator.c import gc
    -from pypy.rpython.memory.gctransform import stacklessframework
    -from pypy.rpython.memory.test import test_transformed_gc
    -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine
    -from pypy.module._stackless.rclonable import AbstractThunk, fork
    -
    -class TestClonableCoroutine(test_transformed_gc.GCTest):
    -
    -    gcname = "marksweep"
    -    stacklessgc = True
    -    class gcpolicy(gc.StacklessFrameworkGcPolicy):
    -        class transformerclass(stacklessframework.StacklessFrameworkGCTransformer):
    -            GC_PARAMS = {'start_heap_size': 4096 }
    -
    -    def test_clone(self):
    -        class T(AbstractThunk):
    -            def __init__(self, result):
    -                self.result = result
    -            def call(self):
    -                self.result.append(2)
    -                ClonableCoroutine.getmain().switch()
    -                self.result.append(4)
    -        def f():
    -            result = []
    -            coro = ClonableCoroutine()
    -            coro.bind(T(result))
    -            result.append(1)
    -            coro.switch()
    -            coro2 = coro.clone()
    -            result.append(3)
    -            coro2.switch()
    -            result.append(5)
    -            coro.switch()
    -            result.append(6)
    -            n = 0
    -            for i in result:
    -                n = n*10 + i
    -            return n
    -
    -        run = self.runner(f)
    -        res = run([])
    -        assert res == 1234546
    -
    -    def test_clone_local_state(self):
    -        class T(AbstractThunk):
    -            def __init__(self, result):
    -                self.result = result
    -            def call(self):
    -                localstate = []
    -                localstate.append(10)
    -                self.result.append(2)
    -                ClonableCoroutine.getmain().switch()
    -                localstate.append(20)
    -                if localstate == [10, 20]:
    -                    self.result.append(4)
    -                else:
    -                    self.result.append(0)
    -        def f():
    -            result = []
    -            coro = ClonableCoroutine()
    -            coro.bind(T(result))
    -            result.append(1)
    -            coro.switch()
    -            coro2 = coro.clone()
    -            result.append(3)
    -            coro2.switch()
    -            result.append(5)
    -            coro.switch()
    -            result.append(6)
    -            n = 0
    -            for i in result:
    -                n = n*10 + i
    -            return n
    -
    -        run = self.runner(f)
    -        res = run([])
    -        assert res == 1234546
    -
    -    def test_fork(self):
    -        class T(AbstractThunk):
    -            def __init__(self, result):
    -                self.result = result
    -            def call(self):
    -                localdata = [10]
    -                self.result.append(2)
    -                newcoro = fork()
    -                localdata.append(20)
    -                if newcoro is not None:
    -                    # in the parent
    -                    self.result.append(3)
    -                    newcoro.switch()
    -                    self.result.append(5)
    -                else:
    -                    # in the child
    -                    self.result.append(4)
    -                localdata.append(30)
    -                self.result.append(localdata != [10, 20, 30])
    -        def f():
    -            result = []
    -            coro = ClonableCoroutine()
    -            coro.bind(T(result))
    -            result.append(1)
    -            coro.switch()
    -            result.append(6)
    -            n = 0
    -            for i in result:
    -                n = n*10 + i
    -            return n
    -
    -        run = self.runner(f)
    -        res = run([])
    -        assert res == 12340506
    diff --git a/pypy/module/_stackless/test/test_pickle.py b/pypy/module/_stackless/test/test_pickle.py
    deleted file mode 100644
    --- a/pypy/module/_stackless/test/test_pickle.py
    +++ /dev/null
    @@ -1,487 +0,0 @@
    -from pypy.conftest import gettestobjspace, option
    -import py
    -
    -# app-level testing of coroutine pickling
    -
    -
    -class AppTestBasic:
    -    def setup_class(cls):
    -        cls.space = gettestobjspace(usemodules=('_stackless',))
    -
    -    def test_pickle_main(self):
    -        import _stackless, pickle
    -        main = _stackless.coroutine.getcurrent()
    -        s = pickle.dumps(main)
    -        c = pickle.loads(s)
    -        assert c is main
    -
    -
    -class AppTestPickle:
    -
    -    def setup_class(cls):
    -        cls.space = gettestobjspace(usemodules=('_stackless',), CALL_METHOD=True)
    -
    -    def test_pickle_coroutine_empty(self):
    -        # this test is limited to basic pickling.
    -        # real stacks can only tested with a stackless pypy build.
    -        import _stackless as stackless
    -        co = stackless.coroutine()
    -        import pickle
    -        pckl = pickle.dumps(co)
    -        co2 = pickle.loads(pckl)
    -        # the empty unpickled coroutine can still be used:
    -        result = []
    -        co2.bind(result.append, 42)
    -        co2.switch()
    -        assert result == [42]
    -
    -    def test_pickle_coroutine_bound(self):
    -        import pickle
    -        import _stackless
    -        lst = [4]
    -        co = _stackless.coroutine()
    -        co.bind(lst.append, 2)
    -        pckl = pickle.dumps((co, lst))
    -
    -        (co2, lst2) = pickle.loads(pckl)
    -        assert lst2 == [4]
    -        co2.switch()
    -        assert lst2 == [4, 2]
    -
    -
    -    def test_simple_ish(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x)
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_pickle_again(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x)
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -    pckl = pickle.dumps(new_coro)
    -    newer_coro = pickle.loads(pckl)
    -
    -    newer_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_kwargs(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x, step=4):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x, step=1)
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_starstarargs(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x, step=4):
    -    if n == 0:
    -        coro.switch()
    -        return
    -    f(coro, n-1, 2*x, **{'step': 1})
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [16, 8, 4, 2, 1]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_closure(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    y = 3
    -    def f(coro, n, x):
    -        if n == 0:
    -            coro.switch()
    -            return
    -        f(coro, n-1, 2*x)
    -        output.append(x+y)
    -
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [19, 11, 7, 5, 4]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_exception(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro):
    -    try:
    -        raise ValueError
    -    except:
    -        coro.switch()
    -        import sys
    -        t, v, tb = sys.exc_info()
    -        output.append(t)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [ValueError]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_exception_after_unpickling(self):
    -
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro, n, x):
    -    if n == 0:
    -        coro.switch()
    -        raise ValueError
    -    try:
    -        f(coro, n-1, 2*x)
    -    finally:
    -        output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro, 5, 1)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    try:
    -        sub_coro.switch()
    -    except ValueError:
    -        pass
    -    else:
    -        assert 0
    -    try:
    -        new_coro.switch()
    -    except ValueError:
    -        pass
    -    else:
    -        assert 0
    -
    -example()
    -assert output == [16, 8, 4, 2, 1] * 2
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_loop(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro):
    -    for x in (1,2,3):
    -        coro.switch()
    -        output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -    new_coro.switch()
    -    new_coro.switch()
    -
    -example()
    -assert output == [1, 2, 3]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -    def test_valstack(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -def f(coro):
    -    r = 1+g(coro)+3
    -    output.append(r)
    -
    -def g(coro):
    -    coro.switch()
    -    return 2
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -
    -example()
    -assert output == [6]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -
    -    def test_exec_and_locals(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -output = []
    -import _stackless
    -
    -def f(coro):
    -    x = None
    -    exec "x = 9"
    -    coro.switch()
    -    output.append(x)
    -
    -def example():
    -    main_coro = _stackless.coroutine.getcurrent()
    -    sub_coro = _stackless.coroutine()
    -    sub_coro.bind(f, main_coro)
    -    sub_coro.switch()
    -
    -    import pickle
    -    pckl = pickle.dumps(sub_coro)
    -    new_coro = pickle.loads(pckl)
    -
    -    new_coro.switch()
    -
    -example()
    -assert output == [9]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    -
    -
    -    def test_solver(self):
    -        import new, sys
    -
    -        mod = new.module('mod')
    -        sys.modules['mod'] = mod
    -        try:
    -            exec '''
    -import _stackless, pickle
    -
    -class Fail(Exception):
    -    pass
    -
    -class Success(Exception):
    -    pass
    -
    -def first_solution(func):
    -    global next_answer
    -    co = _stackless.coroutine()
    -    co.bind(func)
    -    pending = [(co, None)]
    -    while pending:
    -        co, next_answer = pending.pop()
    -        try:
    -            co.switch()
    -        except Fail:
    -            pass
    -        except Success, e:
    -            return e.args[0]
    -        else:
    -            # zero_or_one() called, clone the coroutine
    -            # NB. this seems to be quite slow
    -            co2 = pickle.loads(pickle.dumps(co))
    -            pending.append((co2, 1))
    -            pending.append((co, 0))
    -    raise Fail("no solution")
    -
    -pending = []
    -main = _stackless.coroutine.getcurrent()
    -
    -def zero_or_one():
    -    main.switch()
    -    return next_answer
    -
    -# ____________________________________________________________
    -
    -invalid_prefixes = {
    -    (0, 0): True,
    -    (0, 1, 0): True,
    -    (0, 1, 1): True,
    -    (1, 0): True,
    -    (1, 1, 0, 0): True,
    -    }
    -
    -def example():
    -    test = []
    -    for n in range(5):
    -        test.append(zero_or_one())
    -        if tuple(test) in invalid_prefixes:
    -            raise Fail
    -    raise Success(test)
    -
    -res = first_solution(example)
    -assert res == [1, 1, 0, 1, 0]
    -''' in mod.__dict__
    -        finally:
    -            del sys.modules['mod']
    diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
    --- a/pypy/module/_weakref/interp__weakref.py
    +++ b/pypy/module/_weakref/interp__weakref.py
    @@ -8,24 +8,12 @@
     
     
     class WeakrefLifeline(W_Root):
    +    cached_weakref_index = -1
    +    cached_proxy_index = -1
    +
         def __init__(self, space):
             self.space = space
             self.refs_weak = []
    -        self.cached_weakref_index = -1
    -        self.cached_proxy_index = -1
    -
    -    def __del__(self):
    -        """This runs when the interp-level object goes away, and allows
    -        its lifeline to go away.  The purpose of this is to activate the
    -        callbacks even if there is no __del__ method on the interp-level
    -        W_Root subclass implementing the object.
    -        """
    -        for i in range(len(self.refs_weak) - 1, -1, -1):
    -            w_ref = self.refs_weak[i]()
    -            if w_ref is not None and w_ref.w_callable is not None:
    -                w_ref.enqueue_for_destruction(self.space,
    -                                              W_WeakrefBase.activate_callback,
    -                                              'weakref callback of ')
     
         def clear_all_weakrefs(self):
             """Clear all weakrefs.  This is called when an app-level object has
    @@ -39,12 +27,11 @@
             # weakref callbacks are not invoked eagerly here.  They are
             # invoked by self.__del__() anyway.
     
    -    @jit.dont_look_inside
    -    def get_or_make_weakref(self, space, w_subtype, w_obj, w_callable):
    +    def get_or_make_weakref(self, w_subtype, w_obj):
    +        space = self.space
             w_weakreftype = space.gettypeobject(W_Weakref.typedef)
             is_weakreftype = space.is_w(w_weakreftype, w_subtype)
    -        can_reuse = space.is_w(w_callable, space.w_None)
    -        if is_weakreftype and can_reuse and self.cached_weakref_index >= 0:
    +        if is_weakreftype and self.cached_weakref_index >= 0:
                 w_cached = self.refs_weak[self.cached_weakref_index]()
                 if w_cached is not None:
                     return w_cached
    @@ -52,16 +39,15 @@
                     self.cached_weakref_index = -1
             w_ref = space.allocate_instance(W_Weakref, w_subtype)
             index = len(self.refs_weak)
    -        W_Weakref.__init__(w_ref, space, w_obj, w_callable)
    +        W_Weakref.__init__(w_ref, space, w_obj, None)
             self.refs_weak.append(weakref.ref(w_ref))
    -        if is_weakreftype and can_reuse:
    +        if is_weakreftype:
                 self.cached_weakref_index = index
             return w_ref
     
    -    @jit.dont_look_inside
    -    def get_or_make_proxy(self, space, w_obj, w_callable):
    -        can_reuse = space.is_w(w_callable, space.w_None)
    -        if can_reuse and self.cached_proxy_index >= 0:
    +    def get_or_make_proxy(self, w_obj):
    +        space = self.space
    +        if self.cached_proxy_index >= 0:
                 w_cached = self.refs_weak[self.cached_proxy_index]()
                 if w_cached is not None:
                     return w_cached
    @@ -69,12 +55,11 @@
                     self.cached_proxy_index = -1
             index = len(self.refs_weak)
             if space.is_true(space.callable(w_obj)):
    -            w_proxy = W_CallableProxy(space, w_obj, w_callable)
    +            w_proxy = W_CallableProxy(space, w_obj, None)
             else:
    -            w_proxy = W_Proxy(space, w_obj, w_callable)
    +            w_proxy = W_Proxy(space, w_obj, None)
             self.refs_weak.append(weakref.ref(w_proxy))
    -        if can_reuse:
    -            self.cached_proxy_index = index
    +        self.cached_proxy_index = index
             return w_proxy
     
         def get_any_weakref(self, space):
    @@ -90,6 +75,45 @@
                     return w_ref
             return space.w_None
     
    +
    +class WeakrefLifelineWithCallbacks(WeakrefLifeline):
    +
    +    def __init__(self, space, oldlifeline=None):
    +        self.space = space
    +        if oldlifeline is None:
    +            self.refs_weak = []
    +        else:
    +            self.refs_weak = oldlifeline.refs_weak
    +
    +    def __del__(self):
    +        """This runs when the interp-level object goes away, and allows
    +        its lifeline to go away.  The purpose of this is to activate the
    +        callbacks even if there is no __del__ method on the interp-level
    +        W_Root subclass implementing the object.
    +        """
    +        for i in range(len(self.refs_weak) - 1, -1, -1):
    +            w_ref = self.refs_weak[i]()
    +            if w_ref is not None and w_ref.w_callable is not None:
    +                w_ref.enqueue_for_destruction(self.space,
    +                                              W_WeakrefBase.activate_callback,
    +                                              'weakref callback of ')
    +
    +    def make_weakref_with_callback(self, w_subtype, w_obj, w_callable):
    +        space = self.space
    +        w_ref = space.allocate_instance(W_Weakref, w_subtype)
    +        W_Weakref.__init__(w_ref, space, w_obj, w_callable)
    +        self.refs_weak.append(weakref.ref(w_ref))
    +        return w_ref
    +
    +    def make_proxy_with_callback(self, w_obj, w_callable):
    +        space = self.space
    +        if space.is_true(space.callable(w_obj)):
    +            w_proxy = W_CallableProxy(space, w_obj, w_callable)
    +        else:
    +            w_proxy = W_Proxy(space, w_obj, w_callable)
    +        self.refs_weak.append(weakref.ref(w_proxy))
    +        return w_proxy
    +
     # ____________________________________________________________
     
     class Dummy:
    @@ -103,8 +127,7 @@
     
     class W_WeakrefBase(Wrappable):
         def __init__(w_self, space, w_obj, w_callable):
    -        if space.is_w(w_callable, space.w_None):
    -            w_callable = None
    +        assert w_callable is not space.w_None    # should be really None
             w_self.space = space
             assert w_obj is not None
             w_self.w_obj_weak = weakref.ref(w_obj)
    @@ -177,16 +200,39 @@
         def descr__ne__(self, space, w_ref2):
             return space.not_(space.eq(self, w_ref2))
     
    +def getlifeline(space, w_obj):
    +    lifeline = w_obj.getweakref()
    +    if lifeline is None:
    +        lifeline = WeakrefLifeline(space)
    +        w_obj.setweakref(space, lifeline)
    +    return lifeline
    +
    +def getlifelinewithcallbacks(space, w_obj):
    +    lifeline = w_obj.getweakref()
    +    if not isinstance(lifeline, WeakrefLifelineWithCallbacks):  # or None
    +        oldlifeline = lifeline
    +        lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline)
    +        w_obj.setweakref(space, lifeline)
    +    return lifeline
    +
    +@jit.dont_look_inside
    +def get_or_make_weakref(space, w_subtype, w_obj):
    +    return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj)
    +
    +@jit.dont_look_inside
    +def make_weakref_with_callback(space, w_subtype, w_obj, w_callable):
    +    lifeline = getlifelinewithcallbacks(space, w_obj)
    +    return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable)
    +
     def descr__new__weakref(space, w_subtype, w_obj, w_callable=None,
                             __args__=None):
         if __args__.arguments_w:
             raise OperationError(space.w_TypeError, space.wrap(
                 "__new__ expected at most 2 arguments"))
    -    lifeline = w_obj.getweakref()
    -    if lifeline is None:
    -        lifeline = WeakrefLifeline(space)
    -        w_obj.setweakref(space, lifeline)
    -    return lifeline.get_or_make_weakref(space, w_subtype, w_obj, w_callable)
    +    if space.is_w(w_callable, space.w_None):
    +        return get_or_make_weakref(space, w_subtype, w_obj)
    +    else:
    +        return make_weakref_with_callback(space, w_subtype, w_obj, w_callable)
     
     W_Weakref.typedef = TypeDef("weakref",
         __doc__ = """A weak reference to an object 'obj'.  A 'callback' can be given,
    @@ -239,15 +285,23 @@
             w_obj = force(space, self)
             return space.call_args(w_obj, __args__)
     
    +@jit.dont_look_inside
    +def get_or_make_proxy(space, w_obj):
    +    return getlifeline(space, w_obj).get_or_make_proxy(w_obj)
    +
    +@jit.dont_look_inside
    +def make_proxy_with_callback(space, w_obj, w_callable):
    +    lifeline = getlifelinewithcallbacks(space, w_obj)
    +    return lifeline.make_proxy_with_callback(w_obj, w_callable)
    +
     def proxy(space, w_obj, w_callable=None):
         """Create a proxy object that weakly references 'obj'.
     'callback', if given, is called with the proxy as an argument when 'obj'
     is about to be finalized."""
    -    lifeline = w_obj.getweakref()
    -    if lifeline is None:
    -        lifeline = WeakrefLifeline(space)
    -        w_obj.setweakref(space, lifeline)
    -    return lifeline.get_or_make_proxy(space, w_obj, w_callable)
    +    if space.is_w(w_callable, space.w_None):
    +        return get_or_make_proxy(space, w_obj)
    +    else:
    +        return make_proxy_with_callback(space, w_obj, w_callable)
     
     def descr__new__proxy(space, w_subtype, w_obj, w_callable=None):
         raise OperationError(
    diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py
    --- a/pypy/module/_weakref/test/test_weakref.py
    +++ b/pypy/module/_weakref/test/test_weakref.py
    @@ -369,6 +369,26 @@
                 return A
             raises(TypeError, tryit)
     
    +    def test_proxy_to_dead_object(self):
    +        import _weakref, gc
    +        class A(object):
    +            pass
    +        p = _weakref.proxy(A())
    +        gc.collect()
    +        raises(ReferenceError, "p + 1")
    +
    +    def test_proxy_with_callback(self):
    +        import _weakref, gc
    +        class A(object):
    +            pass
    +        a2 = A()
    +        def callback(proxy):
    +            a2.seen = proxy
    +        p = _weakref.proxy(A(), callback)
    +        gc.collect()
    +        raises(ReferenceError, "p + 1")
    +        assert a2.seen is p
    +
         def test_repr(self):
             import _weakref, gc
             for kind in ('ref', 'proxy'):
    diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
    --- a/pypy/module/bz2/interp_bz2.py
    +++ b/pypy/module/bz2/interp_bz2.py
    @@ -446,7 +446,9 @@
                 result = self.buffer[pos:pos + n]
                 self.pos += n
             else:
    -            result = self.buffer
    +            pos = self.pos
    +            assert pos >= 0
    +            result = self.buffer[pos:]
                 self.pos = 0
                 self.buffer = ""
             self.readlength += len(result)
    diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py
    --- a/pypy/module/bz2/test/test_bz2_file.py
    +++ b/pypy/module/bz2/test/test_bz2_file.py
    @@ -274,14 +274,14 @@
                 pass
             del bz2f   # delete from this frame, which is captured in the traceback
     
    -    def test_read_chunk10(self):
    +    def test_read_chunk9(self):
             from bz2 import BZ2File
             self.create_temp_file()
             
             bz2f = BZ2File(self.temppath)
             text_read = ""
             while True:
    -            data = bz2f.read(10)
    +            data = bz2f.read(9) # 9 doesn't divide evenly into data length
                 if not data:
                     break
                 text_read = "%s%s" % (text_read, data)
    diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py
    --- a/pypy/module/cpyext/frameobject.py
    +++ b/pypy/module/cpyext/frameobject.py
    @@ -57,7 +57,7 @@
         code = space.interp_w(PyCode, w_code)
         w_globals = from_ref(space, py_frame.c_f_globals)
     
    -    frame = space.FrameClass(space, code, w_globals, closure=None)
    +    frame = space.FrameClass(space, code, w_globals, outer_func=None)
         frame.f_lineno = py_frame.c_f_lineno
         w_obj = space.wrap(frame)
         track_reference(space, py_obj, w_obj)
    diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
    --- a/pypy/module/marshal/interp_marshal.py
    +++ b/pypy/module/marshal/interp_marshal.py
    @@ -40,7 +40,7 @@
             reader = FileReader(space, w_f)
         try:
             u = Unmarshaller(space, reader)
    -        return u.load_w_obj(False)
    +        return u.load_w_obj()
         finally:
             reader.finished()
     
    @@ -49,7 +49,7 @@
     ignored."""
         space.timer.start("marshal loads")
         u = StringUnmarshaller(space, w_str)
    -    obj = u.load_w_obj(False)
    +    obj = u.load_w_obj()
         space.timer.stop("marshal loads")
         return obj
     
    @@ -424,7 +424,7 @@
             lng = self.get_lng()
             return self.get(lng)
     
    -    def get_w_obj(self, allow_null):
    +    def get_w_obj(self, allow_null=False):
             space = self.space
             w_ret = space.w_None # something not None
             tc = self.get1()
    @@ -434,9 +434,9 @@
                     'NULL object in marshal data'))
             return w_ret
     
    -    def load_w_obj(self, allow_null):
    +    def load_w_obj(self):
             try:
    -            return self.get_w_obj(allow_null)
    +            return self.get_w_obj()
             except rstackovf.StackOverflow:
                 rstackovf.check_stack_overflow()
                 self._overflow()
    diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
    --- a/pypy/module/micronumpy/__init__.py
    +++ b/pypy/module/micronumpy/__init__.py
    @@ -1,42 +1,51 @@
    -
     from pypy.interpreter.mixedmodule import MixedModule
     
    +
     class Module(MixedModule):
    -
         applevel_name = 'numpy'
     
         interpleveldefs = {
             'array': 'interp_numarray.SingleDimArray',
             'dtype': 'interp_dtype.W_Dtype',
    +        'ufunc': 'interp_ufuncs.W_Ufunc',
     
             'zeros': 'interp_numarray.zeros',
             'empty': 'interp_numarray.zeros',
             'ones': 'interp_numarray.ones',
             'fromstring': 'interp_support.fromstring',
    +    }
     
    -        # ufuncs
    -        'abs': 'interp_ufuncs.absolute',
    -        'absolute': 'interp_ufuncs.absolute',
    -        'add': 'interp_ufuncs.add',
    -        'copysign': 'interp_ufuncs.copysign',
    -        'divide': 'interp_ufuncs.divide',
    -        'exp': 'interp_ufuncs.exp',
    -        'fabs': 'interp_ufuncs.fabs',
    -        'floor': 'interp_ufuncs.floor',
    -        'maximum': 'interp_ufuncs.maximum',
    -        'minimum': 'interp_ufuncs.minimum',
    -        'multiply': 'interp_ufuncs.multiply',
    -        'negative': 'interp_ufuncs.negative',
    -        'reciprocal': 'interp_ufuncs.reciprocal',
    -        'sign': 'interp_ufuncs.sign',
    -        'subtract': 'interp_ufuncs.subtract',
    -        'sin': 'interp_ufuncs.sin',
    -        'cos': 'interp_ufuncs.cos',
    -        'tan': 'interp_ufuncs.tan',
    -        'arcsin': 'interp_ufuncs.arcsin',
    -        'arccos': 'interp_ufuncs.arccos',
    -        'arctan': 'interp_ufuncs.arctan',
    -    }
    +    # ufuncs
    +    for exposed, impl in [
    +        ("abs", "absolute"),
    +        ("absolute", "absolute"),
    +        ("add", "add"),
    +        ("arccos", "arccos"),
    +        ("arcsin", "arcsin"),
    +        ("arctan", "arctan"),
    +        ("copysign", "copysign"),
    +        ("cos", "cos"),
    +        ("divide", "divide"),
    +        ("equal", "equal"),
    +        ("exp", "exp"),
    +        ("fabs", "fabs"),
    +        ("floor", "floor"),
    +        ("greater", "greater"),
    +        ("greater_equal", "greater_equal"),
    +        ("less", "less"),
    +        ("less_equal", "less_equal"),
    +        ("maximum", "maximum"),
    +        ("minimum", "minimum"),
    +        ("multiply", "multiply"),
    +        ("negative", "negative"),
    +        ("not_equal", "not_equal"),
    +        ("reciprocal", "reciprocal"),
    +        ("sign", "sign"),
    +        ("sin", "sin"),
    +        ("subtract", "subtract"),
    +        ("tan", "tan"),
    +    ]:
    +        interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl
     
         appleveldefs = {
             'average': 'app_numpy.average',
    diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
    --- a/pypy/module/micronumpy/compile.py
    +++ b/pypy/module/micronumpy/compile.py
    @@ -20,6 +20,7 @@
     
     class FakeSpace(object):
         w_ValueError = None
    +    w_TypeError = None
     
         def __init__(self):
             """NOT_RPYTHON"""
    diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
    --- a/pypy/module/micronumpy/interp_dtype.py
    +++ b/pypy/module/micronumpy/interp_dtype.py
    @@ -53,7 +53,9 @@
     
     VOID_TP = lltype.Ptr(lltype.Array(lltype.Void, hints={'nolength': True, "uncast_on_llgraph": True}))
     
    -def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype):
    +def create_low_level_dtype(num, kind, name, aliases, applevel_types, T, valtype,
    +    expected_size=None):
    +
         class Box(BaseBox):
             def __init__(self, val):
                 self.val = val
    @@ -113,6 +115,8 @@
         W_LowLevelDtype.aliases = aliases
         W_LowLevelDtype.applevel_types = applevel_types
         W_LowLevelDtype.num_bytes = rffi.sizeof(T)
    +    if expected_size is not None:
    +        assert W_LowLevelDtype.num_bytes == expected_size
         return W_LowLevelDtype
     
     
    @@ -125,6 +129,16 @@
             ))
         return impl
     
    +def raw_binop(func):
    +    # Returns the result unwrapped.
    +    @functools.wraps(func)
    +    def impl(self, v1, v2):
    +        return func(self,
    +            self.for_computation(self.unbox(v1)),
    +            self.for_computation(self.unbox(v2))
    +        )
    +    return impl
    +
     def unaryop(func):
         @functools.wraps(func)
         def impl(self, v):
    @@ -166,8 +180,24 @@
     
         def bool(self, v):
             return bool(self.for_computation(self.unbox(v)))
    +    @raw_binop
    +    def eq(self, v1, v2):
    +        return v1 == v2
    +    @raw_binop
         def ne(self, v1, v2):
    -        return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2))
    +        return v1 != v2
    +    @raw_binop
    +    def lt(self, v1, v2):
    +        return v1 < v2
    +    @raw_binop
    +    def le(self, v1, v2):
    +        return v1 <= v2
    +    @raw_binop
    +    def gt(self, v1, v2):
    +        return v1 > v2
    +    @raw_binop
    +    def ge(self, v1, v2):
    +        return v1 >= v2
     
     
     class FloatArithmeticDtype(ArithmaticTypeMixin):
    @@ -220,7 +250,7 @@
             return math.tan(v)
         @unaryop
         def arcsin(self, v):
    -        if v < -1.0 or  v > 1.0:
    +        if v < -1.0 or v > 1.0:
                 return rfloat.NAN
             return math.asin(v)
         @unaryop
    @@ -282,10 +312,21 @@
         applevel_types = [],
         T = rffi.SIGNEDCHAR,
         valtype = rffi.SIGNEDCHAR._type,
    +    expected_size = 1,
     )
     class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype):
    -    def unwrap(self, space, w_item):
    -        return self.adapt_val(space.int_w(space.int(w_item)))
    +    pass
    +
    +W_Int16Dtype = create_low_level_dtype(
    +    num = 3, kind = SIGNEDLTR, name = "int16",
    +    aliases = ["int16"],
    +    applevel_types = [],
    +    T = rffi.SHORT,
    +    valtype = rffi.SHORT._type,
    +    expected_size = 2,
    +)
    +class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype):
    +    pass
     
     W_Int32Dtype = create_low_level_dtype(
         num = 5, kind = SIGNEDLTR, name = "int32",
    @@ -293,6 +334,7 @@
         applevel_types = [],
         T = rffi.INT,
         valtype = rffi.INT._type,
    +    expected_size = 4,
     )
     class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype):
         pass
    @@ -303,6 +345,7 @@
         applevel_types = ["long"],
         T = rffi.LONGLONG,
         valtype = rffi.LONGLONG._type,
    +    expected_size = 8,
     )
     class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype):
         pass
    @@ -313,6 +356,7 @@
         applevel_types = ["float"],
         T = lltype.Float,
         valtype = float,
    +    expected_size = 8,
     )
     class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype):
         def unwrap(self, space, w_item):
    @@ -323,7 +367,7 @@
     
     ALL_DTYPES = [
         W_BoolDtype,
    -    W_Int8Dtype, W_Int32Dtype, W_Int64Dtype,
    +    W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype,
         W_Float64Dtype
     ]
     
    @@ -353,4 +397,4 @@
         kind = interp_attrproperty("kind", cls=W_Dtype),
         shape = GetSetProperty(W_Dtype.descr_get_shape),
     )
    -W_Dtype.typedef.acceptable_as_base_class = False
    \ No newline at end of file
    +W_Dtype.typedef.acceptable_as_base_class = False
    diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
    --- a/pypy/module/micronumpy/interp_numarray.py
    +++ b/pypy/module/micronumpy/interp_numarray.py
    @@ -53,90 +53,59 @@
                 i += 1
             return arr
     
    -    def _unaryop_impl(w_ufunc):
    +    def _unaryop_impl(ufunc_name):
             def impl(self, space):
    -            return w_ufunc(space, self)
    -        return func_with_new_name(impl, "unaryop_%s_impl" % w_ufunc.__name__)
    +            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self])
    +        return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name)
     
    -    descr_pos = _unaryop_impl(interp_ufuncs.positive)
    -    descr_neg = _unaryop_impl(interp_ufuncs.negative)
    -    descr_abs = _unaryop_impl(interp_ufuncs.absolute)
    +    descr_pos = _unaryop_impl("positive")
    +    descr_neg = _unaryop_impl("negative")
    +    descr_abs = _unaryop_impl("absolute")
     
    -    def _binop_impl(w_ufunc):
    +    def _binop_impl(ufunc_name):
             def impl(self, space, w_other):
    -            return w_ufunc(space, self, w_other)
    -        return func_with_new_name(impl, "binop_%s_impl" % w_ufunc.__name__)
    +            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other])
    +        return func_with_new_name(impl, "binop_%s_impl" % ufunc_name)
     
    -    descr_add = _binop_impl(interp_ufuncs.add)
    -    descr_sub = _binop_impl(interp_ufuncs.subtract)
    -    descr_mul = _binop_impl(interp_ufuncs.multiply)
    -    descr_div = _binop_impl(interp_ufuncs.divide)
    -    descr_pow = _binop_impl(interp_ufuncs.power)
    -    descr_mod = _binop_impl(interp_ufuncs.mod)
    +    descr_add = _binop_impl("add")
    +    descr_sub = _binop_impl("subtract")
    +    descr_mul = _binop_impl("multiply")
    +    descr_div = _binop_impl("divide")
    +    descr_pow = _binop_impl("power")
    +    descr_mod = _binop_impl("mod")
     
    -    def _binop_right_impl(w_ufunc):
    +    descr_eq = _binop_impl("equal")
    +    descr_ne = _binop_impl("not_equal")
    +    descr_lt = _binop_impl("less")
    +    descr_le = _binop_impl("less_equal")
    +    descr_gt = _binop_impl("greater")
    +    descr_ge = _binop_impl("greater_equal")
    +
    +    def _binop_right_impl(ufunc_name):
             def impl(self, space, w_other):
                 w_other = scalar_w(space,
                     interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()),
                     w_other
                 )
    -            return w_ufunc(space, w_other, self)
    -        return func_with_new_name(impl, "binop_right_%s_impl" % w_ufunc.__name__)
    +            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self])
    +        return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name)
     
    -    descr_radd = _binop_right_impl(interp_ufuncs.add)
    -    descr_rsub = _binop_right_impl(interp_ufuncs.subtract)
    -    descr_rmul = _binop_right_impl(interp_ufuncs.multiply)
    -    descr_rdiv = _binop_right_impl(interp_ufuncs.divide)
    -    descr_rpow = _binop_right_impl(interp_ufuncs.power)
    -    descr_rmod = _binop_right_impl(interp_ufuncs.mod)
    +    descr_radd = _binop_right_impl("add")
    +    descr_rsub = _binop_right_impl("subtract")
    +    descr_rmul = _binop_right_impl("multiply")
    +    descr_rdiv = _binop_right_impl("divide")
    +    descr_rpow = _binop_right_impl("power")
    +    descr_rmod = _binop_right_impl("mod")
     
    -    def _reduce_sum_prod_impl(op_name, init):
    -        reduce_driver = jit.JitDriver(greens=['signature'],
    -                         reds = ['i', 'size', 'self', 'result', 'res_dtype'])
    +    def _reduce_ufunc_impl(ufunc_name):
    +        def impl(self, space):
    +            return getattr(interp_ufuncs.get(space), ufunc_name).descr_reduce(space, self)
    +        return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name)
     
    -        def loop(self, res_dtype, result, size):
    -            i = 0
    -            while i < size:
    -                reduce_driver.jit_merge_point(signature=self.signature,
    -                                              self=self, res_dtype=res_dtype,
    -                                              size=size, i=i, result=result)
    -                result = getattr(res_dtype, op_name)(
    -                    result,
    -                    self.eval(i).convert_to(res_dtype)
    -                )
    -                i += 1
    -            return result
    -
    -        def impl(self, space):
    -            dtype = interp_ufuncs.find_unaryop_result_dtype(
    -                space, self.find_dtype(), promote_to_largest=True
    -            )
    -            result = dtype.adapt_val(init)
    -            return loop(self, dtype, result, self.find_size()).wrap(space)
    -        return func_with_new_name(impl, "reduce_%s_impl" % op_name)
    -
    -    def _reduce_max_min_impl(op_name):
    -        reduce_driver = jit.JitDriver(greens=['signature'],
    -                         reds = ['i', 'size', 'self', 'result', 'dtype'])
    -        def loop(self, result, size):
    -            i = 1
    -            dtype = self.find_dtype()
    -            while i < size:
    -                reduce_driver.jit_merge_point(signature=self.signature,
    -                                              self=self, dtype=dtype,
    -                                              size=size, i=i, result=result)
    -                result = getattr(dtype, op_name)(result, self.eval(i))
    -                i += 1
    -            return result
    -
    -        def impl(self, space):
    -            size = self.find_size()
    -            if size == 0:
    -                raise OperationError(space.w_ValueError,
    -                    space.wrap("Can't call %s on zero-size arrays" \
    -                            % op_name))
    -            return loop(self, self.eval(0), size).wrap(space)
    -        return func_with_new_name(impl, "reduce_%s_impl" % op_name)
    +    descr_sum = _reduce_ufunc_impl("add")
    +    descr_prod = _reduce_ufunc_impl("multiply")
    +    descr_max = _reduce_ufunc_impl("maximum")
    +    descr_min = _reduce_ufunc_impl("minimum")
     
         def _reduce_argmax_argmin_impl(op_name):
             reduce_driver = jit.JitDriver(greens=['signature'],
    @@ -192,10 +161,6 @@
         def descr_any(self, space):
             return space.wrap(self._any())
     
    -    descr_sum = _reduce_sum_prod_impl("add", 0)
    -    descr_prod = _reduce_sum_prod_impl("mul", 1)
    -    descr_max = _reduce_max_min_impl("max")
    -    descr_min = _reduce_max_min_impl("min")
         descr_argmax = _reduce_argmax_argmin_impl("max")
         descr_argmin = _reduce_argmax_argmin_impl("min")
     
    @@ -248,7 +213,7 @@
             res = "array([" + ", ".join(concrete._getnums(False)) + "]"
             dtype = concrete.find_dtype()
             if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and
    -            dtype is not space.fromcache(interp_dtype.W_Int64Dtype)):
    +            dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or not self.find_size():
                 res += ", dtype=" + dtype.name
             res += ")"
             return space.wrap(res)
    @@ -259,7 +224,15 @@
             return space.wrap("[" + " ".join(concrete._getnums(True)) + "]")
     
         def descr_getitem(self, space, w_idx):
    -        # TODO: indexing by tuples
    +        # TODO: indexing by arrays and lists
    +        if space.isinstance_w(w_idx, space.w_tuple):
    +            length = space.len_w(w_idx)
    +            if length == 0:
    +                return space.wrap(self)
    +            if length > 1: # only one dimension for now.
    +                raise OperationError(space.w_IndexError,
    +                                     space.wrap("invalid index"))
    +            w_idx = space.getitem(w_idx, space.wrap(0))
             start, stop, step, slice_length = space.decode_index4(w_idx, self.find_size())
             if step == 0:
                 # Single index
    @@ -273,8 +246,19 @@
                 return space.wrap(res)
     
         def descr_setitem(self, space, w_idx, w_value):
    -        # TODO: indexing by tuples and lists
    +        # TODO: indexing by arrays and lists
             self.invalidated()
    +        if space.isinstance_w(w_idx, space.w_tuple):
    +            length = space.len_w(w_idx)
    +            if length > 1: # only one dimension for now.
    +                raise OperationError(space.w_IndexError,
    +                                     space.wrap("invalid index"))
    +            if length == 0:
    +                w_idx = space.newslice(space.wrap(0),
    +                                      space.wrap(self.find_size()),
    +                                      space.wrap(1))
    +            else:
    +                w_idx = space.getitem(w_idx, space.wrap(0))
             start, stop, step, slice_length = space.decode_index4(w_idx,
                                                                   self.find_size())
             if step == 0:
    @@ -427,10 +411,11 @@
         """
         Intermediate class for performing binary operations.
         """
    -    def __init__(self, signature, res_dtype, left, right):
    +    def __init__(self, signature, calc_dtype, res_dtype, left, right):
             VirtualArray.__init__(self, signature, res_dtype)
             self.left = left
             self.right = right
    +        self.calc_dtype = calc_dtype
     
         def _del_sources(self):
             self.left = None
    @@ -444,14 +429,14 @@
             return self.right.find_size()
     
         def _eval(self, i):
    -        lhs = self.left.eval(i).convert_to(self.res_dtype)
    -        rhs = self.right.eval(i).convert_to(self.res_dtype)
    +        lhs = self.left.eval(i).convert_to(self.calc_dtype)
    +        rhs = self.right.eval(i).convert_to(self.calc_dtype)
     
             sig = jit.promote(self.signature)
             assert isinstance(sig, signature.Signature)
             call_sig = sig.components[0]
             assert isinstance(call_sig, signature.Call2)
    -        return call_sig.func(self.res_dtype, lhs, rhs)
    +        return call_sig.func(self.calc_dtype, lhs, rhs)
     
     class ViewArray(BaseArray):
         """
    @@ -596,18 +581,28 @@
         __pos__ = interp2app(BaseArray.descr_pos),
         __neg__ = interp2app(BaseArray.descr_neg),
         __abs__ = interp2app(BaseArray.descr_abs),
    +
         __add__ = interp2app(BaseArray.descr_add),
         __sub__ = interp2app(BaseArray.descr_sub),
         __mul__ = interp2app(BaseArray.descr_mul),
         __div__ = interp2app(BaseArray.descr_div),
         __pow__ = interp2app(BaseArray.descr_pow),
         __mod__ = interp2app(BaseArray.descr_mod),
    +
         __radd__ = interp2app(BaseArray.descr_radd),
         __rsub__ = interp2app(BaseArray.descr_rsub),
         __rmul__ = interp2app(BaseArray.descr_rmul),
         __rdiv__ = interp2app(BaseArray.descr_rdiv),
         __rpow__ = interp2app(BaseArray.descr_rpow),
         __rmod__ = interp2app(BaseArray.descr_rmod),
    +
    +    __eq__ = interp2app(BaseArray.descr_eq),
    +    __ne__ = interp2app(BaseArray.descr_ne),
    +    __lt__ = interp2app(BaseArray.descr_lt),
    +    __le__ = interp2app(BaseArray.descr_le),
    +    __gt__ = interp2app(BaseArray.descr_gt),
    +    __ge__ = interp2app(BaseArray.descr_ge),
    +
         __repr__ = interp2app(BaseArray.descr_repr),
         __str__ = interp2app(BaseArray.descr_str),
     
    diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
    --- a/pypy/module/micronumpy/interp_ufuncs.py
    +++ b/pypy/module/micronumpy/interp_ufuncs.py
    @@ -1,57 +1,168 @@
    +from pypy.interpreter.baseobjspace import Wrappable
    +from pypy.interpreter.error import OperationError, operationerrfmt
    +from pypy.interpreter.gateway import interp2app
    +from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
     from pypy.module.micronumpy import interp_dtype, signature
    +from pypy.rlib import jit
     from pypy.tool.sourcetools import func_with_new_name
     
     
    -def ufunc(func=None, promote_to_float=False, promote_bools=False):
    -    if func is None:
    -        return lambda func: ufunc(func, promote_to_float, promote_bools)
    -    call_sig = signature.Call1(func)
    -    def impl(space, w_obj):
    +reduce_driver = jit.JitDriver(
    +    greens = ["signature"],
    +    reds = ["i", "size", "self", "dtype", "value", "obj"]
    +)
    +
    +class W_Ufunc(Wrappable):
    +    _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"]
    +
    +    def __init__(self, name, promote_to_float, promote_bools, identity):
    +        self.name = name
    +        self.promote_to_float = promote_to_float
    +        self.promote_bools = promote_bools
    +
    +        self.identity = identity
    +
    +    def descr_repr(self, space):
    +        return space.wrap("" % self.name)
    +
    +    def descr_get_identity(self, space):
    +        if self.identity is None:
    +            return space.w_None
    +        return self.identity.wrap(space)
    +
    +    def descr_call(self, space, __args__):
    +        try:
    +            args_w = __args__.fixedunpack(self.argcount)
    +        except ValueError, e:
    +            raise OperationError(space.w_TypeError, space.wrap(str(e)))
    +        return self.call(space, args_w)
    +
    +    def descr_reduce(self, space, w_obj):
    +        from pypy.module.micronumpy.interp_numarray import convert_to_array, Scalar
    +
    +        if self.argcount != 2:
    +            raise OperationError(space.w_ValueError, space.wrap("reduce only "
    +                "supported for binary functions"))
    +
    +        assert isinstance(self, W_Ufunc2)
    +        obj = convert_to_array(space, w_obj)
    +        if isinstance(obj, Scalar):
    +            raise OperationError(space.w_TypeError, space.wrap("cannot reduce "
    +                "on a scalar"))
    +
    +        size = obj.find_size()
    +        dtype = find_unaryop_result_dtype(
    +            space, obj.find_dtype(),
    +            promote_to_largest=True
    +        )
    +        start = 0
    +        if self.identity is None:
    +            if size == 0:
    +                raise operationerrfmt(space.w_ValueError, "zero-size array to "
    +                    "%s.reduce without identity", self.name)
    +            value = obj.eval(0).convert_to(dtype)
    +            start += 1
    +        else:
    +            value = self.identity.convert_to(dtype)
    +        new_sig = signature.Signature.find_sig([
    +            self.reduce_signature, obj.signature
    +        ])
    +        return self.reduce(new_sig, start, value, obj, dtype, size).wrap(space)
    +
    +    def reduce(self, signature, start, value, obj, dtype, size):
    +        i = start
    +        while i < size:
    +            reduce_driver.jit_merge_point(signature=signature, self=self,
    +                                          value=value, obj=obj, i=i,
    +                                          dtype=dtype, size=size)
    +            value = self.func(dtype, value, obj.eval(i).convert_to(dtype))
    +            i += 1
    +        return value
    +
    +class W_Ufunc1(W_Ufunc):
    +    argcount = 1
    +
    +    def __init__(self, func, name, promote_to_float=False, promote_bools=False,
    +        identity=None):
    +
    +        W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity)
    +        self.func = func
    +        self.signature = signature.Call1(func)
    +
    +    def call(self, space, args_w):
             from pypy.module.micronumpy.interp_numarray import (Call1,
                 convert_to_array, Scalar)
     
    +        [w_obj] = args_w
             w_obj = convert_to_array(space, w_obj)
             res_dtype = find_unaryop_result_dtype(space,
                 w_obj.find_dtype(),
    -            promote_to_float=promote_to_float,
    -            promote_bools=promote_bools,
    +            promote_to_float=self.promote_to_float,
    +            promote_bools=self.promote_bools,
             )
             if isinstance(w_obj, Scalar):
    -            return func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space)
    +            return self.func(res_dtype, w_obj.value.convert_to(res_dtype)).wrap(space)
     
    -        new_sig = signature.Signature.find_sig([call_sig, w_obj.signature])
    +        new_sig = signature.Signature.find_sig([self.signature, w_obj.signature])
             w_res = Call1(new_sig, res_dtype, w_obj)
             w_obj.add_invalidates(w_res)
             return w_res
    -    return func_with_new_name(impl, "%s_dispatcher" % func.__name__)
     
    -def ufunc2(func=None, promote_to_float=False, promote_bools=False):
    -    if func is None:
    -        return lambda func: ufunc2(func, promote_to_float, promote_bools)
     
    -    call_sig = signature.Call2(func)
    -    def impl(space, w_lhs, w_rhs):
    +class W_Ufunc2(W_Ufunc):
    +    argcount = 2
    +
    +    def __init__(self, func, name, promote_to_float=False, promote_bools=False,
    +        identity=None, comparison_func=False):
    +
    +        W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity)
    +        self.func = func
    +        self.comparison_func = comparison_func
    +        self.signature = signature.Call2(func)
    +        self.reduce_signature = signature.BaseSignature()
    +
    +    def call(self, space, args_w):
             from pypy.module.micronumpy.interp_numarray import (Call2,
                 convert_to_array, Scalar)
     
    +        [w_lhs, w_rhs] = args_w
             w_lhs = convert_to_array(space, w_lhs)
             w_rhs = convert_to_array(space, w_rhs)
    -        res_dtype = find_binop_result_dtype(space,
    +        calc_dtype = find_binop_result_dtype(space,
                 w_lhs.find_dtype(), w_rhs.find_dtype(),
    -            promote_to_float=promote_to_float,
    -            promote_bools=promote_bools,
    +            promote_to_float=self.promote_to_float,
    +            promote_bools=self.promote_bools,
             )
    +        if self.comparison_func:
    +            res_dtype = space.fromcache(interp_dtype.W_BoolDtype)
    +        else:
    +            res_dtype = calc_dtype
             if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar):
    -            return func(res_dtype, w_lhs.value, w_rhs.value).wrap(space)
    +            return self.func(calc_dtype,
    +                w_lhs.value.convert_to(calc_dtype),
    +                w_rhs.value.convert_to(calc_dtype)
    +            ).wrap(space)
     
             new_sig = signature.Signature.find_sig([
    -            call_sig, w_lhs.signature, w_rhs.signature
    +            self.signature, w_lhs.signature, w_rhs.signature
             ])
    -        w_res = Call2(new_sig, res_dtype, w_lhs, w_rhs)
    +        w_res = Call2(new_sig, calc_dtype, res_dtype, w_lhs, w_rhs)
             w_lhs.add_invalidates(w_res)
             w_rhs.add_invalidates(w_res)
             return w_res
    -    return func_with_new_name(impl, "%s_dispatcher" % func.__name__)
    +
    +
    +W_Ufunc.typedef = TypeDef("ufunc",
    +    __module__ = "numpy",
    +
    +    __call__ = interp2app(W_Ufunc.descr_call),
    +    __repr__ = interp2app(W_Ufunc.descr_repr),
    +
    +    identity = GetSetProperty(W_Ufunc.descr_get_identity),
    +    nin = interp_attrproperty("argcount", cls=W_Ufunc),
    +
    +    reduce = interp2app(W_Ufunc.descr_reduce),
    +)
     
     def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False,
         promote_bools=False):
    @@ -74,7 +185,7 @@
         assert False
     
     def find_unaryop_result_dtype(space, dt, promote_to_float=False,
    -    promote_to_largest=False, promote_bools=False):
    +    promote_bools=False, promote_to_largest=False):
         if promote_bools and (dt.kind == interp_dtype.BOOLLTR):
             return space.fromcache(interp_dtype.W_Int8Dtype)
         if promote_to_float:
    @@ -106,53 +217,77 @@
         return space.fromcache(interp_dtype.W_Float64Dtype)
     
     
    -def ufunc_dtype_caller(ufunc_name, op_name, argcount, **kwargs):
    +def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func):
         if argcount == 1:
    -        @ufunc(**kwargs)
             def impl(res_dtype, value):
                 return getattr(res_dtype, op_name)(value)
         elif argcount == 2:
    -        @ufunc2(**kwargs)
             def impl(res_dtype, lvalue, rvalue):
    -            return getattr(res_dtype, op_name)(lvalue, rvalue)
    +            res = getattr(res_dtype, op_name)(lvalue, rvalue)
    +            if comparison_func:
    +                res = space.fromcache(interp_dtype.W_BoolDtype).box(res)
    +            return res
         return func_with_new_name(impl, ufunc_name)
     
    -for ufunc_def in [
    -    ("add", "add", 2),
    -    ("subtract", "sub", 2),
    -    ("multiply", "mul", 2),
    -    ("divide", "div", 2, {"promote_bools": True}),
    -    ("mod", "mod", 2, {"promote_bools": True}),
    -    ("power", "pow", 2, {"promote_bools": True}),
    +class UfuncState(object):
    +    def __init__(self, space):
    +        "NOT_RPYTHON"
    +        for ufunc_def in [
    +            ("add", "add", 2, {"identity": 0}),
    +            ("subtract", "sub", 2),
    +            ("multiply", "mul", 2, {"identity": 1}),
    +            ("divide", "div", 2, {"promote_bools": True}),
    +            ("mod", "mod", 2, {"promote_bools": True}),
    +            ("power", "pow", 2, {"promote_bools": True}),
     
    -    ("maximum", "max", 2),
    -    ("minimum", "min", 2),
    +            ("equal", "eq", 2, {"comparison_func": True}),
    +            ("not_equal", "ne", 2, {"comparison_func": True}),
    +            ("less", "lt", 2, {"comparison_func": True}),
    +            ("less_equal", "le", 2, {"comparison_func": True}),
    +            ("greater", "gt", 2, {"comparison_func": True}),
    +            ("greater_equal", "ge", 2, {"comparison_func": True}),
     
    -    ("copysign", "copysign", 2, {"promote_to_float": True}),
    +            ("maximum", "max", 2),
    +            ("minimum", "min", 2),
     
    -    ("positive", "pos", 1),
    -    ("negative", "neg", 1),
    -    ("absolute", "abs", 1),
    -    ("sign", "sign", 1, {"promote_bools": True}),
    -    ("reciprocal", "reciprocal", 1),
    +            ("copysign", "copysign", 2, {"promote_to_float": True}),
     
    -    ("fabs", "fabs", 1, {"promote_to_float": True}),
    -    ("floor", "floor", 1, {"promote_to_float": True}),
    -    ("exp", "exp", 1, {"promote_to_float": True}),
    +            ("positive", "pos", 1),
    +            ("negative", "neg", 1),
    +            ("absolute", "abs", 1),
    +            ("sign", "sign", 1, {"promote_bools": True}),
    +            ("reciprocal", "reciprocal", 1),
     
    -    ("sin", "sin", 1, {"promote_to_float": True}),
    -    ("cos", "cos", 1, {"promote_to_float": True}),
    -    ("tan", "tan", 1, {"promote_to_float": True}),
    -    ("arcsin", "arcsin", 1, {"promote_to_float": True}),
    -    ("arccos", "arccos", 1, {"promote_to_float": True}),
    -    ("arctan", "arctan", 1, {"promote_to_float": True}),
    -]:
    -    ufunc_name = ufunc_def[0]
    -    op_name = ufunc_def[1]
    -    argcount = ufunc_def[2]
    -    try:
    -        extra_kwargs = ufunc_def[3]
    -    except IndexError:
    -        extra_kwargs = {}
    +            ("fabs", "fabs", 1, {"promote_to_float": True}),
    +            ("floor", "floor", 1, {"promote_to_float": True}),
    +            ("exp", "exp", 1, {"promote_to_float": True}),
     
    -    globals()[ufunc_name] = ufunc_dtype_caller(ufunc_name, op_name, argcount, **extra_kwargs)
    +            ("sin", "sin", 1, {"promote_to_float": True}),
    +            ("cos", "cos", 1, {"promote_to_float": True}),
    +            ("tan", "tan", 1, {"promote_to_float": True}),
    +            ("arcsin", "arcsin", 1, {"promote_to_float": True}),
    +            ("arccos", "arccos", 1, {"promote_to_float": True}),
    +            ("arctan", "arctan", 1, {"promote_to_float": True}),
    +        ]:
    +            self.add_ufunc(space, *ufunc_def)
    +
    +    def add_ufunc(self, space, ufunc_name, op_name, argcount, extra_kwargs=None):
    +        if extra_kwargs is None:
    +            extra_kwargs = {}
    +
    +        identity = extra_kwargs.get("identity")
    +        if identity is not None:
    +            identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity)
    +        extra_kwargs["identity"] = identity
    +
    +        func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount,
    +            comparison_func=extra_kwargs.get("comparison_func", False)
    +        )
    +        if argcount == 1:
    +            ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs)
    +        elif argcount == 2:
    +            ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs)
    +        setattr(self, ufunc_name, ufunc)
    +
    +def get(space):
    +    return space.fromcache(UfuncState)
    \ No newline at end of file
    diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
    --- a/pypy/module/micronumpy/test/test_dtypes.py
    +++ b/pypy/module/micronumpy/test/test_dtypes.py
    @@ -82,10 +82,20 @@
                 assert a[1] == 1
     
         def test_add_int8(self):
    -        from numpy import array
    +        from numpy import array, dtype
     
             a = array(range(5), dtype="int8")
             b = a + a
    +        assert b.dtype is dtype("int8")
    +        for i in range(5):
    +            assert b[i] == i * 2
    +
    +    def test_add_int16(self):
    +        from numpy import array, dtype
    +
    +        a = array(range(5), dtype="int16")
    +        b = a + a
    +        assert b.dtype is dtype("int16")
             for i in range(5):
                 assert b[i] == i * 2
     
    @@ -98,4 +108,4 @@
             from numpy import dtype
     
             # You can't subclass dtype
    -        raises(TypeError, type, "Foo", (dtype,), {})
    \ No newline at end of file
    +        raises(TypeError, type, "Foo", (dtype,), {})
    diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
    --- a/pypy/module/micronumpy/test/test_numarray.py
    +++ b/pypy/module/micronumpy/test/test_numarray.py
    @@ -52,10 +52,14 @@
             from numpy import array, zeros
             a = array(range(5), float)
             assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])"
    +        a = array([], float)
    +        assert repr(a) == "array([], dtype=float64)"
             a = zeros(1001)
             assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])"
             a = array(range(5), long)
             assert repr(a) == "array([0, 1, 2, 3, 4])"
    +        a = array([], long)
    +        assert repr(a) == "array([], dtype=int64)"
             a = array([True, False, True, False], "?")
             assert repr(a) == "array([True, False, True, False], dtype=bool)"
     
    @@ -84,6 +88,9 @@
             a = array(range(5), dtype="int8")
             assert str(a) == "[0 1 2 3 4]"
     
    +        a = array(range(5), dtype="int16")
    +        assert str(a) == "[0 1 2 3 4]"
    +
         def test_str_slice(self):
             from numpy import array, zeros
             a = array(range(5), float)
    @@ -102,6 +109,16 @@
             assert a[-1] == 8
             raises(IndexError, "a[-6]")
     
    +    def test_getitem_tuple(self):
    +        from numpy import array
    +        a = array(range(5))
    +        raises(IndexError, "a[(1,2)]")
    +        for i in xrange(5):
    +            assert a[(i,)] == i
    +        b = a[()]
    +        for i in xrange(5):
    +            assert a[i] == b[i]
    +
         def test_setitem(self):
             from numpy import array
             a = array(range(5))
    @@ -110,6 +127,17 @@
             raises(IndexError, "a[5] = 0.0")
             raises(IndexError, "a[-6] = 3.0")
     
    +    def test_setitem_tuple(self):
    +        from numpy import array
    +        a = array(range(5))
    +        raises(IndexError, "a[(1,2)] = [0,1]")
    +        for i in xrange(5):
    +            a[(i,)] = i+1
    +            assert a[i] == i+1
    +        a[()] = range(5)
    +        for i in xrange(5):
    +            assert a[i] == i
    +
         def test_setslice_array(self):
             from numpy import array
             a = array(range(5))
    @@ -529,6 +557,26 @@
             assert array([1.2, 5]).dtype is dtype(float)
             assert array([]).dtype is dtype(float)
     
    +    def test_comparison(self):
    +        import operator
    +        from numpy import array, dtype
    +
    +        a = array(range(5))
    +        b = array(range(5), float)
    +        for func in [
    +            operator.eq, operator.ne, operator.lt, operator.le, operator.gt,
    +            operator.ge
    +        ]:
    +            c = func(a, 3)
    +            assert c.dtype is dtype(bool)
    +            for i in xrange(5):
    +                assert c[i] == func(a[i], 3)
    +
    +            c = func(b, 3)
    +            assert c.dtype is dtype(bool)
    +            for i in xrange(5):
    +                assert c[i] == func(b[i], 3)
    +
     
     class AppTestSupport(object):
         def setup_class(cls):
    @@ -541,4 +589,4 @@
             a = fromstring(self.data)
             for i in range(4):
                 assert a[i] == i + 1
    -        raises(ValueError, fromstring, "abc")
    \ No newline at end of file
    +        raises(ValueError, fromstring, "abc")
    diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
    --- a/pypy/module/micronumpy/test/test_ufuncs.py
    +++ b/pypy/module/micronumpy/test/test_ufuncs.py
    @@ -3,6 +3,32 @@
     
     
     class AppTestUfuncs(BaseNumpyAppTest):
    +    def test_ufunc_instance(self):
    +        from numpy import add, ufunc
    +
    +        assert isinstance(add, ufunc)
    +        assert repr(add) == ""
    +        assert repr(ufunc) == ""
    +
    +    def test_ufunc_attrs(self):
    +        from numpy import add, multiply, sin
    +
    +        assert add.identity == 0
    +        assert multiply.identity == 1
    +        assert sin.identity is None
    +
    +        assert add.nin == 2
    +        assert multiply.nin == 2
    +        assert sin.nin == 1
    +
    +    def test_wrong_arguments(self):
    +        from numpy import add, sin
    +
    +        raises(TypeError, add, 1)
    +        raises(TypeError, add, 1, 2, 3)
    +        raises(TypeError, sin, 1, 2)
    +        raises(TypeError, sin)
    +
         def test_single_item(self):
             from numpy import negative, sign, minimum
     
    @@ -272,3 +298,42 @@
             b = arctan(a)
             assert math.isnan(b[0])
     
    +    def test_reduce_errors(self):
    +        from numpy import sin, add
    +
    +        raises(ValueError, sin.reduce, [1, 2, 3])
    +        raises(TypeError, add.reduce, 1)
    +
    +    def test_reduce(self):
    +        from numpy import add, maximum
    +
    +        assert add.reduce([1, 2, 3]) == 6
    +        assert maximum.reduce([1]) == 1
    +        assert maximum.reduce([1, 2, 3]) == 3
    +        raises(ValueError, maximum.reduce, [])
    +
    +    def test_comparisons(self):
    +        import operator
    +        from numpy import equal, not_equal, less, less_equal, greater, greater_equal
    +
    +        for ufunc, func in [
    +            (equal, operator.eq),
    +            (not_equal, operator.ne),
    +            (less, operator.lt),
    +            (less_equal, operator.le),
    +            (greater, operator.gt),
    +            (greater_equal, operator.ge),
    +        ]:
    +            for a, b in [
    +                (3, 3),
    +                (3, 4),
    +                (4, 3),
    +                (3.0, 3.0),
    +                (3.0, 3.5),
    +                (3.5, 3.0),
    +                (3.0, 3),
    +                (3, 3.0),
    +                (3.5, 3),
    +                (3, 3.5),
    +            ]:
    +                assert ufunc(a, b) is func(a, b)
    diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
    --- a/pypy/module/micronumpy/test/test_zjit.py
    +++ b/pypy/module/micronumpy/test/test_zjit.py
    @@ -19,7 +19,7 @@
         def test_add(self):
             def f(i):
                 ar = SingleDimArray(i, dtype=self.float64_dtype)
    -            v = interp_ufuncs.add(self.space, ar, ar)
    +            v = interp_ufuncs.get(self.space).add.call(self.space, [ar, ar])
                 return v.get_concrete().eval(3).val
     
             result = self.meta_interp(f, [5], listops=True, backendopt=True)
    @@ -31,9 +31,10 @@
         def test_floatadd(self):
             def f(i):
                 ar = SingleDimArray(i, dtype=self.float64_dtype)
    -            v = interp_ufuncs.add(self.space,
    -                ar,
    -                scalar_w(self.space, self.float64_dtype, self.space.wrap(4.5))
    +            v = interp_ufuncs.get(self.space).add.call(self.space, [
    +                    ar,
    +                    scalar_w(self.space, self.float64_dtype, self.space.wrap(4.5))
    +                ],
                 )
                 assert isinstance(v, BaseArray)
                 return v.get_concrete().eval(3).val
    @@ -89,14 +90,21 @@
         def test_max(self):
             space = self.space
             float64_dtype = self.float64_dtype
    +        int64_dtype = self.int64_dtype
     
             def f(i):
    -            ar = SingleDimArray(i, dtype=NonConstant(float64_dtype))
    +            if NonConstant(False):
    +                dtype = int64_dtype
    +            else:
    +                dtype = float64_dtype
    +            ar = SingleDimArray(i, dtype=dtype)
                 j = 0
                 while j < i:
                     ar.get_concrete().setitem(j, float64_dtype.box(float(j)))
                     j += 1
    -            return ar.descr_add(space, ar).descr_max(space).floatval
    +            v = ar.descr_add(space, ar).descr_max(space)
    +            assert isinstance(v, FloatObject)
    +            return v.floatval
     
             result = self.meta_interp(f, [5], listops=True, backendopt=True)
             self.check_loops({"getarrayitem_raw": 2, "float_add": 1,
    @@ -108,14 +116,21 @@
         def test_min(self):
             space = self.space
             float64_dtype = self.float64_dtype
    +        int64_dtype = self.int64_dtype
     
             def f(i):
    -            ar = SingleDimArray(i, dtype=NonConstant(float64_dtype))
    +            if NonConstant(False):
    +                dtype = int64_dtype
    +            else:
    +                dtype = float64_dtype
    +            ar = SingleDimArray(i, dtype=dtype)
                 j = 0
                 while j < i:
                     ar.get_concrete().setitem(j, float64_dtype.box(float(j)))
                     j += 1
    -            return ar.descr_add(space, ar).descr_min(space).floatval
    +            v = ar.descr_add(space, ar).descr_min(space)
    +            assert isinstance(v, FloatObject)
    +            return v.floatval
     
             result = self.meta_interp(f, [5], listops=True, backendopt=True)
             self.check_loops({"getarrayitem_raw": 2, "float_add": 1,
    @@ -180,9 +195,9 @@
     
             def f(i):
                 ar = SingleDimArray(i, dtype=self.float64_dtype)
    -            v1 = interp_ufuncs.add(space, ar, scalar_w(space, self.float64_dtype, space.wrap(4.5)))
    +            v1 = interp_ufuncs.get(self.space).add.call(space, [ar, scalar_w(space, self.float64_dtype, space.wrap(4.5))])
                 assert isinstance(v1, BaseArray)
    -            v2 = interp_ufuncs.multiply(space, v1, scalar_w(space, self.float64_dtype, space.wrap(4.5)))
    +            v2 = interp_ufuncs.get(self.space).multiply.call(space, [v1, scalar_w(space, self.float64_dtype, space.wrap(4.5))])
                 v1.force_if_needed()
                 assert isinstance(v2, BaseArray)
                 return v2.get_concrete().eval(3).val
    @@ -200,8 +215,8 @@
             space = self.space
             def f(i):
                 ar = SingleDimArray(i, dtype=self.float64_dtype)
    -            v1 = interp_ufuncs.add(space, ar, ar)
    -            v2 = interp_ufuncs.negative(space, v1)
    +            v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar])
    +            v2 = interp_ufuncs.get(self.space).negative.call(space, [v1])
                 return v2.get_concrete().eval(3).val
     
             result = self.meta_interp(f, [5], listops=True, backendopt=True)
    @@ -216,13 +231,13 @@
             def f(i):
                 ar = SingleDimArray(i, dtype=self.float64_dtype)
     
    -            v1 = interp_ufuncs.add(space, ar, ar)
    -            v2 = interp_ufuncs.negative(space, v1)
    +            v1 = interp_ufuncs.get(self.space).add.call(space, [ar, ar])
    +            v2 = interp_ufuncs.get(self.space).negative.call(space, [v1])
                 v2.get_concrete()
     
                 for i in xrange(5):
    -                v1 = interp_ufuncs.multiply(space, ar, ar)
    -                v2 = interp_ufuncs.negative(space, v1)
    +                v1 = interp_ufuncs.get(self.space).multiply.call(space, [ar, ar])
    +                v2 = interp_ufuncs.get(self.space).negative.call(space, [v1])
                     v2.get_concrete()
     
             self.meta_interp(f, [5], listops=True, backendopt=True)
    @@ -237,7 +252,7 @@
                     SingleDimSlice.signature, ar.signature
                 ])
                 s = SingleDimSlice(0, step*i, step, i, ar, new_sig)
    -            v = interp_ufuncs.add(self.space, s, s)
    +            v = interp_ufuncs.get(self.space).add.call(self.space, [s, s])
                 return v.get_concrete().eval(3).val
     
             result = self.meta_interp(f, [5], listops=True, backendopt=True)
    @@ -259,7 +274,7 @@
                     SingleDimSlice.signature, s1.signature
                 ])
                 s2 = SingleDimSlice(0, step2*i, step2, i, ar, new_sig)
    -            v = interp_ufuncs.add(self.space, s1, s2)
    +            v = interp_ufuncs.get(self.space).add.call(self.space, [s1, s2])
                 return v.get_concrete().eval(3).val
     
             result = self.meta_interp(f, [5], listops=True, backendopt=True)
    diff --git a/pypy/module/pwd/__init__.py b/pypy/module/pwd/__init__.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/__init__.py
    @@ -0,0 +1,25 @@
    +from pypy.interpreter.mixedmodule import MixedModule
    +
    +class Module(MixedModule):
    +    """
    +    This module provides access to the Unix password database.
    +    It is available on all Unix versions.
    +
    +    Password database entries are reported as 7-tuples containing the following
+    items from the password database (see <pwd.h>), in order:
    +    pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell.
    +    The uid and gid items are integers, all others are strings. An
    +    exception is raised if the entry asked for cannot be found.
    +    """
    +
    +    interpleveldefs = {
    +        'getpwuid': 'interp_pwd.getpwuid',
    +        'getpwnam': 'interp_pwd.getpwnam',
    +        'getpwall': 'interp_pwd.getpwall',
    +    }
    +
    +    appleveldefs = {
    +        'struct_passwd': 'app_pwd.struct_passwd',
    +        'struct_pwent': 'app_pwd.struct_passwd',
    +    }
    +
    diff --git a/pypy/module/pwd/app_pwd.py b/pypy/module/pwd/app_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/app_pwd.py
    @@ -0,0 +1,20 @@
    +from _structseq import structseqtype, structseqfield
    +
    +class struct_passwd:
    +    """
    +    pwd.struct_passwd: Results from getpw*() routines.
    +
    +    This object may be accessed either as a tuple of
    +      (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
    +    or via the object attributes as named in the above tuple.
    +    """
    +    __metaclass__ = structseqtype
    +    name = "pwd.struct_passwd"
    +
    +    pw_name   = structseqfield(0, "user name")
    +    pw_passwd = structseqfield(1, "password")
    +    pw_uid    = structseqfield(2, "user id")
    +    pw_gid    = structseqfield(3, "group id")
    +    pw_gecos  = structseqfield(4, "real name")
    +    pw_dir    = structseqfield(5, "home directory")
    +    pw_shell  = structseqfield(6, "shell program")
    diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/interp_pwd.py
    @@ -0,0 +1,95 @@
    +from pypy.translator.tool.cbuild import ExternalCompilationInfo
    +from pypy.rpython.tool import rffi_platform
    +from pypy.rpython.lltypesystem import rffi, lltype
    +from pypy.interpreter.gateway import interp2app, unwrap_spec
    +from pypy.interpreter.error import OperationError, operationerrfmt
    +from pypy.rlib.rarithmetic import intmask
    +
    +eci = ExternalCompilationInfo(
    +    includes=['pwd.h']
    +    )
    +
    +class CConfig:
    +    _compilation_info_ = eci
    +
    +    uid_t = rffi_platform.SimpleType("uid_t")
    +
    +    passwd = rffi_platform.Struct(
    +        'struct passwd',
    +        [('pw_name', rffi.CCHARP),
    +         ('pw_passwd', rffi.CCHARP),
    +         ('pw_uid', rffi.INT),
    +         ('pw_gid', rffi.INT),
    +         ('pw_gecos', rffi.CCHARP),
    +         ('pw_dir', rffi.CCHARP),
    +         ('pw_shell', rffi.CCHARP),
    +         ])
    +
    +config = rffi_platform.configure(CConfig)
    +passwd_p = lltype.Ptr(config['passwd'])
    +uid_t = config['uid_t']
    +
    +def external(name, args, result, **kwargs):
    +    return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs)
    +
    +c_getpwuid = external("getpwuid", [uid_t], passwd_p)
    +c_getpwnam = external("getpwnam", [rffi.CCHARP], passwd_p)
    +c_setpwent = external("setpwent", [], lltype.Void)
    +c_getpwent = external("getpwent", [], passwd_p)
    +c_endpwent = external("endpwent", [], lltype.Void)
    +
    +def make_struct_passwd(space, pw):
    +    w_passwd_struct = space.getattr(space.getbuiltinmodule('pwd'),
    +                                    space.wrap('struct_passwd'))
    +    w_tuple = space.newtuple([
    +        space.wrap(rffi.charp2str(pw.c_pw_name)),
    +        space.wrap(rffi.charp2str(pw.c_pw_passwd)),
    +        space.wrap(intmask(pw.c_pw_uid)),
    +        space.wrap(intmask(pw.c_pw_gid)),
    +        space.wrap(rffi.charp2str(pw.c_pw_gecos)),
    +        space.wrap(rffi.charp2str(pw.c_pw_dir)),
    +        space.wrap(rffi.charp2str(pw.c_pw_shell)),
    +        ])
    +    return space.call_function(w_passwd_struct, w_tuple)
    +
+@unwrap_spec(uid=int)
    +def getpwuid(space, uid):
    +    """
    +    getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
    +                      pw_gid,pw_gecos,pw_dir,pw_shell)
    +    Return the password database entry for the given numeric user ID.
    +    See pwd.__doc__ for more on password database entries.
    +    """
    +    pw = c_getpwuid(uid)
    +    if not pw:
    +        raise operationerrfmt(space.w_KeyError,
    +            "getpwuid(): uid not found: %d", uid)
    +    return make_struct_passwd(space, pw)
    +
+@unwrap_spec(name=str)
    +def getpwnam(space, name):
    +    """
    +    getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
    +                        pw_gid,pw_gecos,pw_dir,pw_shell)
    +    Return the password database entry for the given user name.
    +    See pwd.__doc__ for more on password database entries.
    +    """
    +    pw = c_getpwnam(name)
    +    if not pw:
    +        raise operationerrfmt(space.w_KeyError,
    +            "getpwnam(): name not found: %s", name)
    +    return make_struct_passwd(space, pw)
    +
    +def getpwall(space):
    +    users_w = []
    +    c_setpwent()
    +    try:
    +        while True:
    +            pw = c_getpwent()
    +            if not pw:
    +                break
    +            users_w.append(make_struct_passwd(space, pw))
    +    finally:
    +        c_endpwent()
    +    return space.newlist(users_w)
    +    
    diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/pwd/test/test_pwd.py
    @@ -0,0 +1,28 @@
    +from pypy.conftest import gettestobjspace
    +
    +class AppTestPwd:
    +    def setup_class(cls):
    +        cls.space = gettestobjspace(usemodules=['pwd'])
    +
    +    def test_getpwuid(self):
    +        import pwd
    +        raises(KeyError, pwd.getpwuid, -1)
    +        pw = pwd.getpwuid(0)
    +        assert pw.pw_name == 'root'
    +        assert isinstance(pw.pw_passwd, str)
    +        assert pw.pw_uid == 0
    +        assert pw.pw_gid == 0
    +        assert pw.pw_dir == '/root'
    +        assert pw.pw_shell.startswith('/')
    +        #
    +        assert type(pw.pw_uid) is int
    +        assert type(pw.pw_gid) is int
    +
    +    def test_getpwnam(self):
    +        import pwd
    +        raises(KeyError, pwd.getpwnam, '~invalid~')
    +        assert pwd.getpwnam('root').pw_name == 'root'
    +
    +    def test_getpwall(self):
    +        import pwd
    +        assert pwd.getpwnam('root') in pwd.getpwall()
    diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
    --- a/pypy/module/pypyjit/interp_jit.py
    +++ b/pypy/module/pypyjit/interp_jit.py
    @@ -21,6 +21,7 @@
     
     PyFrame._virtualizable2_ = ['last_instr', 'pycode',
                                 'valuestackdepth', 'locals_stack_w[*]',
    +                            'cells[*]',
                                 'last_exception',
                                 'lastblock',
                                 'is_being_profiled',
    diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py
    --- a/pypy/module/pypyjit/policy.py
    +++ b/pypy/module/pypyjit/policy.py
    @@ -8,7 +8,8 @@
                     modname == '__builtin__.interp_classobj' or
                     modname == '__builtin__.functional' or
                     modname == '__builtin__.descriptor' or
    -                modname == 'thread.os_local'):
    +                modname == 'thread.os_local' or
    +                modname == 'thread.os_thread'):
                 return True
             if '.' in modname:
                 modname, _ = modname.split('.', 1)
    diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py
    --- a/pypy/module/pypyjit/test/test_policy.py
    +++ b/pypy/module/pypyjit/test/test_policy.py
    @@ -34,7 +34,9 @@
     
     def test_thread_local():
         from pypy.module.thread.os_local import Local
    +    from pypy.module.thread.os_thread import get_ident
         assert pypypolicy.look_inside_function(Local.getdict.im_func)
    +    assert pypypolicy.look_inside_function(get_ident)
     
     def test_pypy_module():
         from pypy.module._collections.interp_deque import W_Deque
    diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py
    --- a/pypy/module/pypyjit/test_pypy_c/model.py
    +++ b/pypy/module/pypyjit/test_pypy_c/model.py
    @@ -2,7 +2,10 @@
     import sys
     import re
     import os.path
    -from _pytest.assertion import newinterpret
    +try:
    +    from _pytest.assertion import newinterpret
    +except ImportError:   # e.g. Python 2.5
    +    newinterpret = None
     from pypy.tool.jitlogparser.parser import SimpleParser, Function, TraceForOpcode
     from pypy.tool.jitlogparser.storage import LoopStorage
     
    @@ -196,7 +199,7 @@
                         source = str(source.deindent()).strip()
             except py.error.ENOENT:
                 source = None
    -        if source and source.startswith('self._assert('):
    +        if source and source.startswith('self._assert(') and newinterpret:
                 # transform self._assert(x, 'foo') into assert x, 'foo'
                 source = source.replace('self._assert(', 'assert ')
                 source = source[:-1] # remove the trailing ')'
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
    @@ -1,3 +1,4 @@
    +from __future__ import with_statement
     import sys
     import types
     import subprocess
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_call.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py
    @@ -174,7 +174,7 @@
                 guard_no_overflow(descr=...)
                 i18 = force_token()
                 --TICK--
    -            jump(p0, p1, p2, p3, p4, i8, p7, i17, p8, i9, i17, p10, p11, p12, descr=)
    +            jump(..., descr=)
             """)
     
         def test_default_and_kw(self):
    @@ -396,3 +396,70 @@
                 --TICK--
                 jump(..., descr=)
             """)
    +
    +    def test_global_closure_has_constant_cells(self):
    +        log = self.run("""
    +            def make_adder(n):
    +                def add(x):
    +                    return x + n
    +                return add
    +            add5 = make_adder(5)
    +            def main():
    +                i = 0
    +                while i < 5000:
    +                    i = add5(i) # ID: call
    +            """, [])
    +        loop, = log.loops_by_id('call', is_entry_bridge=True)
    +        assert loop.match("""
    +            guard_value(i6, 1, descr=...)
    +            guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...)
    +            guard_value(i4, 0, descr=...)
    +            guard_value(p3, ConstPtr(ptr14), descr=...)
    +            i15 = getfield_gc_pure(p8, descr=)
    +            i17 = int_lt(i15, 5000)
    +            guard_true(i17, descr=...)
    +            p18 = getfield_gc(p0, descr=)
    +            guard_value(p18, ConstPtr(ptr19), descr=...)
    +            p20 = getfield_gc(p18, descr=)
    +            guard_value(p20, ConstPtr(ptr21), descr=...)
    +            guard_not_invalidated(descr=...)
    +            # most importantly, there is no getarrayitem_gc here
    +            p23 = call(ConstClass(getexecutioncontext), descr=)
    +            p24 = getfield_gc(p23, descr=)
    +            i25 = force_token()
    +            p26 = getfield_gc(p23, descr=)
    +            guard_isnull(p26, descr=...)
    +            i27 = getfield_gc(p23, descr=)
    +            i28 = int_is_zero(i27)
    +            guard_true(i28, descr=...)
    +            p30 = getfield_gc(ConstPtr(ptr29), descr=)
    +            guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...)
    +            i32 = getfield_gc_pure(p30, descr=)
    +            i33 = int_add_ovf(i15, i32)
    +            guard_no_overflow(descr=...)
    +            --TICK--
    +            jump(p0, p1, p2, p5, i33, i32, p23, p30, p24, descr=)
    +        """)
    +
    +    def test_local_closure_is_virtual(self):
    +        log = self.run("""
    +            def main():
    +                i = 0
    +                while i < 5000:
    +                    def add():
    +                        return i + 1
    +                    i = add() # ID: call
    +            """, [])
    +        loop, = log.loops_by_id('call')
    +        assert loop.match("""
    +            i8 = getfield_gc_pure(p6, descr=)
    +            i10 = int_lt(i8, 5000)
    +            guard_true(i10, descr=...)
    +            i11 = force_token()
    +            i13 = int_add(i8, 1)
    +            --TICK--
    +            p22 = new_with_vtable(ConstClass(W_IntObject))
    +            setfield_gc(p22, i13, descr=)
    +            setfield_gc(p4, p22, descr=)
    +            jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=)
    +        """)
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py
    @@ -23,6 +23,4 @@
                 guard_not_invalidated(descr=...)
                 p19 = getfield_gc(ConstPtr(p17), descr=)
                 guard_value(p19, ConstPtr(ptr20), descr=...)
    -            p22 = getfield_gc(ConstPtr(ptr21), descr=)
    -            guard_nonnull(p22, descr=...)
    -        """)
    +        """)
    \ No newline at end of file
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    @@ -142,6 +142,7 @@
                 i = 0
                 b = B(1)
                 while i < 100:
    +                b.x
                     v = b.x # ID: loadattr
                     i += v
                 return i
    @@ -150,8 +151,6 @@
             loop, = log.loops_by_filename(self.filepath)
             assert loop.match_by_id('loadattr',
             '''
    -        guard_not_invalidated(descr=...)
    -        i16 = arraylen_gc(p10, descr=)
             i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...)
             guard_no_exception(descr=...)
             i21 = int_and(i19, _)
    @@ -181,8 +180,7 @@
             assert loop.match_by_id("contains", """
                 guard_not_invalidated(descr=...)
                 i11 = force_token()
    -            i12 = int_add_ovf(i5, i7)
    -            guard_no_overflow(descr=...)
    +            i12 = int_add(i5, 1)
             """)
     
         def test_id_compare_optimization(self):
    diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
    --- a/pypy/module/sys/__init__.py
    +++ b/pypy/module/sys/__init__.py
    @@ -47,6 +47,7 @@
             'pypy_initial_path'     : 'state.pypy_initial_path',
     
             '_getframe'             : 'vm._getframe', 
    +        '_current_frames'       : 'vm._current_frames', 
             'setrecursionlimit'     : 'vm.setrecursionlimit', 
             'getrecursionlimit'     : 'vm.getrecursionlimit', 
             'setcheckinterval'      : 'vm.setcheckinterval', 
    diff --git a/pypy/module/sys/test/test_encoding.py b/pypy/module/sys/test/test_encoding.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/module/sys/test/test_encoding.py
    @@ -0,0 +1,30 @@
    +import os, py
    +from pypy.rlib import rlocale
    +from pypy.module.sys.interp_encoding import _getfilesystemencoding
    +from pypy.module.sys.interp_encoding import base_encoding
    +
    +
    +def test__getfilesystemencoding(space):
    +    if not (rlocale.HAVE_LANGINFO and rlocale.CODESET):
    +        py.test.skip("requires HAVE_LANGINFO and CODESET")
    +
    +    def clear():
    +        for key in os.environ.keys():
    +            if key == 'LANG' or key.startswith('LC_'):
    +                del os.environ[key]
    +
    +    def get(**env):
    +        original_env = os.environ.copy()
    +        try:
    +            clear()
    +            os.environ.update(env)
    +            return _getfilesystemencoding(space)
    +        finally:
    +            clear()
    +            os.environ.update(original_env)
    +
    +    assert get() in (base_encoding, 'ANSI_X3.4-1968')
    +    assert get(LANG='foobar') in (base_encoding, 'ANSI_X3.4-1968')
    +    assert get(LANG='en_US.UTF-8') == 'UTF-8'
    +    assert get(LC_ALL='en_US.UTF-8') == 'UTF-8'
    +    assert get(LC_CTYPE='en_US.UTF-8') == 'UTF-8'
    diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
    --- a/pypy/module/sys/test/test_sysmodule.py
    +++ b/pypy/module/sys/test/test_sysmodule.py
    @@ -1,6 +1,6 @@
     # -*- coding: iso-8859-1 -*-
     import autopath
    -from pypy.conftest import option
    +from pypy.conftest import option, gettestobjspace
     from py.test import raises
     from pypy.interpreter.gateway import app2interp_temp
     import sys
    @@ -524,3 +524,51 @@
             # If this ever actually becomes a compilation option this test should
             # be changed.
             assert sys.float_repr_style == "short"
    +
    +class AppTestCurrentFrames:
    +
    +    def test_current_frames(self):
    +        try:
    +            import thread
    +        except ImportError:
    +            pass
    +        else:
    +            skip('This test requires an intepreter without threads')
    +        import sys
    +
    +        def f():
    +            return sys._current_frames()
    +        frames = f()
    +        assert frames.keys() == [0]
    +        assert frames[0].f_code.co_name == 'f'
    +
    +class AppTestCurrentFramesWithThread(AppTestCurrentFrames):
    +    def setup_class(cls):
    +        cls.space = gettestobjspace(usemodules=('thread',))
    +
    +    def test_current_frames(self):
    +        import sys
    +        import time
    +        import thread
    +
    +        thread_id = thread.get_ident()
    +        self.ready = False
    +        def other_thread():
    +            self.ready = True
    +            print "thread started"
    +            time.sleep(5)
    +        thread.start_new_thread(other_thread, ())
    +
    +        def f():
    +            for i in range(100):
    +                if self.ready: break
    +                time.sleep(0.1)
    +            return sys._current_frames()
    +        
    +        frames = f()
    +        thisframe = frames.pop(thread_id)
    +        assert thisframe.f_code.co_name == 'f'
    +
    +        assert len(frames) == 1
    +        _, other_frame = frames.popitem()
    +        assert other_frame.f_code.co_name == 'other_thread'
    diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
    --- a/pypy/module/sys/version.py
    +++ b/pypy/module/sys/version.py
    @@ -14,7 +14,7 @@
     
     if platform.name == 'msvc':
         COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600)
    -elif platform.cc.startswith('gcc'):
    +elif platform.cc is not None and platform.cc.startswith('gcc'):
         out = platform.execute(platform.cc, '--version').out
         match = re.search(' (\d+\.\d+(\.\d+)*)', out)
         if match:
    diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
    --- a/pypy/module/sys/vm.py
    +++ b/pypy/module/sys/vm.py
    @@ -43,6 +43,23 @@
         f.mark_as_escaped()
         return space.wrap(f)
     
    +def _current_frames(space):
    +    """_current_frames() -> dictionary
    +
    +    Return a dictionary mapping each current thread T's thread id to T's
    +    current stack frame.
    +
    +    This function should be used for specialized purposes only."""
    +    w_result = space.newdict()
    +    ecs = space.threadlocals.getallvalues()
    +    for thread_ident, ec in ecs.items():
    +        f = ec.gettopframe_nohidden()
    +        f.mark_as_escaped()
    +        space.setitem(w_result,
    +                      space.wrap(thread_ident),
    +                      space.wrap(f))
    +    return w_result                      
    +
     def setrecursionlimit(space, w_new_limit):
         """setrecursionlimit() sets the maximum number of nested calls that
     can occur before a RuntimeError is raised.  On PyPy the limit is
    diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py
    --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py
    +++ b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py
    @@ -1,3 +1,4 @@
    +import py; py.test.skip("xxx remove")
     
     """ Controllers tests
     """
    @@ -8,7 +9,7 @@
     class AppTestDistributed(object):
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -            "usemodules":("_stackless",)})
    +            "usemodules":("_continuation",)})
     
         def test_init(self):
             import distributed
    @@ -90,14 +91,12 @@
     
     class AppTestDistributedTasklets(object):
         spaceconfig = {"objspace.std.withtproxy": True,
    -                   "objspace.usemodules._stackless": True}
    +                   "objspace.usemodules._continuation": True}
         reclimit = sys.getrecursionlimit()
     
         def setup_class(cls):
             import py.test
             py.test.importorskip('greenlet')
    -        #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -        #    "usemodules":("_stackless",)})
             cls.w_test_env_ = cls.space.appexec([], """():
             from distributed import test_env
             return (test_env,)
    diff --git a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py
    --- a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py
    +++ b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py
    @@ -1,5 +1,4 @@
    -
    -import py
    +import py; py.test.skip("xxx remove")
     from pypy.conftest import gettestobjspace, option
     
     def setup_module(mod):
    @@ -10,7 +9,7 @@
             if not option.runappdirect:
                 py.test.skip("Cannot run this on top of py.py because of PopenGateway")
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless",)})
    +                                       "usemodules":("_continuation",)})
             cls.w_remote_side_code = cls.space.appexec([], """():
             import sys
             sys.path.insert(0, '%s')
    diff --git a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py
    --- a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py
    +++ b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py
    @@ -1,4 +1,4 @@
    -import py
    +import py; py.test.skip("xxx remove")
     from pypy.conftest import gettestobjspace
     
     def setup_module(mod):
    @@ -9,7 +9,8 @@
     class AppTestSocklayer:
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
    -                                       "usemodules":("_stackless","_socket", "select")})
    +                                       "usemodules":("_continuation",
    +                                                     "_socket", "select")})
         
         def test_socklayer(self):
             class X(object):
    diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py
    --- a/pypy/module/test_lib_pypy/test_greenlet.py
    +++ b/pypy/module/test_lib_pypy/test_greenlet.py
    @@ -231,3 +231,13 @@
             assert res == "next step"
             res = g2.switch("goes to f1 instead")
             assert res == "all ok"
    +
    +    def test_throw_in_not_started_yet(self):
    +        from greenlet import greenlet
    +        #
    +        def f1():
    +            never_reached
    +        #
    +        g1 = greenlet(f1)
    +        raises(ValueError, g1.throw, ValueError)
    +        assert g1.dead
    diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py
    rename from pypy/module/test_lib_pypy/test_stackless.py
    rename to pypy/module/test_lib_pypy/test_stackless_pickle.py
    --- a/pypy/module/test_lib_pypy/test_stackless.py
    +++ b/pypy/module/test_lib_pypy/test_stackless_pickle.py
    @@ -1,3 +1,4 @@
    +import py; py.test.skip("XXX port me")
     from pypy.conftest import gettestobjspace, option
     
     class AppTest_Stackless:
    diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py
    --- a/pypy/module/thread/threadlocals.py
    +++ b/pypy/module/thread/threadlocals.py
    @@ -43,6 +43,9 @@
             ident = self._mainthreadident
             return self._valuedict.get(ident, None)
     
    +    def getallvalues(self):
    +        return self._valuedict
    +
         def enter_thread(self, space):
             "Notification that the current thread is just starting."
             ec = space.getexecutioncontext()
    diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py
    --- a/pypy/objspace/flow/flowcontext.py
    +++ b/pypy/objspace/flow/flowcontext.py
    @@ -184,7 +184,7 @@
     
     class FlowExecutionContext(ExecutionContext):
     
    -    def __init__(self, space, code, globals, constargs={}, closure=None,
    +    def __init__(self, space, code, globals, constargs={}, outer_func=None,
                      name=None):
             ExecutionContext.__init__(self, space)
             self.code = code
    @@ -193,11 +193,11 @@
     
             self.crnt_offset = -1
             self.crnt_frame = None
    -        if closure is None:
    +        if outer_func and outer_func.closure:
    +            self.closure = [nestedscope.Cell(Constant(value))
    +                            for value in outer_func.closure]
    +        else:
                 self.closure = None
    -        else:
    -            self.closure = [nestedscope.Cell(Constant(value))
    -                            for value in closure]
             frame = self.create_frame()
             formalargcount = code.getformalargcount()
             arg_list = [Variable() for i in range(formalargcount)]
    @@ -216,7 +216,7 @@
             # while ignoring any operation like the creation of the locals dict
             self.recorder = []
             frame = FlowSpaceFrame(self.space, self.code,
    -                               self.w_globals, self.closure)
    +                               self.w_globals, self)
             frame.last_instr = 0
             return frame
     
    diff --git a/pypy/objspace/flow/objspace.py b/pypy/objspace/flow/objspace.py
    --- a/pypy/objspace/flow/objspace.py
    +++ b/pypy/objspace/flow/objspace.py
    @@ -252,9 +252,9 @@
                 raise TypeError("%r is a generator" % (func,))
             code = PyCode._from_code(self, code)
             if func.func_closure is None:
    -            closure = None
    +            cl = None
             else:
    -            closure = [extract_cell_content(c) for c in func.func_closure]
    +            cl = [extract_cell_content(c) for c in func.func_closure]
             # CallableFactory.pycall may add class_ to functions that are methods
             name = func.func_name
             class_ = getattr(func, 'class_', None)
    @@ -262,8 +262,10 @@
                 name = '%s.%s' % (class_.__name__, name)
             for c in "<>&!":
                 name = name.replace(c, '_')
    +        class outerfunc: # hack
    +            closure = cl
             ec = flowcontext.FlowExecutionContext(self, code, func.func_globals,
    -                                              constargs, closure, name)
    +                                              constargs, outerfunc, name)
             graph = ec.graph
             graph.func = func
             # attach a signature and defaults to the graph
    diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py
    --- a/pypy/objspace/std/celldict.py
    +++ b/pypy/objspace/std/celldict.py
    @@ -65,6 +65,10 @@
             if isinstance(cell, ModuleCell):
                 cell.w_value = w_value
                 return
    +        # If the new value and the current value are the same, don't create a
    +        # level of indirection, or mutate our version.
    +        if self.space.is_w(w_value, cell):
    +            return
             if cell is not None:
                 w_value = ModuleCell(w_value)
             self.mutated()
    diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py
    --- a/pypy/objspace/std/fake.py
    +++ b/pypy/objspace/std/fake.py
    @@ -142,7 +142,7 @@
     
         def funcrun(self, func, args):
             frame = func.space.createframe(self, func.w_func_globals,
    -                                        func.closure)
    +                                       func)
             sig = self.signature()
             scope_w = args.parse_obj(None, func.name, sig, func.defs_w)
             frame.setfastscope(scope_w)
    diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py
    --- a/pypy/objspace/std/floatobject.py
    +++ b/pypy/objspace/std/floatobject.py
    @@ -355,9 +355,13 @@
         y = w_float2.floatval
         if y == 0.0:
             raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo"))
    -    mod = math.fmod(x, y)
    -    if (mod and ((y < 0.0) != (mod < 0.0))):
    -        mod += y
    +    try:
    +        mod = math.fmod(x, y)
    +    except ValueError:
    +        mod = rfloat.NAN
    +    else:
    +        if (mod and ((y < 0.0) != (mod < 0.0))):
    +            mod += y
     
         return W_FloatObject(mod)
     
    @@ -366,7 +370,10 @@
         y = w_float2.floatval
         if y == 0.0:
             raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo"))
    -    mod = math.fmod(x, y)
    +    try:
    +        mod = math.fmod(x, y)
    +    except ValueError:
    +        return [W_FloatObject(rfloat.NAN), W_FloatObject(rfloat.NAN)]
         # fmod is typically exact, so vx-mod is *mathematically* an
         # exact multiple of wx.  But this is fp arithmetic, and fp
         # vx - mod is an approximation; the result is that div may
    diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py
    --- a/pypy/objspace/std/marshal_impl.py
    +++ b/pypy/objspace/std/marshal_impl.py
    @@ -325,10 +325,10 @@
         # of building a list of tuples.
         w_dic = space.newdict()
         while 1:
    -        w_key = u.get_w_obj(True)
    +        w_key = u.get_w_obj(allow_null=True)
             if w_key is None:
                 break
    -        w_value = u.get_w_obj(False)
    +        w_value = u.get_w_obj()
             space.setitem(w_dic, w_key, w_value)
         return w_dic
     register(TYPE_DICT, unmarshal_DictMulti)
    @@ -364,7 +364,7 @@
     # so we no longer can handle it in interp_marshal.atom_strlist
     
     def unmarshal_str(u):
    -    w_obj = u.get_w_obj(False)
    +    w_obj = u.get_w_obj()
         try:
             return u.space.str_w(w_obj)
         except OperationError, e:
    diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py
    --- a/pypy/objspace/std/objecttype.py
    +++ b/pypy/objspace/std/objecttype.py
    @@ -24,7 +24,12 @@
         return w_obj.getrepr(space, '%s object' % (classname,))
     
     def descr__str__(space, w_obj):
    -    return space.repr(w_obj)
    +    w_type = space.type(w_obj)
    +    w_impl = w_type.lookup("__repr__")
    +    if w_impl is None:
    +        raise OperationError(space.w_TypeError,      # can it really occur?
    +                             space.wrap("operand does not support unary str"))
    +    return space.get_and_call_function(w_impl, w_obj)
     
     def descr__class__(space, w_obj):
         return space.type(w_obj)
    diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
    --- a/pypy/objspace/std/objspace.py
    +++ b/pypy/objspace/std/objspace.py
    @@ -129,12 +129,12 @@
             ec._py_repr = None
             return ec
     
    -    def createframe(self, code, w_globals, closure=None):
    +    def createframe(self, code, w_globals, outer_func=None):
             from pypy.objspace.std.fake import CPythonFakeCode, CPythonFakeFrame
             if not we_are_translated() and isinstance(code, CPythonFakeCode):
                 return CPythonFakeFrame(self, code, w_globals)
             else:
    -            return ObjSpace.createframe(self, code, w_globals, closure)
    +            return ObjSpace.createframe(self, code, w_globals, outer_func)
     
         def gettypefor(self, cls):
             return self.gettypeobject(cls.typedef)
    diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py
    --- a/pypy/objspace/std/stringobject.py
    +++ b/pypy/objspace/std/stringobject.py
    @@ -913,12 +913,16 @@
     def repr__String(space, w_str):
         s = w_str._value
     
    -    buf = StringBuilder(len(s) + 2)
    -
         quote = "'"
         if quote in s and '"' not in s:
             quote = '"'
     
    +    return space.wrap(string_escape_encode(s, quote))
    +
    +def string_escape_encode(s, quote):
    +
    +    buf = StringBuilder(len(s) + 2)
    +
         buf.append(quote)
         startslice = 0
     
    @@ -959,7 +963,7 @@
     
         buf.append(quote)
     
    -    return space.wrap(buf.build())
    +    return buf.build()
     
     
     DEFAULT_NOOP_TABLE = ''.join([chr(i) for i in range(256)])
    diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py
    --- a/pypy/objspace/std/test/test_celldict.py
    +++ b/pypy/objspace/std/test/test_celldict.py
    @@ -39,6 +39,20 @@
             assert d.getitem("a") is None
             assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None
     
    +    def test_same_key_set_twice(self):
    +        strategy = ModuleDictStrategy(space)
    +        storage = strategy.get_empty_storage()
    +        d = W_DictMultiObject(space, strategy, storage)
    +
    +        v1 = strategy.version
    +        x = object()
    +        d.setitem("a", x)
    +        v2 = strategy.version
    +        assert v1 is not v2
    +        d.setitem("a", x)
    +        v3 = strategy.version
    +        assert v2 is v3
    +
     class AppTestModuleDict(object):
         def setup_class(cls):
             cls.space = gettestobjspace(**{"objspace.std.withcelldict": True})
    diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py
    --- a/pypy/objspace/std/test/test_floatobject.py
    +++ b/pypy/objspace/std/test/test_floatobject.py
    @@ -767,3 +767,19 @@
     
         def test_invalid(self):
             raises(ValueError, float.fromhex, "0P")
    +
    +    def test_division_edgecases(self):
    +        import math
    +
    +        # inf
    +        inf = float("inf")
    +        assert math.isnan(inf % 3)
    +        assert math.isnan(inf // 3)
    +        x, y = divmod(inf, 3)
    +        assert math.isnan(x)
    +        assert math.isnan(y)
    +
    +        # divide by 0
    +        raises(ZeroDivisionError, lambda: inf % 0)
    +        raises(ZeroDivisionError, lambda: inf // 0)
    +        raises(ZeroDivisionError, divmod, inf, 0)
    \ No newline at end of file
    diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py
    --- a/pypy/objspace/std/test/test_methodcache.py
    +++ b/pypy/objspace/std/test/test_methodcache.py
    @@ -88,30 +88,37 @@
       
         def test_many_names(self):
             import __pypy__
    -        class A(object):
    -            foo = 5
    -            bar = 6
    -            baz = 7
    -            xyz = 8
    -            stuff = 9
    -            a = 10
    -            foobar = 11
    +        for j in range(20):
    +            class A(object):
    +                foo = 5
    +                bar = 6
    +                baz = 7
    +                xyz = 8
    +                stuff = 9
    +                a = 10
    +                foobar = 11
     
    -        a = A()
    -        names = [name for name in A.__dict__.keys()
    -                      if not name.startswith('_')]
    -        names.sort()
    -        names_repeated = names * 10
    -        result = []
    -        __pypy__.reset_method_cache_counter()
    -        for name in names_repeated:
    -            result.append(getattr(a, name))
    -        append_counter = __pypy__.method_cache_counter("append")
    -        names_counters = [__pypy__.method_cache_counter(name)
    -                          for name in names]
    -        assert append_counter[0] >= 5 * len(names)
    -        for name, count in zip(names, names_counters):
    -            assert count[0] >= 5, str((name, count))
    +            a = A()
    +            names = [name for name in A.__dict__.keys()
    +                          if not name.startswith('_')]
    +            names.sort()
    +            names_repeated = names * 10
    +            result = []
    +            __pypy__.reset_method_cache_counter()
    +            for name in names_repeated:
    +                result.append(getattr(a, name))
    +            append_counter = __pypy__.method_cache_counter("append")
    +            names_counters = [__pypy__.method_cache_counter(name)
    +                              for name in names]
    +            try:
    +                assert append_counter[0] >= 10 * len(names) - 1
    +                for name, count in zip(names, names_counters):
    +                    assert count == (9, 1), str((name, count))
    +                break
    +            except AssertionError:
    +                pass
    +        else:
    +            raise
     
         def test_mutating_bases(self):
             class C(object):
    @@ -134,20 +141,24 @@
     
         def test_custom_metaclass(self):
             import __pypy__
    -        class MetaA(type):
    -            def __getattribute__(self, x):
    -                return 1
    -        def f(self):
    -            return 42
    -        A = type.__new__(MetaA, "A", (), {"f": f})
    -        l = [type.__getattribute__(A, "__new__")(A)] * 10
    -        __pypy__.reset_method_cache_counter()
    -        for i, a in enumerate(l):
    -            assert a.f() == 42
    -        cache_counter = __pypy__.method_cache_counter("f")
    -        assert cache_counter[0] >= 5
    -        assert cache_counter[1] >= 1 # should be (27, 3)
    -        assert sum(cache_counter) == 10
    +        for j in range(20):
    +            class MetaA(type):
    +                def __getattribute__(self, x):
    +                    return 1
    +            def f(self):
    +                return 42
    +            A = type.__new__(MetaA, "A", (), {"f": f})
    +            l = [type.__getattribute__(A, "__new__")(A)] * 10
    +            __pypy__.reset_method_cache_counter()
    +            for i, a in enumerate(l):
    +                assert a.f() == 42
    +            cache_counter = __pypy__.method_cache_counter("f")
    +            assert sum(cache_counter) == 10
    +            if cache_counter == (9, 1):
    +                break
    +            #else the moon is misaligned, try again
    +        else:
    +            raise AssertionError("cache_counter = %r" % (cache_counter,))
     
         def test_mutate_class(self):
             import __pypy__
    diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py
    --- a/pypy/objspace/std/test/test_obj.py
    +++ b/pypy/objspace/std/test/test_obj.py
    @@ -94,3 +94,11 @@
             #assert len(log) == 1
             #assert log[0].message.args == ("object.__init__() takes no parameters",)
             #assert type(log[0].message) is DeprecationWarning
    +
    +    def test_object_str(self):
    +        # obscure case: __str__() must delegate to __repr__() without adding
    +        # type checking on its own
    +        class A(object):
    +            def __repr__(self):
    +                return 123456
    +        assert A().__str__() == 123456
    diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py
    --- a/pypy/objspace/std/test/test_rangeobject.py
    +++ b/pypy/objspace/std/test/test_rangeobject.py
    @@ -89,6 +89,9 @@
             assert not self.not_forced(r)
             r.sort()
             assert r == range(1, 100) + [999]
    +        r = range(10)
    +        r.sort(key=lambda x: -x)
    +        assert r == range(9, -1, -1)
     
         def test_pop(self):
             r = range(10)
    diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py
    --- a/pypy/objspace/std/test/test_stringformat.py
    +++ b/pypy/objspace/std/test/test_stringformat.py
    @@ -168,7 +168,7 @@
     
         def test_incomplete_format(self):
             raises(ValueError, '%'.__mod__, ((23,),))
    -        raises(ValueError, '%('.__mod__, ({},))
    +        raises((ValueError, TypeError), '%('.__mod__, ({},))
     
         def test_format_char(self):
             import sys
    diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
    --- a/pypy/objspace/std/test/test_unicodeobject.py
    +++ b/pypy/objspace/std/test/test_unicodeobject.py
    @@ -780,8 +780,22 @@
             assert type(s) is unicode
             assert s == u'\u1234'
     
    +        # now the same with a new-style class...
    +        class A(object):
    +            def __init__(self, num):
    +                self.num = num
    +            def __str__(self):
    +                return unichr(self.num)
    +
    +        s = '%s' % A(111)    # this is ASCII
    +        assert type(s) is unicode
    +        assert s == chr(111)
    +
    +        s = '%s' % A(0x1234)    # this is not ASCII
    +        assert type(s) is unicode
    +        assert s == u'\u1234'
    +
         def test_formatting_unicode__str__2(self):
    -        skip("this is completely insane")
             class A:
                 def __str__(self):
                     return u'baz'
    @@ -798,9 +812,22 @@
             s = '%s %s' % (a, b)
             assert s == u'baz bar'
     
    +        skip("but this case here is completely insane")
             s = '%s %s' % (b, a)
             assert s == u'foo baz'
     
    +    def test_formatting_unicode__str__3(self):
    +        # "bah" is all I can say
    +        class X(object):
    +            def __repr__(self):
    +                return u'\u1234'
    +        '%s' % X()
    +        #
    +        class X(object):
    +            def __str__(self):
    +                return u'\u1234'
    +        '%s' % X()
    +
         def test_str_subclass(self):
             class Foo9(str):
                 def __unicode__(self):
    diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py
    --- a/pypy/rlib/clibffi.py
    +++ b/pypy/rlib/clibffi.py
    @@ -286,10 +286,10 @@
     
     FFI_OK = cConfig.FFI_OK
     FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF
    -FFI_DEFAULT_ABI = rffi.cast(rffi.USHORT, cConfig.FFI_DEFAULT_ABI)
    +FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI
     if _WIN32:
    -    FFI_STDCALL = rffi.cast(rffi.USHORT, cConfig.FFI_STDCALL)
    -FFI_TYPE_STRUCT = rffi.cast(rffi.USHORT, cConfig.FFI_TYPE_STRUCT)
    +    FFI_STDCALL = cConfig.FFI_STDCALL
    +FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT
     FFI_CIFP = rffi.COpaquePtr('ffi_cif', compilation_info=eci)
     
     FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure)
    @@ -319,7 +319,7 @@
            which the 'ffistruct' member is a regular FFI_TYPE.
         """
         tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types)+1, flavor='raw')
    -    tpe.ffistruct.c_type = FFI_TYPE_STRUCT
    +    tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT)
         tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size)
         tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment)
         tpe.ffistruct.c_elements = rffi.cast(FFI_TYPE_PP,
    @@ -402,12 +402,20 @@
     
     closureHeap = ClosureHeap()
     
    -FUNCFLAG_STDCALL   = 0
    -FUNCFLAG_CDECL     = 1  # for WINAPI calls
    +FUNCFLAG_STDCALL   = 0    # on Windows: for WINAPI calls
    +FUNCFLAG_CDECL     = 1    # on Windows: for __cdecl calls
     FUNCFLAG_PYTHONAPI = 4
     FUNCFLAG_USE_ERRNO = 8
     FUNCFLAG_USE_LASTERROR = 16
     
    +def get_call_conv(flags, from_jit):
    +    if _WIN32 and (flags & FUNCFLAG_CDECL == 0):
    +        return FFI_STDCALL
    +    else:
    +        return FFI_DEFAULT_ABI
    +get_call_conv._annspecialcase_ = 'specialize:arg(1)'     # hack :-/
    +
    +
     class AbstractFuncPtr(object):
         ll_cif = lltype.nullptr(FFI_CIFP.TO)
         ll_argtypes = lltype.nullptr(FFI_TYPE_PP.TO)
    @@ -427,21 +435,17 @@
             self.ll_cif = lltype.malloc(FFI_CIFP.TO, flavor='raw',
                                         track_allocation=False) # freed by the __del__
     
    -        if _WIN32 and (flags & FUNCFLAG_CDECL == 0):
    -            cc = FFI_STDCALL
    -        else:
    -            cc = FFI_DEFAULT_ABI
    -
             if _MSVC:
                 # This little trick works correctly with MSVC.
                 # It returns small structures in registers
    -            if r_uint(restype.c_type) == FFI_TYPE_STRUCT:
    +            if intmask(restype.c_type) == FFI_TYPE_STRUCT:
                     if restype.c_size <= 4:
                         restype = ffi_type_sint32
                     elif restype.c_size <= 8:
                         restype = ffi_type_sint64
     
    -        res = c_ffi_prep_cif(self.ll_cif, cc,
    +        res = c_ffi_prep_cif(self.ll_cif,
    +                             rffi.cast(rffi.USHORT, get_call_conv(flags,False)),
                                  rffi.cast(rffi.UINT, argnum), restype,
                                  self.ll_argtypes)
             if not res == FFI_OK:
    diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py
    --- a/pypy/rlib/libffi.py
    +++ b/pypy/rlib/libffi.py
    @@ -75,7 +75,7 @@
         @staticmethod
         @jit.elidable
         def is_struct(ffi_type):
    -        return intmask(ffi_type.c_type) == intmask(FFI_TYPE_STRUCT)
    +        return intmask(ffi_type.c_type) == FFI_TYPE_STRUCT
     
     types._import()
     
    @@ -206,6 +206,7 @@
         _immutable_fields_ = ['funcsym']
         argtypes = []
         restype = lltype.nullptr(clibffi.FFI_TYPE_P.TO)
    +    flags = 0
         funcsym = lltype.nullptr(rffi.VOIDP.TO)
     
         def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL,
    diff --git a/pypy/rlib/parsing/tree.py b/pypy/rlib/parsing/tree.py
    --- a/pypy/rlib/parsing/tree.py
    +++ b/pypy/rlib/parsing/tree.py
    @@ -6,9 +6,16 @@
             content = ["digraph G{"]
             content.extend(self.dot())
             content.append("}")
    -        p = py.test.ensuretemp("automaton").join("temp.dot")
    +        try:
    +            p = py.test.ensuretemp("automaton").join("temp.dot")
    +            remove = False
    +        except AttributeError: # pytest lacks ensuretemp, make a normal one
    +            p = py.path.local.mkdtemp().join('automaton.dot')
    +            remove = True
             p.write("\n".join(content))
             graphclient.display_dot_file(str(p))
    +        if remove:
    +            p.dirpath().remove()
     
     class Symbol(Node):
     
    diff --git a/pypy/rlib/rcoroutine.py b/pypy/rlib/rcoroutine.py
    deleted file mode 100644
    --- a/pypy/rlib/rcoroutine.py
    +++ /dev/null
    @@ -1,357 +0,0 @@
    -"""
    -Basic Concept:
    ---------------
    -
    -All concurrency is expressed by some means of coroutines.
    -This is the lowest possible exposable interface.
    -
    -A coroutine is a structure that controls a sequence
    -of continuations in time. It contains a frame object
    -that is a restartable stack chain. This frame object
    -is updated on every switch.
    -
    -The frame can be None. Either the coroutine is not yet
    -bound, or it is the current coroutine of some costate.
    -See below. XXX rewrite a definition of these terms.
    -
    -There is always a notation of a "current" and a "last"
    -coroutine. Current has no frame and represents the
    -running program. last is needed to keep track of the
    -coroutine that receives a new frame chain after a switch.
    -
    -A costate object holds last and current.
    -There are different coroutine concepts existing in
    -parallel, like plain interp-level coroutines and
    -app-level structures like coroutines, greenlets and
    -tasklets.
    -Every concept is associated with its own costate object.
    -This allows for peaceful co-existence of many concepts.
    -The type of a switch is determined by the target's costate.
    -"""
    -
    -import py; py.test.skip("fixme: rewrite using rlib.rstacklet")
    -# XXX ^^^ the reason it is not done is that pypy.rlib.rcoroutine
    -# plus pypy/module/_stackless look like faaaaaar too much code
    -# to me :-(
    -
    -from pypy.rlib.rstack import yield_current_frame_to_caller
    -from pypy.rlib.objectmodel import we_are_translated
    -
    -from pypy.interpreter.error import OperationError
    -
    -try:
    -    from greenlet import greenlet
    -    main_greenlet = greenlet.getcurrent()
    -except (ImportError, ValueError):
    -    def greenlet(*args, **kwargs):
    -        raise NotImplementedError("need either greenlets or a translated version of pypy")
    -
    -class FrameChain(object):
    -    """Greenlet-based emulation of the primitive rstack 'frames' of RPython"""
    -
    -    def __init__(self, thunk=None):
    -        if thunk:
    -            self.greenlet = greenlet(thunk)
    -        else:
    -            self.greenlet = greenlet.getcurrent()
    -
    -    def switch(self):
    -        last = FrameChain()
    -        return self.greenlet.switch(last)
    -
    -import sys, os
    -
    -def make_coroutine_classes(baseclass):
    -    class BaseCoState(object):
    -        def __init__(self):
    -            self.current = self.main = None
    -
    -        def __repr__(self):
    -            "NOT_RPYTHON"
    -            # for debugging only
    -            return '<%s current=%r>' % (self.__class__.__name__, self.current)
    -
    -        def update(self, new):
    -            syncstate.leaving = self.current
    -            syncstate.entering = new
    -            self.current = new
    -            frame, new.frame = new.frame, None
    -            return frame
    -
    -
    -    class CoState(BaseCoState):
    -        def __init__(self):
    -            BaseCoState.__init__(self)
    -            self.current = self.main = Coroutine(self)
    -
    -    class CoroutineDamage(SystemError):
    -        pass
    -
    -
    -    class SyncState(object):
    -        def __init__(self):
    -            self.reset()
    -
    -        def reset(self):
    -            self.default_costate = None
    -            self.leaving = None
    -            self.entering = None
    -            self.things_to_do = False
    -            self.temp_exc = None
    -            self.to_delete = []
    -
    -        def switched(self, incoming_frame):
    -            left = syncstate.leaving
    -            entered = syncstate.entering
    -            syncstate.leaving = syncstate.entering = None
    -            if left is not None:   # mostly to work around an annotation problem;
    -                                   # should not really be None
    -                left.frame = incoming_frame
    -                left.goodbye()
    -            if entered is not None:
    -                entered.hello()
    -            if self.things_to_do:
    -                self._do_things_to_do()
    -
    -        def push_exception(self, exc):
    -            self.things_to_do = True
    -            self.temp_exc = exc
    -
    -        def check_for_zombie(self, obj):
    -            return obj in self.to_delete
    -
    -        def postpone_deletion(self, obj):
    -            self.to_delete.append(obj)
    -            self.things_to_do = True
    -
    -        def _do_things_to_do(self):
    -            if self.temp_exc is not None:
    -                # somebody left an unhandled exception and switched to us.
    -                # this both provides default exception handling and the
    -                # way to inject an exception, like CoroutineExit.
    -                e, self.temp_exc = self.temp_exc, None
    -                self.things_to_do = bool(self.to_delete)
    -                raise e
    -            while self.to_delete:
    -                delete, self.to_delete = self.to_delete, []
    -                for obj in delete:
    -                    obj.parent = obj.costate.current
    -                    obj._kill_finally()
    -            else:
    -                self.things_to_do = False
    -
    -        def _freeze_(self):
    -            self.reset()
    -            return False
    -
    -    syncstate = SyncState()
    -
    -
    -    class CoroutineExit(SystemExit):
    -        # XXX SystemExit's __init__ creates problems in bookkeeper.
    -        def __init__(self):
    -            pass
    -
    -    class AbstractThunk(object):
    -        def call(self):
    -            raise NotImplementedError("abstract base class")
    -
    -
    -    class Coroutine(baseclass):
    -        def __init__(self, state=None):
    -            self.frame = None
    -            if state is None:
    -                state = self._get_default_costate()
    -            self.costate = state
    -            self.parent = None
    -            self.thunk = None
    -            self.coroutine_exit = False
    -
    -        def __repr__(self):
    -            'NOT_RPYTHON'
    -            # just for debugging
    -            if hasattr(self, '__name__'):
    -                return '' % (self.__name__, self.frame, self.thunk is not None)
    -            else:
    -                return '' % (self.frame, self.thunk is not None)
    -
    -        def _get_default_costate():
    -            state = syncstate.default_costate
    -            if state is None:
    -                state = syncstate.default_costate = CoState()
    -            return state
    -        _get_default_costate = staticmethod(_get_default_costate)
    -
    -        def _get_default_parent(self):
    -            return self.costate.current
    -
    -        def bind(self, thunk):
    -            assert isinstance(thunk, AbstractThunk)
    -            if self.frame is not None:
    -                raise CoroutineDamage
    -            if self.parent is None:
    -                self.parent = self._get_default_parent()
    -            assert self.parent is not None
    -            self.thunk = thunk
    -            if we_are_translated():
    -                self.frame = self._bind()
    -            else:
    -                self.frame = self._greenlet_bind()
    -
    -        def _greenlet_bind(self):
    -            weak = [self]
    -            def _greenlet_execute(incoming_frame):
    -                try:
    -                    chain2go2next = weak[0]._execute(incoming_frame)
    -                except:
    -                    # no exception is supposed to get out of _execute()
    -                    # better report it directly into the main greenlet then,
    -                    # and hidden to prevent catching
    -                    main_greenlet.throw(AssertionError(
    -                        "unexpected exception out of Coroutine._execute()",
    -                        *sys.exc_info()))
    -                    assert 0
    -                del weak[0]
    -                greenlet.getcurrent().parent = chain2go2next.greenlet
    -                return None   # as the result of the FrameChain.switch()
    -            chain = FrameChain(_greenlet_execute)
    -            return chain
    -
    -        def _bind(self):
    -            state = self.costate
    -            incoming_frame = yield_current_frame_to_caller()
    -            self = state.current
    -            return self._execute(incoming_frame)
    -
    -        def _execute(self, incoming_frame):
    -            state = self.costate
    -            try:
    -                try:
    -                    try:
    -                        exc = None
    -                        thunk = self.thunk
    -                        self.thunk = None
    -                        syncstate.switched(incoming_frame)
    -                        thunk.call()
    -                    except Exception, e:
    -                        exc = e
    -                        raise
    -                finally:
    -                    # warning! we must reload the 'self' from the costate,
    -                    # because after a clone() the 'self' of both copies
    -                    # point to the original!
    -                    self = state.current
    -                    self.finish(exc)
    -            except CoroutineExit:
    -                pass
    -            except Exception, e:
    -                if self.coroutine_exit is False:
    -                    # redirect all unhandled exceptions to the parent
    -                    syncstate.push_exception(e)
    -
    -            while self.parent is not None and self.parent.frame is None:
    -                # greenlet behavior is fine
    -                self.parent = self.parent.parent
    -            return state.update(self.parent)
    -
    -        def switch(self):
    -            if self.frame is None:
    -                # considered a programming error.
    -                # greenlets and tasklets have different ideas about this.
    -                raise CoroutineDamage
    -            state = self.costate
    -            incoming_frame = state.update(self).switch()
    -            syncstate.switched(incoming_frame)
    -
    -        def kill(self):
    -            self._kill(CoroutineExit())
    -
    -        def _kill(self, exc):
    -            if self.frame is None:
    -                return
    -            state = self.costate
    -            syncstate.push_exception(exc)
    -            # careful here - if setting self.parent to state.current would
    -            # create a loop, break it.  The assumption is that 'self'
    -            # will die, so that state.current's chain of parents can be
    -            # modified to skip 'self' without too many people noticing.
    -            p = state.current
    -            if p is self or self.parent is None:
    -                pass  # killing the current of the main - don't change any parent
    -            else:
    -                while p.parent is not None:
    -                    if p.parent is self:
    -                        p.parent = self.parent
    -                        break
    -                    p = p.parent
    -                self.parent = state.current
    -            self.switch()
    -
    -        def _kill_finally(self):
    -            try:
    -                self._userdel()
    -            except Exception:
    -                pass # maybe print a warning?
    -            self.kill()
    -
    -        __already_postponed = False
    -    
    -        def __del__(self):
    -            # provide the necessary clean-up
    -            # note that AppCoroutine has to take care about this
    -            # as well, including a check for user-supplied __del__.
    -            # Additionally note that in the context of __del__, we are
    -            # not in the position to issue a switch.
    -            # we defer it completely.
    -            
    -            # it is necessary to check whether syncstate is None because CPython
    -            # sets it to None when it cleans up the modules, which will lead to
    -            # very strange effects
    -
    -            if not we_are_translated():
    -                # we need to make sure that we postpone each coroutine only once on
    -                # top of CPython, because this resurrects the coroutine and CPython
    -                # calls __del__ again, thus postponing and resurrecting the
    -                # coroutine once more :-(
    -                if self.__already_postponed:
    -                    return
    -                self.__already_postponed = True
    -            if syncstate is not None:
    -                syncstate.postpone_deletion(self)
    -
    -        # coroutines need complete control over their __del__ behaviour. In
    -        # particular they need to care about calling space.userdel themselves
    -        handle_del_manually = True
    -
    -        def _userdel(self):
    -            # override this for exposed coros
    -            pass
    -
    -        def is_alive(self):
    -            return self.frame is not None or self is self.costate.current
    -
    -        def is_zombie(self):
    -            return self.frame is not None and syncstate.check_for_zombie(self)
    -
    -        def getcurrent():
    -            costate = Coroutine._get_default_costate()
    -            return costate.current
    -        getcurrent = staticmethod(getcurrent)
    -
    -        def getmain():
    -            costate = Coroutine._get_default_costate()
    -            return costate.main
    -        getmain = staticmethod(getmain)
    -
    -        def hello(self):
    -            "Called when execution is transferred into this coroutine."
    -
    -        def goodbye(self):
    -            "Called just after execution is transferred away from this coroutine."
    -
    -        def finish(self, exc=None):
    -            "stephan forgot me"
    -
    -    return locals()
    -
    -# _________________________________________________
    diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py
    --- a/pypy/rlib/runicode.py
    +++ b/pypy/rlib/runicode.py
    @@ -1403,7 +1403,7 @@
                                         s, pos, pos + unicode_bytes)
                 result.append(res)
                 continue
    -        result.append(unichr(t))
    +        result.append(UNICHR(t))
             pos += unicode_bytes
         return result.build(), pos
     
    diff --git a/pypy/rlib/test/test_rcoroutine.py b/pypy/rlib/test/test_rcoroutine.py
    deleted file mode 100644
    --- a/pypy/rlib/test/test_rcoroutine.py
    +++ /dev/null
    @@ -1,348 +0,0 @@
    -"""
    -testing coroutines at interprepter level
    -"""
    -import py
    -import os
    -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect()
    -from pypy.rlib.rcoroutine import make_coroutine_classes
    -from pypy.translator.c.test.test_stackless import StacklessTest
    -from pypy.translator.c import gc
    -
    -def setup_module(mod):
    -    py.test.importorskip('greenlet')
    -
    -d = make_coroutine_classes(object)
    -syncstate = d['syncstate']
    -Coroutine = d['Coroutine']
    -AbstractThunk = d['AbstractThunk']
    -
    -def output(stuff):
    -    os.write(2, stuff + '\n')
    -
    -class _TestCoroutine(StacklessTest):
    -    backendopt = True
    -    Coroutine = Coroutine
    -
    -    def setup_method(self, method):
    -        syncstate.reset()
    -
    -    def _freeze_(self):    # for 'self.Coroutine'
    -        return True
    -
    -    def test_coroutine1(self):
    -
    -        def g(lst, coros):
    -            coro_f, coro_g, coro_h = coros
    -            lst.append(2)
    -            output('g appended 2')
    -            coro_h.switch()
    -            lst.append(5)
    -            output('g appended 5')
    -
    -        def h(lst, coros):
    -            coro_f, coro_g, coro_h = coros
    -            lst.append(3)
    -            output('h appended 3')
    -            coro_f.switch()
    -            lst.append(7)
    -            output('h appended 7')
    -
    -        class T(AbstractThunk):
    -            def __init__(self, func, arg1, arg2):
    -                self.func = func
    -                self.arg1 = arg1
    -                self.arg2 = arg2
    -            def call(self):
    -                self.func(self.arg1, self.arg2)
    -
    -        def f():
    -            lst = [1]
    -            coro_f = Coroutine.getcurrent()
    -            coro_g = self.Coroutine()
    -            coro_h = self.Coroutine()
    -            coros = [coro_f, coro_g, coro_h]
    -            thunk_g = T(g, lst, coros)
    -            output('binding g after f set 1')
    -            coro_g.bind(thunk_g)
    -            thunk_h = T(h, lst, coros)
    -            output('binding h after f set 1')
    -            coro_h.bind(thunk_h)
    -            output('switching to g')
    -            coro_g.switch()
    -            lst.append(4)
    -            output('f appended 4')
    -            coro_g.switch()
    -            lst.append(6)
    -            output('f appended 6')
    -            coro_h.switch()
    -            lst.append(8)
    -            output('f appended 8')
    -            n = 0
    -            for i in lst:
    -                n = n*10 + i
    -            return n
    -
    -        data = self.wrap_stackless_function(f)
    -        assert data == 12345678
    -
    -    def test_coroutine2(self):
    -
    -        class TBase(AbstractThunk):
    -            def call(self):
    -                pass
    -
    -        class T(TBase):
    -            def __init__(self, func, arg1, arg2):
    -                self.func = func
    -                self.arg1 = arg1
    -                self.arg2 = arg2
    -            def call(self):
    -                self.res = self.func(self.arg1, self.arg2)
    -
    -        class T1(TBase):
    -            def __init__(self, func, arg1):
    -                self.func = func
    -                self.arg1 = arg1
    -            def call(self):
    -                self.res = self.func(self.arg1)
    -
    -        def g(lst, coros):
    -            coro_f1, coro_g, coro_h = coros
    -            lst.append(2)
    -            output('g appended 2')
    -            coro_h.switch()
    -            lst.append(5)
    -            output('g appended 5')
    -            output('exiting g')
    -
    -        def h(lst, coros):
    -            coro_f1, coro_g, coro_h = coros
    -            lst.append(3)
    -            output('h appended 3')
    -            coro_f1.switch()
    -            lst.append(7)
    -            output('h appended 7')
    -            output('exiting h')
    -
    -        def f1(coro_f1):
    -            lst = [1]
    -            coro_g = self.Coroutine()
    -            coro_g.__name__ = 'coro_g'
    -            coro_h = self.Coroutine()
    -            coro_h.__name__ = 'coro_h'
    -            coros = [coro_f1, coro_g, coro_h]
    -            thunk_g = T(g, lst, coros)
    -            output('binding g after f1 set 1')
    -            coro_g.bind(thunk_g)
    -            thunk_h = T(h, lst, coros)
    -            output('binding h after f1 set 1')
    -            coro_h.bind(thunk_h)
    -            output('switching to g')
    -            coro_g.switch()
    -            lst.append(4)
    -            output('f1 appended 4')
    -            coro_g.switch()
    -            lst.append(6)
    -            output('f1 appended 6')
    -            coro_h.switch()
    -            lst.append(8)
    -            output('f1 appended 8')
    -            n = 0
    -            for i in lst:
    -                n = n*10 + i
    -            output('exiting f1')
    -            return n     
    -
    -        def f():
    -            coro_f = Coroutine.getcurrent()
    -            coro_f.__name__ = 'coro_f'
    -            coro_f1 = self.Coroutine()
    -            coro_f1.__name__ = 'coro_f1'
    -            thunk_f1 = T1(f1, coro_f1)
    -            output('binding f1 after f set 1')
    -            coro_f1.bind(thunk_f1)
    -            coro_f1.switch()
    -            output('return to main :-(')
    -            return thunk_f1.res
    -
    -        data = self.wrap_stackless_function(f)
    -        assert data == 12345678
    -
    -    def test_kill_raise_del_coro(self):
    -        class T(AbstractThunk):
    -            def __init__(self, func, arg):
    -                self.func = func
    -                self.arg = arg
    -            def call(self):
    -                self.func(self.arg, self)
    -
    -        def g(nrec, t, count=0):
    -            t.count = count
    -            if nrec < 0:
    -                raise ValueError
    -            if nrec:
    -                g(nrec-1, t, count+1)
    -            Coroutine.getmain().switch()
    -
    -        def f():
    -            assert Coroutine.getmain().frame is None
    -            coro_g = self.Coroutine()
    -            coro_g.__name__ = 'coro_g'
    -            thunk_g = T(g, 42)
    -            coro_g.bind(thunk_g)
    -            coro_g.switch()
    -            res = thunk_g.count
    -            res *= 10
    -            res |= coro_g.frame is not None
    -            # testing kill
    -            coro_g.kill()
    -            res *= 10
    -            res |= coro_g.frame is None
    -            coro_g = self.Coroutine()
    -            # see what happens if we __del__
    -            thunk_g = T(g, -42)
    -            coro_g.bind(thunk_g)
    -            try:
    -                coro_g.switch()
    -            except ValueError:
    -                res += 500
    -            return res
    -
    -        data = self.wrap_stackless_function(f)
    -        assert data == 4711
    -
    -    def test_tree_compare(self):
    -        class Node:
    -            def __init__(self, value, left=None, right=None):
    -                self.value = value
    -                self.left = left
    -                self.right = right
    -            def __repr__(self):
    -                return 'Node(%r, %r, %r)'%(self.value, self.left, self.right)
    -
    -        tree1 = Node(1, Node(2, Node(3)))
    -        tree2 = Node(1, Node(3, Node(2)))
    -        tree3 = Node(1, Node(2), Node(3))
    -
    -        class Producer(AbstractThunk):
    -            def __init__(self, tree, objects, consumer):
    -                self.tree = tree
    -                self.objects = objects
    -                self.consumer = consumer
    -            def produce(self, t):
    -                if t is None:
    -                    return
    -                self.objects.append(t.value)
    -                self.consumer.switch()
    -                self.produce(t.left)
    -                self.produce(t.right)
    -            def call(self):
    -                self.produce(self.tree)
    -                while 1:
    -                    self.consumer.switch()
    -        class Consumer(AbstractThunk):
    -            def __init__(self, tree, objects, producer):
    -                self.tree = tree
    -                self.objects = objects
    -                self.producer = producer
    -            def consume(self, t):
    -                if t is None:
    -                    return True
    -                self.producer.switch()
    -                if not self.objects:
    -                    return False
    -                if self.objects.pop(0) != t.value:
    -                    return False
    -                if not self.consume(t.left):
    -                    return False
    -                return self.consume(t.right)
    -
    -            def call(self):
    -                self.result = self.consume(self.tree)
    -                Coroutine.getmain().switch()
    -
    -        def pre_order_eq(t1, t2):
    -            objects = []
    -            producer = self.Coroutine()
    -            consumer = self.Coroutine()
    -
    -            producer.bind(Producer(t1, objects, consumer))
    -            cons = Consumer(t2, objects, producer)
    -            consumer.bind(cons)
    -
    -            consumer.switch()
    -
    -            return cons.result
    -
    -        def ep():
    -            return int("%d%d%d%d"%(pre_order_eq(tree1, tree2),
    -                                   pre_order_eq(tree1, tree1),
    -                                   pre_order_eq(tree1, tree3),
    -                                   pre_order_eq(tree2, tree1),
    -                                   ))
    -
    -        output = self.wrap_stackless_function(ep)
    -        assert output == int('0110')
    -
    -    def test_hello_goodbye(self):
    -
    -        class C(Coroutine):
    -            n = 2
    -            def __init__(self, n):
    -                Coroutine.__init__(self)
    -                self.n = n
    -            def hello(self):
    -                costate.hello_goodbye *= 10
    -                costate.hello_goodbye += self.n
    -            def goodbye(self):
    -                costate.hello_goodbye *= 10
    -                costate.hello_goodbye += self.n + 1
    -
    -        class T(AbstractThunk):
    -            def call(self):
    -                pass
    -
    -        costate = Coroutine._get_default_costate()
    -        costate.current.__class__ = C
    -        costate.hello_goodbye = 0
    -
    -        def ep():
    -            syncstate.default_costate = costate
    -            costate.hello_goodbye = 0
    -            c1 = C(4)
    -            c1.bind(T())
    -            c1.switch()
    -            return costate.hello_goodbye
    -
    -        output = self.wrap_stackless_function(ep)
    -        # expected result:
    -        #   goodbye main   3
    -        #   hello   c1     4
    -        #   goodbye c1     5
    -        #   hello   main   2
    -        assert output == 3452
    -
    -    def test_raise_propagate(self):
    -        class T(AbstractThunk):
    -            def call(self):
    -                raise ValueError
    -
    -        def ep():
    -            c = self.Coroutine()
    -            c.bind(T())
    -            try:
    -                c.switch()
    -            except ValueError:
    -                return 100
    -            else:
    -                return -5
    -
    -        output = self.wrap_stackless_function(ep)
    -        assert output == 100
    -
    -
    -TestCoroutine = _TestCoroutine # to activate
    -class TestCoroutineOnCPython(_TestCoroutine):
    -    def wrap_stackless_function(self, func):
    -        return func()
    -
    diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py
    --- a/pypy/rpython/lltypesystem/ll2ctypes.py
    +++ b/pypy/rpython/lltypesystem/ll2ctypes.py
    @@ -113,7 +113,7 @@
             rffi.LONGLONG:   ctypes.c_longlong,
             rffi.ULONGLONG:  ctypes.c_ulonglong,
             rffi.SIZE_T:     ctypes.c_size_t,
    -        lltype.Bool:     ctypes.c_bool,
    +        lltype.Bool:     getattr(ctypes, "c_bool", ctypes.c_long),
             llmemory.Address:  ctypes.c_void_p,
             llmemory.GCREF:    ctypes.c_void_p,
             llmemory.WeakRef:  ctypes.c_void_p, # XXX
    @@ -1153,7 +1153,11 @@
             # an OverflowError on the following line.
             cvalue = ctypes.cast(ctypes.c_void_p(cvalue), cresulttype)
         else:
    -        cvalue = cresulttype(cvalue).value   # mask high bits off if needed
    +        try:
    +            cvalue = cresulttype(cvalue).value   # mask high bits off if needed
    +        except TypeError:
    +            cvalue = int(cvalue)   # float -> int
    +            cvalue = cresulttype(cvalue).value   # try again
         return ctypes2lltype(RESTYPE, cvalue)
     
     class ForceCastEntry(ExtRegistryEntry):
    diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py
    --- a/pypy/rpython/lltypesystem/lltype.py
    +++ b/pypy/rpython/lltypesystem/lltype.py
    @@ -1283,6 +1283,8 @@
             try:
                 return p._obj._hash_cache_
             except AttributeError:
    +            assert self._T._gckind == 'gc'
    +            assert self      # not for NULL
                 result = hash(p._obj)
                 if cache:
                     try:
    diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py
    --- a/pypy/rpython/lltypesystem/test/test_rffi.py
    +++ b/pypy/rpython/lltypesystem/test/test_rffi.py
    @@ -699,7 +699,10 @@
         def test_cast(self):
             res = cast(SIZE_T, -1)
             assert type(res) is r_size_t
    -        assert res == r_size_t(-1)    
    +        assert res == r_size_t(-1)
    +        #
    +        res = cast(lltype.Signed, 42.5)
    +        assert res == 42
         
         def test_rffi_sizeof(self):
             try:
    diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py
    --- a/pypy/rpython/memory/gc/minimark.py
    +++ b/pypy/rpython/memory/gc/minimark.py
    @@ -1461,6 +1461,7 @@
             # We will fix such references to point to the copy of the young
             # objects when we walk 'old_objects_pointing_to_young'.
             self.old_objects_pointing_to_young.append(newobj)
    +    _trace_drag_out._always_inline_ = True
     
         def _visit_young_rawmalloced_object(self, obj):
             # 'obj' points to a young, raw-malloced object.
    diff --git a/pypy/rpython/memory/gctypelayout.py b/pypy/rpython/memory/gctypelayout.py
    --- a/pypy/rpython/memory/gctypelayout.py
    +++ b/pypy/rpython/memory/gctypelayout.py
    @@ -459,7 +459,7 @@
                 if t._hints.get('immutable'):
                     return
                 if 'immutable_fields' in t._hints:
    -                skip = t._hints['immutable_fields'].fields
    +                skip = t._hints['immutable_fields'].all_immutable_fields()
             for n, t2 in t._flds.iteritems():
                 if isinstance(t2, lltype.Ptr) and t2.TO._gckind == 'gc':
                     if n not in skip:
    diff --git a/pypy/rpython/memory/test/test_gctypelayout.py b/pypy/rpython/memory/test/test_gctypelayout.py
    --- a/pypy/rpython/memory/test/test_gctypelayout.py
    +++ b/pypy/rpython/memory/test/test_gctypelayout.py
    @@ -4,7 +4,7 @@
     from pypy.rpython.memory.gctypelayout import gc_pointers_inside
     from pypy.rpython.lltypesystem import lltype, llmemory, rclass
     from pypy.rpython.test.test_llinterp import get_interpreter
    -from pypy.rpython.rclass import IR_IMMUTABLE
    +from pypy.rpython.rclass import IR_IMMUTABLE, IR_QUASIIMMUTABLE
     from pypy.objspace.flow.model import Constant
     
     class FakeGC:
    @@ -102,7 +102,7 @@
         accessor = rclass.FieldListAccessor()
         S3 = lltype.GcStruct('S', ('x', PT), ('y', PT),
                              hints={'immutable_fields': accessor})
    -    accessor.initialize(S3, {'x': IR_IMMUTABLE})
    +    accessor.initialize(S3, {'x': IR_IMMUTABLE, 'y': IR_QUASIIMMUTABLE})
         #
         s1 = lltype.malloc(S1)
         adr = llmemory.cast_ptr_to_adr(s1)
    diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py
    --- a/pypy/rpython/module/ll_os_stat.py
    +++ b/pypy/rpython/module/ll_os_stat.py
    @@ -49,19 +49,8 @@
         ]
     N_INDEXABLE_FIELDS = 10
     
    -# for now, check the host Python to know which st_xxx fields exist
    -STAT_FIELDS = [(_name, _TYPE) for (_name, _TYPE) in ALL_STAT_FIELDS
    -                              if hasattr(os.stat_result, _name)]
    -
    -STAT_FIELD_TYPES = dict(STAT_FIELDS)      # {'st_xxx': TYPE}
    -
    -STAT_FIELD_NAMES = [_name for (_name, _TYPE) in ALL_STAT_FIELDS
    -                          if _name in STAT_FIELD_TYPES]
    -
    -del _name, _TYPE
    -
     # For OO backends, expose only the portable fields (the first 10).
    -PORTABLE_STAT_FIELDS = STAT_FIELDS[:N_INDEXABLE_FIELDS]
    +PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS]
     
     # ____________________________________________________________
     #
    @@ -142,17 +131,22 @@
         includes = INCLUDES
     )
     
    -if sys.platform != 'win32':
    +if TIMESPEC is not None:
    +    class CConfig_for_timespec:
    +        _compilation_info_ = compilation_info
    +        TIMESPEC = TIMESPEC
    +    TIMESPEC = lltype.Ptr(
    +        platform.configure(CConfig_for_timespec)['TIMESPEC'])
    +
    +
    +def posix_declaration(try_to_add=None):
    +    global STAT_STRUCT
     
         LL_STAT_FIELDS = STAT_FIELDS[:]
    +    if try_to_add:
    +        LL_STAT_FIELDS.append(try_to_add)
     
         if TIMESPEC is not None:
    -        class CConfig_for_timespec:
    -            _compilation_info_ = compilation_info
    -            TIMESPEC = TIMESPEC
    -
    -        TIMESPEC = lltype.Ptr(
    -            platform.configure(CConfig_for_timespec)['TIMESPEC'])
     
             def _expand(lst, originalname, timespecname):
                 for i, (_name, _TYPE) in enumerate(lst):
    @@ -178,9 +172,34 @@
         class CConfig:
             _compilation_info_ = compilation_info
             STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS)
    -    config = platform.configure(CConfig)
    +    try:
    +        config = platform.configure(CConfig)
    +    except platform.CompilationError:
    +        if try_to_add:
    +            return    # failed to add this field, give up
    +        raise
     
         STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT'])
    +    if try_to_add:
    +        STAT_FIELDS.append(try_to_add)
    +
    +
    +# This lists only the fields that have been found on the underlying platform.
    +# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the
    +# following loop.
    +STAT_FIELDS = PORTABLE_STAT_FIELDS[:]
    +
    +if sys.platform != 'win32':
    +    posix_declaration()
    +    for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)):
    +        posix_declaration(ALL_STAT_FIELDS[_i])
    +    del _i
    +
    +# these two global vars only list the fields defined in the underlying platform
    +STAT_FIELD_TYPES = dict(STAT_FIELDS)      # {'st_xxx': TYPE}
    +STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS]
    +del _name, _TYPE
    +
     
     def build_stat_result(st):
         # only for LL backends
    diff --git a/pypy/rpython/module/test/test_ll_os_stat.py b/pypy/rpython/module/test/test_ll_os_stat.py
    --- a/pypy/rpython/module/test/test_ll_os_stat.py
    +++ b/pypy/rpython/module/test/test_ll_os_stat.py
    @@ -2,6 +2,16 @@
     import sys, os
     import py
     
    +
    +class TestLinuxImplementation:
    +    def setup_class(cls):
    +        if not sys.platform.startswith('linux'):
    +            py.test.skip("linux specific tests")
    +
    +    def test_has_all_fields(self):
    +        assert ll_os_stat.STAT_FIELDS == ll_os_stat.ALL_STAT_FIELDS[:13]
    +
    +
     class TestWin32Implementation:
         def setup_class(cls):
             if sys.platform != 'win32':
    diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py
    --- a/pypy/rpython/rclass.py
    +++ b/pypy/rpython/rclass.py
    @@ -16,6 +16,13 @@
             for x in fields.itervalues():
                 assert isinstance(x, ImmutableRanking)
     
    +    def all_immutable_fields(self):
    +        result = set()
    +        for key, value in self.fields.iteritems():
    +            if value in (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY):
    +                result.add(key)
    +        return result
    +
         def __repr__(self):
             return '' % getattr(self, 'TYPE', '?')
     
    diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
    --- a/pypy/tool/jitlogparser/parser.py
    +++ b/pypy/tool/jitlogparser/parser.py
    @@ -8,6 +8,7 @@
         bridge = None
         offset = None
         asm = None
    +    failargs = ()
     
         def __init__(self, name, args, res, descr):
             self.name = name
    @@ -18,8 +19,8 @@
             if self._is_guard:
                 self.guard_no = int(self.descr[len('> sys.stderr, "skipping", path
    +        return
    +    for fn in content:
    +        filename = os.path.join(path, fn)
    +        st = os.lstat(filename)
    +        if stat.S_ISDIR(st.st_mode):
    +            clean(filename)
    +            if fn == '__pycache__':
    +                try:
    +                    os.rmdir(filename)
    +                except OSError:
    +                    pass
    +        elif fn.endswith('.pyc') or fn.endswith('.pyo'):
    +            os.unlink(filename)
    +            count += 1
     
     count = 0
     
     for arg in sys.argv[1:] or ['.']:
    -    path = py.path.local(arg)
    -    print "cleaning path", path, "of .pyc files"
    -    for x in path.visit(shouldremove, lambda x: x.check(dotfile=0, link=0)):
    -        x.remove()
    -        count += 1
    +    print "cleaning path", arg, "of .pyc/.pyo/__pycache__ files"
    +    clean(arg)
     
     print "%d files removed" % (count,)
    diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
    --- a/pypy/tool/release/package.py
    +++ b/pypy/tool/release/package.py
    @@ -52,9 +52,14 @@
                 pypy_c_dir = basedir.join('pypy', 'translator', 'goal')
             pypy_c = pypy_c_dir.join('pypy-c.exe')
             libpypy_c = pypy_c_dir.join('libpypy-c.dll')
    +        libexpat = pypy_c_dir.join('libexpat.dll')
    +        if not libexpat.check():
    +            libexpat = py.path.local.sysfind('libexpat.dll')
    +            assert libexpat, "libexpat.dll not found"
    +            print "Picking %s" % libexpat
             binaries = [(pypy_c, pypy_c.basename),
                         (libpypy_c, libpypy_c.basename),
    -                    (pypy_c_dir.join('libexpat.dll'), 'libexpat.dll')]
    +                    (libexpat, libexpat.basename)]
         else:
             basename = 'pypy-c'
             if override_pypy_c is None:
    diff --git a/pypy/translator/c/src/stacklet/stacklet.c b/pypy/translator/c/src/stacklet/stacklet.c
    --- a/pypy/translator/c/src/stacklet/stacklet.c
    +++ b/pypy/translator/c/src/stacklet/stacklet.c
    @@ -319,10 +319,11 @@
     
     char **_stacklet_translate_pointer(stacklet_handle context, char **ptr)
     {
    +  char *p = (char *)ptr;
    +  long delta;
       if (context == NULL)
         return ptr;
    -  char *p = (char *)ptr;
    -  long delta = p - context->stack_start;
    +  delta = p - context->stack_start;
       if (((unsigned long)delta) < ((unsigned long)context->stack_saved)) {
           /* a pointer to a saved away word */
           char *c = (char *)(context + 1);
    diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py
    --- a/pypy/translator/goal/app_main.py
    +++ b/pypy/translator/goal/app_main.py
    @@ -260,6 +260,8 @@
         try:
             import _file
         except ImportError:
    +        if sys.version_info < (2, 7):
    +            return
             import ctypes # HACK: while running on top of CPython
             set_file_encoding = ctypes.pythonapi.PyFile_SetEncodingAndErrors
             set_file_encoding.argtypes = [ctypes.py_object, ctypes.c_char_p, ctypes.c_char_p]
    @@ -479,7 +481,8 @@
                 print >> sys.stderr, "'import site' failed"
     
         readenv = not ignore_environment
    -    io_encoding = readenv and os.getenv("PYTHONIOENCODING")
    +    io_encoding = ((readenv and os.getenv("PYTHONIOENCODING"))
    +                   or sys.getfilesystemencoding())
         if io_encoding:
             set_io_encoding(io_encoding)
     
    diff --git a/pypy/translator/goal/targetpreimportedpypy.py b/pypy/translator/goal/targetpreimportedpypy.py
    deleted file mode 100644
    --- a/pypy/translator/goal/targetpreimportedpypy.py
    +++ /dev/null
    @@ -1,239 +0,0 @@
    -import py
    -
    -import os, sys
    -sys.setrecursionlimit(17000)
    -
    -from pypy.interpreter import gateway
    -from pypy.interpreter.error import OperationError
    -from pypy.translator.goal.ann_override import PyPyAnnotatorPolicy
    -from pypy.config.config import Config, to_optparse, make_dict, SUPPRESS_USAGE
    -from pypy.config.config import ConflictConfigError
    -from pypy.tool.option import make_objspace
    -from pypy.translator.goal.nanos import setup_nanos
    -
    -EXTRA_MODULES = [
    -    #"os",
    -    #"decimal",
    -    #"difflib",
    -    #"tarfile",
    -    #"cookielib",
    -    #"optparse",
    -    "inspect",
    -    "random",
    -]
    -
    -thisdir = py.path.local(__file__).dirpath()
    -
    -try:
    -    this_dir = os.path.dirname(__file__)
    -except NameError:
    -    this_dir = os.path.dirname(sys.argv[0])
    -
    -def debug(msg):
    -    os.write(2, "debug: " + msg + '\n')
    -
    -# __________  Entry point  __________
    -
    -def create_entry_point(space, w_dict):
    -    w_entry_point = space.getitem(w_dict, space.wrap('entry_point'))
    -    w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel'))
    -    w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish))
    -    w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup))
    -    w_os = setup_nanos(space)
    -
    -    def entry_point(argv):
    -        space.timer.start("Entrypoint")
    -        #debug("entry point starting") 
    -        #for arg in argv: 
    -        #    debug(" argv -> " + arg)
    -        if len(argv) > 2 and argv[1] == '--heapsize':
    -            # Undocumented option, handled at interp-level.
    -            # It has silently no effect with some GCs.
    -            # It works in Boehm and in the semispace or generational GCs
    -            # (but see comments in semispace.py:set_max_heap_size()).
    -            # At the moment this option exists mainly to support sandboxing.
    -            from pypy.rlib import rgc
    -            rgc.set_max_heap_size(int(argv[2]))
    -            argv = argv[:1] + argv[3:]
    -        try:
    -            try:
    -                space.timer.start("space.startup")
    -                space.call_function(w_run_toplevel, w_call_startup_gateway)
    -                space.timer.stop("space.startup")
    -                w_executable = space.wrap(argv[0])
    -                w_argv = space.newlist([space.wrap(s) for s in argv[1:]])
    -                space.timer.start("w_entry_point")
    -                w_exitcode = space.call_function(w_entry_point, w_executable, w_argv, w_os)
    -                space.timer.stop("w_entry_point")
    -                exitcode = space.int_w(w_exitcode)
    -                # try to pull it all in
    -            ##    from pypy.interpreter import main, interactive, error
    -            ##    con = interactive.PyPyConsole(space)
    -            ##    con.interact()
    -            except OperationError, e:
    -                debug("OperationError:")
    -                debug(" operror-type: " + e.w_type.getname(space))
    -                debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
    -                return 1
    -        finally:
    -            try:
    -                space.timer.start("space.finish")
    -                space.call_function(w_run_toplevel, w_call_finish_gateway)
    -                space.timer.stop("space.finish")
    -            except OperationError, e:
    -                debug("OperationError:")
    -                debug(" operror-type: " + e.w_type.getname(space))
    -                debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
    -                return 1
    -        space.timer.stop("Entrypoint")
    -        space.timer.dump()
    -        return exitcode
    -    return entry_point
    -
    -def call_finish(space):
    
    From noreply at buildbot.pypy.org  Thu Sep  8 05:37:33 2011
    From: noreply at buildbot.pypy.org (wlav)
    Date: Thu,  8 Sep 2011 05:37:33 +0200 (CEST)
    Subject: [pypy-commit] pypy reflex-support: turn type handles into
     rffi.LONGs to make sure they're opaque
    Message-ID: <20110908033733.6B1FF82213@wyvern.cs.uni-duesseldorf.de>
    
    Author: Wim Lavrijsen 
    Branch: reflex-support
    Changeset: r47155:532fbe4e0dd4
    Date: 2011-09-07 20:38 -0700
    http://bitbucket.org/pypy/pypy/changeset/532fbe4e0dd4/
    
    Log:	turn type handles into rffi.LONGs to make sure they're opaque
    
    diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/cppyy/capi/__init__.py
    --- a/pypy/module/cppyy/capi/__init__.py
    +++ b/pypy/module/cppyy/capi/__init__.py
    @@ -5,7 +5,7 @@
     #import cint_capi as backend
     
     
    -C_TYPEHANDLE = rffi.VOIDP
    +C_TYPEHANDLE = rffi.LONG
     C_OBJECT = rffi.VOIDP
     
     C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP)
    diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h
    --- a/pypy/module/cppyy/include/capi.h
    +++ b/pypy/module/cppyy/include/capi.h
    @@ -6,7 +6,7 @@
     #ifdef __cplusplus
     extern "C" {
     #endif // ifdef __cplusplus
    -    typedef void* cppyy_typehandle_t;
    +    typedef long cppyy_typehandle_t;
         typedef void* cppyy_object_t;
         typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t);
     
    diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
    --- a/pypy/module/cppyy/interp_cppyy.py
    +++ b/pypy/module/cppyy/interp_cppyy.py
    @@ -31,7 +31,8 @@
     
     class State(object):
         def __init__(self, space):
    -        self.cpptype_cache = { "void" : W_CPPType(space, "void", NULL_VOIDP) }
    +        self.cpptype_cache = {
    +            "void" : W_CPPType(space, "void", rffi.cast(capi.C_TYPEHANDLE, NULL_VOIDP)) }
             self.cpptemplatetype_cache = {}
     
     @unwrap_spec(name=str)
    @@ -110,7 +111,7 @@
     
         @jit.unroll_safe
         def call(self, cppthis, w_type, args_w):
    -        assert lltype.typeOf(cppthis) == rffi.VOIDP
    +        assert lltype.typeOf(cppthis) == capi.C_OBJECT
             if self.executor is None:
                 raise OperationError(self.space.w_TypeError,
                                      self.space.wrap("return type not handled"))
    @@ -220,7 +221,7 @@
     
         def call(self, cppthis, w_type, args_w):
             newthis = capi.c_allocate(self.cpptype.handle)
    -        assert lltype.typeOf(newthis) == rffi.VOIDP
    +        assert lltype.typeOf(newthis) == capi.C_OBJECT
             try:
                 CPPMethod.call(self, newthis, None, args_w)
             except Exception, e:
    @@ -234,7 +235,7 @@
     
         def __init__(self, space, scope_handle, func_name, functions):
             self.space = space
    -        assert lltype.typeOf(scope_handle) == rffi.VOIDP
    +        assert lltype.typeOf(scope_handle) == capi.C_TYPEHANDLE
             self.scope_handle = scope_handle
             self.func_name = func_name
             self.functions = debug.make_sure_not_resized(functions)
    @@ -260,7 +261,7 @@
         def call(self, w_cppinstance, w_type, args_w):
             cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True)
             cppthis = self._get_cppthis(cppinstance)
    -        assert lltype.typeOf(cppthis) == rffi.VOIDP
    +        assert lltype.typeOf(cppthis) == capi.C_OBJECT
     
             space = self.space
             errmsg = 'None of the overloads matched:'
    @@ -299,7 +300,7 @@
     
         def __init__(self, space, scope_handle, type_name, offset, is_static):
             self.space = space
    -        assert lltype.typeOf(scope_handle) == rffi.VOIDP
    +        assert lltype.typeOf(scope_handle) == capi.C_TYPEHANDLE
             self.scope_handle = scope_handle
             self.converter = converter.get_converter(self.space, type_name)
             self.offset = offset
    @@ -344,10 +345,12 @@
     class W_CPPScope(Wrappable):
         _immutable_fields_ = ["name", "handle"]
     
    +    kind = "scope"
    +
         def __init__(self, space, name, handle):
             self.space = space
             self.name = name
    -        assert lltype.typeOf(handle) == rffi.VOIDP
    +        assert lltype.typeOf(handle) == capi.C_TYPEHANDLE
             self.handle = handle
             self.methods = {}
             # Do not call "self._find_methods()" here, so that a distinction can
    @@ -512,7 +515,7 @@
         def __init__(self, space, name, handle):
             self.space = space
             self.name = name
    -        assert lltype.typeOf(handle) == rffi.VOIDP
    +        assert lltype.typeOf(handle) == capi.C_TYPEHANDLE
             self.handle = handle
     
         def __call__(self, args_w):
    @@ -533,7 +536,7 @@
         def __init__(self, space, cppclass, rawobject, python_owns):
             self.space = space
             self.cppclass = cppclass
    -        assert lltype.typeOf(rawobject) == rffi.VOIDP
    +        assert lltype.typeOf(rawobject) == capi.C_OBJECT
             self.rawobject = rawobject
             self.python_owns = python_owns
     
    diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx
    --- a/pypy/module/cppyy/src/reflexcwrapper.cxx
    +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx
    @@ -47,12 +47,12 @@
     /* name to handle --------------------------------------------------------- */
     cppyy_typehandle_t cppyy_get_typehandle(const char* class_name) {
         Reflex::Scope s = Reflex::Scope::ByName(class_name);
    -    return s.Id();
    +    return (cppyy_typehandle_t)s.Id();
     }
     
     cppyy_typehandle_t cppyy_get_templatehandle(const char* template_name) {
        Reflex::TypeTemplate tt = Reflex::TypeTemplate::ByName(template_name);
    -   return tt.Id();
    +   return (cppyy_typehandle_t)tt.Id();
     }
     
     
    
    From noreply at buildbot.pypy.org  Thu Sep  8 09:19:28 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Thu,  8 Sep 2011 09:19:28 +0200 (CEST)
    Subject: [pypy-commit] pypy space-iterator-improvements: backout
 b02477f065b2, reinstantiate newlist hint. this time we want to use it
    Message-ID: <20110908071928.5D0608203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: space-iterator-improvements
    Changeset: r47156:6dfe74cae2b2
    Date: 2011-09-08 09:18 +0200
    http://bitbucket.org/pypy/pypy/changeset/6dfe74cae2b2/
    
    Log:	backout b02477f065b2, reinstantiate newlist hint. this time we want
    	to use it
    
    diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py
    --- a/pypy/rlib/objectmodel.py
    +++ b/pypy/rlib/objectmodel.py
    @@ -19,6 +19,8 @@
     # def f(...
     #
     
    +from pypy.rpython.extregistry import ExtRegistryEntry
    +
     class _Specialize(object):
         def memo(self):
             """ Specialize functions based on argument values. All arguments has
    @@ -177,6 +179,33 @@
         obj.__class__ = FREED_OBJECT
     
     # ____________________________________________________________
    +
    +def newlist(sizehint=0):
    +    """ Create a new list, but pass a hint how big the size should be
    +    preallocated
    +    """
    +    return []
    +
    +class Entry(ExtRegistryEntry):
    +    _about_ = newlist
    +
    +    def compute_result_annotation(self, s_sizehint):
    +        from pypy.annotation.model import SomeInteger
    +        
    +        assert isinstance(s_sizehint, SomeInteger)
    +        return self.bookkeeper.newlist()
    +
    +    def specialize_call(self, orig_hop, i_sizehint=None):
    +        from pypy.rpython.rlist import rtype_newlist
    +        # fish a bit hop
    +        hop = orig_hop.copy()
    +        v = hop.args_v[0]
    +        r, s = hop.r_s_popfirstarg()
    +        if s.is_constant():
    +            v = hop.inputconst(r, s.const)
    +        return rtype_newlist(hop, v_sizehint=v)
    +
    +# ____________________________________________________________
     #
     # id-like functions.  The idea is that calling hash() or id() is not
     # allowed in RPython.  You have to call one of the following more
    @@ -301,8 +330,6 @@
     
     # ----------
     
    -from pypy.rpython.extregistry import ExtRegistryEntry
    -
     class Entry(ExtRegistryEntry):
         _about_ = compute_hash
     
    diff --git a/pypy/rlib/test/test_objectmodel.py b/pypy/rlib/test/test_objectmodel.py
    --- a/pypy/rlib/test/test_objectmodel.py
    +++ b/pypy/rlib/test/test_objectmodel.py
    @@ -424,3 +424,32 @@
         if option.view:
             graph.show()
         return graph
    +
    +
    +def test_newlist():
    +    from pypy.annotation.model import SomeInteger
    +    def f(z):
    +        x = newlist(sizehint=38)
    +        if z < 0:
    +            x.append(1)
    +        return len(x)
    +
    +    graph = getgraph(f, [SomeInteger()])
    +    for llop in graph.startblock.operations:
    +        if llop.opname == 'malloc_varsize':
    +            break
    +    assert llop.args[2].value == 38
    +
    +def test_newlist_nonconst():
    +    from pypy.annotation.model import SomeInteger
    +    def f(z):
    +        x = newlist(sizehint=z)
    +        return len(x)
    +
    +    graph = getgraph(f, [SomeInteger()])
    +    for llop in graph.startblock.operations:
    +        if llop.opname == 'malloc_varsize':
    +            break
    +    assert llop.args[2] is graph.startblock.inputargs[0]
    +
    +    
    diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py
    --- a/pypy/rpython/lltypesystem/rlist.py
    +++ b/pypy/rpython/lltypesystem/rlist.py
    @@ -1,21 +1,14 @@
     from pypy.tool.pairtype import pairtype, pair
    -from pypy.annotation import model as annmodel
    -from pypy.rpython.error import TyperError
    -from pypy.rpython.rmodel import Repr, IntegerRepr, inputconst
    +from pypy.rpython.rmodel import Repr, inputconst
     from pypy.rpython.rmodel import externalvsinternal
     from pypy.rpython.rlist import AbstractBaseListRepr, AbstractListRepr, \
    -        AbstractFixedSizeListRepr, AbstractListIteratorRepr, rtype_newlist, \
    -        rtype_alloc_and_set, ll_setitem_nonneg, ADTIList, ADTIFixedList
    -from pypy.rpython.rlist import dum_nocheck, dum_checkidx
    -from pypy.rpython.lltypesystem.lltype import \
    -     GcForwardReference, Ptr, GcArray, GcStruct, \
    -     Void, Signed, malloc, typeOf, Primitive, \
    -     Bool, nullptr, typeMethod
    +        AbstractFixedSizeListRepr, AbstractListIteratorRepr, \
    +        ll_setitem_nonneg, ADTIList, ADTIFixedList
    +from pypy.rpython.rlist import dum_nocheck
    +from pypy.rpython.lltypesystem.lltype import GcForwardReference, Ptr, GcArray,\
    +     GcStruct, Void, Signed, malloc, typeOf, nullptr, typeMethod
     from pypy.rpython.lltypesystem import rstr
    -from pypy.rpython import robject
     from pypy.rlib.debug import ll_assert
    -from pypy.rpython.lltypesystem import rffi
    -from pypy.rpython.lltypesystem.lloperation import llop
     from pypy.rlib import rgc
     
     # ____________________________________________________________
    @@ -67,6 +60,7 @@
             ITEMARRAY = GcArray(ITEM,
                                 adtmeths = ADTIFixedList({
                                      "ll_newlist": ll_fixed_newlist,
    +                                 "ll_newlist_hint": ll_fixed_newlist,
                                      "ll_newemptylist": ll_fixed_newemptylist,
                                      "ll_length": ll_fixed_length,
                                      "ll_items": ll_fixed_items,
    @@ -100,6 +94,7 @@
                                                   ("items", Ptr(ITEMARRAY)),
                                           adtmeths = ADTIList({
                                               "ll_newlist": ll_newlist,
    +                                          "ll_newlist_hint": ll_newlist_hint,
                                               "ll_newemptylist": ll_newemptylist,
                                               "ll_length": ll_length,
                                               "ll_items": ll_items,
    @@ -267,6 +262,15 @@
     ll_newlist = typeMethod(ll_newlist)
     ll_newlist.oopspec = 'newlist(length)'
     
    +def ll_newlist_hint(LIST, lengthhint):
    +    ll_assert(lengthhint >= 0, "negative list length")
    +    l = malloc(LIST)
    +    l.length = 0
    +    l.items = malloc(LIST.items.TO, lengthhint)
    +    return l
    +ll_newlist_hint = typeMethod(ll_newlist_hint)
    +ll_newlist_hint.oopspec = 'newlist(length)'
    +
     # should empty lists start with no allocated memory, or with a preallocated
     # minimal number of entries?  XXX compare memory usage versus speed, and
     # check how many always-empty lists there are in a typical pypy-c run...
    @@ -337,11 +341,15 @@
         l[index] = item
     ll_fixed_setitem_fast.oopspec = 'list.setitem(l, index, item)'
     
    -def newlist(llops, r_list, items_v):
    +def newlist(llops, r_list, items_v, v_sizehint=None):
         LIST = r_list.LIST
         if len(items_v) == 0:
    -        v_result = llops.gendirectcall(LIST.ll_newemptylist)
    +        if v_sizehint is None:
    +            v_result = llops.gendirectcall(LIST.ll_newemptylist)
    +        else:
    +            v_result = llops.gendirectcall(LIST.ll_newlist_hint, v_sizehint)
         else:
    +        assert v_sizehint is None
             cno = inputconst(Signed, len(items_v))
             v_result = llops.gendirectcall(LIST.ll_newlist, cno)
         v_func = inputconst(Void, dum_nocheck)
    diff --git a/pypy/rpython/ootypesystem/rlist.py b/pypy/rpython/ootypesystem/rlist.py
    --- a/pypy/rpython/ootypesystem/rlist.py
    +++ b/pypy/rpython/ootypesystem/rlist.py
    @@ -124,7 +124,7 @@
             else:
                 return ootype.List()
     
    -    def _generate_newlist(self, llops, items_v):
    +    def _generate_newlist(self, llops, items_v, v_sizehint):
             c_list = inputconst(ootype.Void, self.lowleveltype)
             v_result = llops.genop("new", [c_list], resulttype=self.lowleveltype)
             c_resize = inputconst(ootype.Void, "_ll_resize")
    @@ -150,8 +150,8 @@
     
     
     
    -def newlist(llops, r_list, items_v):
    -    v_result = r_list._generate_newlist(llops, items_v)
    +def newlist(llops, r_list, items_v, v_sizehint=None):
    +    v_result = r_list._generate_newlist(llops, items_v, v_sizehint)
     
         c_setitem = inputconst(ootype.Void, "ll_setitem_fast")
         for i, v_item in enumerate(items_v):
    @@ -224,7 +224,7 @@
         def make_iterator_repr(self):
             return ListIteratorRepr(self)
     
    -    def _generate_newlist(self, llops, items_v):
    +    def _generate_newlist(self, llops, items_v, v_sizehint):
             c_array = inputconst(ootype.Void, self.lowleveltype)
             c_length = inputconst(ootype.Signed, len(items_v))
             v_result = llops.genop("oonewarray", [c_array, c_length], resulttype=self.lowleveltype)
    diff --git a/pypy/rpython/rlist.py b/pypy/rpython/rlist.py
    --- a/pypy/rpython/rlist.py
    +++ b/pypy/rpython/rlist.py
    @@ -2,7 +2,7 @@
     from pypy.objspace.flow.model import Constant
     from pypy.annotation import model as annmodel
     from pypy.rpython.error import TyperError
    -from pypy.rpython.rmodel import Repr, IteratorRepr, IntegerRepr, inputconst
    +from pypy.rpython.rmodel import Repr, IteratorRepr, IntegerRepr
     from pypy.rpython.rstr import AbstractStringRepr, AbstractCharRepr
     from pypy.rpython.lltypesystem.lltype import typeOf, Ptr, Void, Signed, Bool
     from pypy.rpython.lltypesystem.lltype import nullptr, Char, UniChar, Number
    @@ -344,7 +344,7 @@
             return hop.genop('bool_not', [flag], resulttype=Bool)
     
     
    -def rtype_newlist(hop):
    +def rtype_newlist(hop, v_sizehint=None):
         nb_args = hop.nb_args
         r_list = hop.r_result
         if r_list == robject.pyobj_repr: # special case: SomeObject lists!
    @@ -358,7 +358,8 @@
             return v_result
         r_listitem = r_list.item_repr
         items_v = [hop.inputarg(r_listitem, arg=i) for i in range(nb_args)]
    -    return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v)
    +    return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v,
    +                                                v_sizehint=v_sizehint)
     
     def rtype_alloc_and_set(hop):
         r_list = hop.r_result
    diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py
    --- a/pypy/rpython/test/test_rlist.py
    +++ b/pypy/rpython/test/test_rlist.py
    @@ -1360,6 +1360,19 @@
                 assert ('foldable' in func.func_name) == \
                        ("y[*]" in immutable_fields)
     
    +    def test_hints(self):
    +        from pypy.rlib.objectmodel import newlist
    +        from pypy.rpython.annlowlevel import hlstr
    +        
    +        def f(z):
    +            z = hlstr(z)
    +            x = newlist(sizehint=13)
    +            x += z
    +            return ''.join(x)
    +
    +        res = self.interpret(f, [self.string_to_ll('abc')])
    +        assert self.ll_to_string(res) == 'abc'
    +
     class TestLLtype(BaseTestRlist, LLRtypeMixin):
         type_system = 'lltype'
         rlist = ll_rlist
    
    From noreply at buildbot.pypy.org  Thu Sep  8 09:24:21 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Thu,  8 Sep 2011 09:24:21 +0200 (CEST)
    Subject: [pypy-commit] pypy space-iterator-improvements: use length estimate
     for unpackiterable
    Message-ID: <20110908072421.F329D8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: space-iterator-improvements
    Changeset: r47157:81a9a624253f
    Date: 2011-09-08 09:23 +0200
    http://bitbucket.org/pypy/pypy/changeset/81a9a624253f/
    
    Log:	use length estimate for unpackiterable
    
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -8,13 +8,13 @@
     from pypy.interpreter.miscutils import ThreadLocals
     from pypy.tool.cache import Cache
     from pypy.tool.uid import HUGEVAL_BYTES
    -from pypy.rlib.objectmodel import we_are_translated
    +from pypy.rlib.objectmodel import we_are_translated, newlist
     from pypy.rlib.debug import make_sure_not_resized
     from pypy.rlib.timer import DummyTimer, Timer
     from pypy.rlib.rarithmetic import r_uint
     from pypy.rlib import jit
     from pypy.tool.sourcetools import func_with_new_name
    -import os, sys, py
    +import os, sys
     
     __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root']
     
    @@ -757,7 +757,15 @@
             w_iterator = self.iter(w_iterable)
             # If we know the expected length we can preallocate.
             if expected_length == -1:
    -            items = []
    +            try:
    +                lgt_estimate = self.int_w(self.len(w_iterable))
    +            except OperationError:
    +                items = []
    +            else:
    +                try:
    +                    items = newlist(lgt_estimate)
    +                except MemoryError:
    +                    items = [] # it might have lied
             else:
                 items = [None] * expected_length
             idx = 0
    
    From noreply at buildbot.pypy.org  Thu Sep  8 09:26:51 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Thu,  8 Sep 2011 09:26:51 +0200 (CEST)
    Subject: [pypy-commit] pypy space-iterator-improvements: a minor
    	simplification
    Message-ID: <20110908072651.880AD8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: space-iterator-improvements
    Changeset: r47158:31445f5be38e
    Date: 2011-09-08 09:26 +0200
    http://bitbucket.org/pypy/pypy/changeset/31445f5be38e/
    
    Log:	a minor simplification
    
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -758,7 +758,7 @@
             # If we know the expected length we can preallocate.
             if expected_length == -1:
                 try:
    -                lgt_estimate = self.int_w(self.len(w_iterable))
    +                lgt_estimate = self.len_w(w_iterable)
                 except OperationError:
                     items = []
                 else:
    
    From noreply at buildbot.pypy.org  Thu Sep  8 09:50:54 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Thu,  8 Sep 2011 09:50:54 +0200 (CEST)
    Subject: [pypy-commit] pypy space-iterator-improvements: A questionable test,
     but matches cpython behavior. The change itself makes
    Message-ID: <20110908075054.881408203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: space-iterator-improvements
    Changeset: r47159:b4bff794fea0
    Date: 2011-09-08 09:50 +0200
    http://bitbucket.org/pypy/pypy/changeset/b4bff794fea0/
    
    Log:	A questionable test, but matches cpython behavior. The change itself
    	makes sense however.
    
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -759,7 +759,10 @@
             if expected_length == -1:
                 try:
                     lgt_estimate = self.len_w(w_iterable)
    -            except OperationError:
    +            except OperationError, o:
    +                if (not o.match(self, self.w_AttributeError) and
    +                    not o.match(self, self.w_TypeError)):
    +                    raise
                     items = []
                 else:
                     try:
    diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
    --- a/pypy/interpreter/test/test_objspace.py
    +++ b/pypy/interpreter/test/test_objspace.py
    @@ -71,6 +71,23 @@
             assert err.value.match(space, space.w_ValueError)
             err = raises(OperationError, space.unpackiterable, w_l, 5)
             assert err.value.match(space, space.w_ValueError)
    +        w_a = space.appexec((), """():
    +        class A(object):
    +            def __iter__(self):
    +                return self
    +            def next(self):
    +                raise StopIteration
    +            def __len__(self):
    +                1/0
    +        return A()
    +        """)
    +        try:
    +            space.unpackiterable(w_a)
    +        except OperationError, o:
    +            if not o.match(space, space.w_ZeroDivisionError):
    +                raise Exception("DID NOT RAISE")
    +        else:
    +            raise Exception("DID NOT RAISE")
     
         def test_fixedview(self):
             space = self.space
    
    From noreply at buildbot.pypy.org  Thu Sep  8 10:41:50 2011
    From: noreply at buildbot.pypy.org (lac)
    Date: Thu,  8 Sep 2011 10:41:50 +0200 (CEST)
    Subject: [pypy-commit] pypy.org extradoc: add Samuele,
    	who has also promised us a photo.
    Message-ID: <20110908084150.6E68A8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Laura Creighton 
    Branch: extradoc
    Changeset: r254:ffde8342de9d
    Date: 2011-09-08 10:41 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/ffde8342de9d/
    
    Log:	add Samuele, who has also promised us a photo.
    
    diff --git a/source/people.txt b/source/people.txt
    --- a/source/people.txt
    +++ b/source/people.txt
    @@ -1,6 +1,6 @@
     ---
     layout: page
    -title: Current people of PyPy
    +title: People of PyPy
     ---
     
     Armin Rigo
    @@ -129,6 +129,18 @@
     .. _`tox`: http://codespeak.net/tox
     .. _`execnet`: http://codespeak.net/execnet
     
    +Samuele Pedroni
    +===============
    +
    +Samuele Pedroni got involved with PyPy almost at its inception in the
    +spring of 2003. One of the design contributors to PyPy, his help has
    +ranged from infrastructure and processes, through building out
    +RPython... optimizing the Python interpreter, to compressing resume
    +data in the last incarnation of the JIT compiler. Tempted away into the
    +application side of the software equation, these days he contributes
    +some words and wisdom to PyPy's paper writing.
    +
    +
     Many more people
     ================
     
    
    From noreply at buildbot.pypy.org  Thu Sep  8 11:33:07 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Thu,  8 Sep 2011 11:33:07 +0200 (CEST)
    Subject: [pypy-commit] pypy.org extradoc: remove people from the list,
     it generates more problems than gain and also gets one slot
    Message-ID: <20110908093307.DDC268203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: extradoc
    Changeset: r255:451af0702e2d
    Date: 2011-09-08 11:32 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/451af0702e2d/
    
    Log:	remove people from the list, it generates more problems than gain
    	and also gets one slot
    
    diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi
    --- a/source/_layouts/site.genshi
    +++ b/source/_layouts/site.genshi
    @@ -11,7 +11,6 @@
           ('Performance', 'http://speed.pypy.org'),
           ('Dev Documentation', 'http://doc.pypy.org'),
           ('Blog', 'http://morepypy.blogspot.com'),
    -      ('People', 'people.html'),
           ('Contact', 'contact.html'),
           ],
         }
    
    From noreply at buildbot.pypy.org  Thu Sep  8 11:36:46 2011
    From: noreply at buildbot.pypy.org (fijal)
    Date: Thu,  8 Sep 2011 11:36:46 +0200 (CEST)
    Subject: [pypy-commit] pypy.org extradoc: Backed out changeset 451af0702e2d
    Message-ID: <20110908093646.671428203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Maciej Fijalkowski 
    Branch: extradoc
    Changeset: r256:dd354a153c5c
    Date: 2011-09-08 11:36 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/dd354a153c5c/
    
    Log:	Backed out changeset 451af0702e2d
    
    diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi
    --- a/source/_layouts/site.genshi
    +++ b/source/_layouts/site.genshi
    @@ -11,6 +11,7 @@
           ('Performance', 'http://speed.pypy.org'),
           ('Dev Documentation', 'http://doc.pypy.org'),
           ('Blog', 'http://morepypy.blogspot.com'),
    +      ('People', 'people.html'),
           ('Contact', 'contact.html'),
           ],
         }
    
    From noreply at buildbot.pypy.org  Thu Sep  8 14:29:56 2011
    From: noreply at buildbot.pypy.org (hakanardo)
    Date: Thu,  8 Sep 2011 14:29:56 +0200 (CEST)
    Subject: [pypy-commit] pypy default: uint8 support
    Message-ID: <20110908122956.1F51A8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Hakan Ardo 
    Branch: 
    Changeset: r47160:12d91e2900e9
    Date: 2011-09-08 14:29 +0200
    http://bitbucket.org/pypy/pypy/changeset/12d91e2900e9/
    
    Log:	uint8 support
    
    diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
    --- a/pypy/module/micronumpy/interp_dtype.py
    +++ b/pypy/module/micronumpy/interp_dtype.py
    @@ -317,6 +317,17 @@
     class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype):
         pass
     
    +W_UInt8Dtype = create_low_level_dtype(
    +    num = 1, kind = SIGNEDLTR, name = "uint8",
    +    aliases = ["uint8"],
    +    applevel_types = [],
    +    T = rffi.UCHAR,
    +    valtype = rffi.UCHAR._type,
    +    expected_size = 1,
    +)
    +class W_UInt8Dtype(IntegerArithmeticDtype, W_UInt8Dtype):
    +    pass
    +
     W_Int16Dtype = create_low_level_dtype(
         num = 3, kind = SIGNEDLTR, name = "int16",
         aliases = ["int16"],
    @@ -368,6 +379,7 @@
     ALL_DTYPES = [
         W_BoolDtype,
         W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype,
    +    W_UInt8Dtype,
         W_Float64Dtype
     ]
     
    diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
    --- a/pypy/module/micronumpy/test/test_dtypes.py
    +++ b/pypy/module/micronumpy/test/test_dtypes.py
    @@ -12,6 +12,7 @@
             assert dtype(d) is d
             assert dtype(None) is dtype(float)
             raises(TypeError, dtype, 1042)
    +        assert dtype('uint8').num == 1
     
         def test_dtype_with_types(self):
             from numpy import dtype
    @@ -90,6 +91,15 @@
             for i in range(5):
                 assert b[i] == i * 2
     
    +    def test_add_uint8(self):
    +        from numpy import array, dtype
    +
    +        a = array(range(5), dtype="uint8")
    +        b = a + a
    +        assert b.dtype is dtype("uint8")
    +        for i in range(5):
    +            assert b[i] == i * 2
    +
         def test_add_int16(self):
             from numpy import array, dtype
     
    @@ -109,3 +119,15 @@
     
             # You can't subclass dtype
             raises(TypeError, type, "Foo", (dtype,), {})
    +
    +    def test_int_ranges(self):
    +        from numpy import array
    +        for dtype, minval, maxval in [("int8", -128, 127),
    +                                      ("uint8", 0, 255),
    +                                      ("int16", -32768, 32767)]:
    +            a = array([minval, maxval, minval-1, maxval+1], dtype)
    +            assert a[0] == minval
    +            assert a[1] == maxval
    +            assert a[2] == maxval
    +            assert a[3] == minval
    +            
    
    From noreply at buildbot.pypy.org  Thu Sep  8 14:52:11 2011
    From: noreply at buildbot.pypy.org (l.diekmann)
    Date: Thu,  8 Sep 2011 14:52:11 +0200 (CEST)
    Subject: [pypy-commit] pypy default: prepare listobject and timsort for
     liststrategies that will be merged later
    Message-ID: <20110908125211.BF5898203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Lukas Diekmann 
    Branch: 
    Changeset: r47161:1d6e48b23004
    Date: 2011-09-08 14:48 +0200
    http://bitbucket.org/pypy/pypy/changeset/1d6e48b23004/
    
    Log:	prepare listobject and timsort for liststrategies that will be
    	merged later
    
    diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
    --- a/pypy/objspace/std/listobject.py
    +++ b/pypy/objspace/std/listobject.py
    @@ -8,7 +8,7 @@
     
     from pypy.objspace.std import slicetype
     from pypy.interpreter import gateway, baseobjspace
    -from pypy.rlib.listsort import TimSort
    +from pypy.rlib.listsort import make_timsort_class
     from pypy.interpreter.argument import Signature
     
     class W_ListObject(W_Object):
    @@ -445,6 +445,7 @@
             self.w_key = w_key
             self.w_item = w_item
     
    +TimSort = make_timsort_class()
     # NOTE: all the subclasses of TimSort should inherit from a common subclass,
     #       so make sure that only SimpleSort inherits directly from TimSort.
     #       This is necessary to hide the parent method TimSort.lt() from the
    diff --git a/pypy/rlib/listsort.py b/pypy/rlib/listsort.py
    --- a/pypy/rlib/listsort.py
    +++ b/pypy/rlib/listsort.py
    @@ -7,588 +7,589 @@
     ## ------------------------------------------------------------------------
     ##         Adapted from CPython, original code and algorithms by Tim Peters
     
    -## CAREFUL:
    -## this class has to be used carefully, because all the lists that are
    -## sorted will be unified
    +def make_timsort_class():
     
    -class TimSort:
    -    """TimSort(list).sort()
    +    class TimSort:
    +        """TimSort(list).sort()
     
    -    Sorts the list in-place, using the overridable method lt() for comparison.
    -    """
    +        Sorts the list in-place, using the overridable method lt() for comparison.
    +        """
     
    -    def __init__(self, list, listlength=None):
    -        self.list = list
    -        if listlength is None:
    -            listlength = len(list)
    -        self.listlength = listlength
    +        def __init__(self, list, listlength=None):
    +            self.list = list
    +            if listlength is None:
    +                listlength = len(list)
    +            self.listlength = listlength
     
    -    def lt(self, a, b):
    -        return a < b
    +        def lt(self, a, b):
    +            return a < b
     
    -    def le(self, a, b):
    -        return not self.lt(b, a)   # always use self.lt() as the primitive
    +        def le(self, a, b):
    +            return not self.lt(b, a)   # always use self.lt() as the primitive
     
    -    # binarysort is the best method for sorting small arrays: it does
    -    # few compares, but can do data movement quadratic in the number of
    -    # elements.
    -    # "a" is a contiguous slice of a list, and is sorted via binary insertion.
    -    # This sort is stable.
    -    # On entry, the first "sorted" elements are already sorted.
    -    # Even in case of error, the output slice will be some permutation of
    -    # the input (nothing is lost or duplicated).
    +        # binarysort is the best method for sorting small arrays: it does
    +        # few compares, but can do data movement quadratic in the number of
    +        # elements.
    +        # "a" is a contiguous slice of a list, and is sorted via binary insertion.
    +        # This sort is stable.
    +        # On entry, the first "sorted" elements are already sorted.
    +        # Even in case of error, the output slice will be some permutation of
    +        # the input (nothing is lost or duplicated).
     
    -    def binarysort(self, a, sorted=1):
    -        for start in xrange(a.base + sorted, a.base + a.len):
    -            # set l to where list[start] belongs
    -            l = a.base
    -            r = start
    -            pivot = a.list[r]
    -            # Invariants:
    -            # pivot >= all in [base, l).
    -            # pivot  < all in [r, start).
    -            # The second is vacuously true at the start.
    -            while l < r:
    -                p = l + ((r - l) >> 1)
    -                if self.lt(pivot, a.list[p]):
    -                    r = p
    +        def binarysort(self, a, sorted=1):
    +            for start in xrange(a.base + sorted, a.base + a.len):
    +                # set l to where list[start] belongs
    +                l = a.base
    +                r = start
    +                pivot = a.list[r]
    +                # Invariants:
    +                # pivot >= all in [base, l).
    +                # pivot  < all in [r, start).
    +                # The second is vacuously true at the start.
    +                while l < r:
    +                    p = l + ((r - l) >> 1)
    +                    if self.lt(pivot, a.list[p]):
    +                        r = p
    +                    else:
    +                        l = p+1
    +                assert l == r
    +                # The invariants still hold, so pivot >= all in [base, l) and
    +                # pivot < all in [l, start), so pivot belongs at l.  Note
    +                # that if there are elements equal to pivot, l points to the
    +                # first slot after them -- that's why this sort is stable.
    +                # Slide over to make room.
    +                for p in xrange(start, l, -1):
    +                    a.list[p] = a.list[p-1]
    +                a.list[l] = pivot
    +
    +        # Compute the length of the run in the slice "a".
    +        # "A run" is the longest ascending sequence, with
    +        #
    +        #     a[0] <= a[1] <= a[2] <= ...
    +        #
    +        # or the longest descending sequence, with
    +        #
    +        #     a[0] > a[1] > a[2] > ...
    +        #
    +        # Return (run, descending) where descending is False in the former case,
    +        # or True in the latter.
    +        # For its intended use in a stable mergesort, the strictness of the defn of
    +        # "descending" is needed so that the caller can safely reverse a descending
    +        # sequence without violating stability (strict > ensures there are no equal
    +        # elements to get out of order).
    +
    +        def count_run(self, a):
    +            if a.len <= 1:
    +                n = a.len
    +                descending = False
    +            else:
    +                n = 2
    +                if self.lt(a.list[a.base + 1], a.list[a.base]):
    +                    descending = True
    +                    for p in xrange(a.base + 2, a.base + a.len):
    +                        if self.lt(a.list[p], a.list[p-1]):
    +                            n += 1
    +                        else:
    +                            break
                     else:
    -                    l = p+1
    -            assert l == r
    -            # The invariants still hold, so pivot >= all in [base, l) and
    -            # pivot < all in [l, start), so pivot belongs at l.  Note
    -            # that if there are elements equal to pivot, l points to the
    -            # first slot after them -- that's why this sort is stable.
    -            # Slide over to make room.
    -            for p in xrange(start, l, -1):
    -                a.list[p] = a.list[p-1]
    -            a.list[l] = pivot
    +                    descending = False
    +                    for p in xrange(a.base + 2, a.base + a.len):
    +                        if self.lt(a.list[p], a.list[p-1]):
    +                            break
    +                        else:
    +                            n += 1
    +            return ListSlice(a.list, a.base, n), descending
     
    -    # Compute the length of the run in the slice "a".
    -    # "A run" is the longest ascending sequence, with
    -    #
    -    #     a[0] <= a[1] <= a[2] <= ...
    -    #
    -    # or the longest descending sequence, with
    -    #
    -    #     a[0] > a[1] > a[2] > ...
    -    #
    -    # Return (run, descending) where descending is False in the former case,
    -    # or True in the latter.
    -    # For its intended use in a stable mergesort, the strictness of the defn of
    -    # "descending" is needed so that the caller can safely reverse a descending
    -    # sequence without violating stability (strict > ensures there are no equal
    -    # elements to get out of order).
    +        # Locate the proper position of key in a sorted vector; if the vector
    +        # contains an element equal to key, return the position immediately to the
    +        # left of the leftmost equal element -- or to the right of the rightmost
    +        # equal element if the flag "rightmost" is set.
    +        #
    +        # "hint" is an index at which to begin the search, 0 <= hint < a.len.
    +        # The closer hint is to the final result, the faster this runs.
    +        #
    +        # The return value is the index 0 <= k <= a.len such that
    +        #
    +        #     a[k-1] < key <= a[k]      (if rightmost is False)
    +        #     a[k-1] <= key < a[k]      (if rightmost is True)
    +        #
    +        # as long as the indices are in bound.  IOW, key belongs at index k;
    +        # or, IOW, the first k elements of a should precede key, and the last
    +        # n-k should follow key.
     
    -    def count_run(self, a):
    -        if a.len <= 1:
    -            n = a.len
    -            descending = False
    -        else:
    -            n = 2
    -            if self.lt(a.list[a.base + 1], a.list[a.base]):
    -                descending = True
    -                for p in xrange(a.base + 2, a.base + a.len):
    -                    if self.lt(a.list[p], a.list[p-1]):
    -                        n += 1
    -                    else:
    +        def gallop(self, key, a, hint, rightmost):
    +            assert 0 <= hint < a.len
    +            if rightmost:
    +                lower = self.le   # search for the largest k for which a[k] <= key
    +            else:
    +                lower = self.lt   # search for the largest k for which a[k] < key
    +
    +            p = a.base + hint
    +            lastofs = 0
    +            ofs = 1
    +            if lower(a.list[p], key):
    +                # a[hint] < key -- gallop right, until
    +                #     a[hint + lastofs] < key <= a[hint + ofs]
    +
    +                maxofs = a.len - hint     # a[a.len-1] is highest
    +                while ofs < maxofs:
    +                    if lower(a.list[p + ofs], key):
    +                        lastofs = ofs
    +                        try:
    +                            ofs = ovfcheck_lshift(ofs, 1)
    +                        except OverflowError:
    +                            ofs = maxofs
    +                        else:
    +                            ofs = ofs + 1
    +                    else:  # key <= a[hint + ofs]
                             break
    +
    +                if ofs > maxofs:
    +                    ofs = maxofs
    +                # Translate back to offsets relative to a.
    +                lastofs += hint
    +                ofs += hint
    +
                 else:
    -                descending = False
    -                for p in xrange(a.base + 2, a.base + a.len):
    -                    if self.lt(a.list[p], a.list[p-1]):
    +                # key <= a[hint] -- gallop left, until
    +                #     a[hint - ofs] < key <= a[hint - lastofs]
    +                maxofs = hint + 1   # a[0] is lowest
    +                while ofs < maxofs:
    +                    if lower(a.list[p - ofs], key):
                             break
                         else:
    -                        n += 1
    -        return ListSlice(a.list, a.base, n), descending
    +                        # key <= a[hint - ofs]
    +                        lastofs = ofs
    +                        try:
    +                            ofs = ovfcheck_lshift(ofs, 1)
    +                        except OverflowError:
    +                            ofs = maxofs
    +                        else:
    +                            ofs = ofs + 1
    +                if ofs > maxofs:
    +                    ofs = maxofs
    +                # Translate back to positive offsets relative to a.
    +                lastofs, ofs = hint-ofs, hint-lastofs
     
    -    # Locate the proper position of key in a sorted vector; if the vector
    -    # contains an element equal to key, return the position immediately to the
    -    # left of the leftmost equal element -- or to the right of the rightmost
    -    # equal element if the flag "rightmost" is set.
    -    #
    -    # "hint" is an index at which to begin the search, 0 <= hint < a.len.
    -    # The closer hint is to the final result, the faster this runs.
    -    #
    -    # The return value is the index 0 <= k <= a.len such that
    -    #
    -    #     a[k-1] < key <= a[k]      (if rightmost is False)
    -    #     a[k-1] <= key < a[k]      (if rightmost is True)
    -    #
    -    # as long as the indices are in bound.  IOW, key belongs at index k;
    -    # or, IOW, the first k elements of a should precede key, and the last
    -    # n-k should follow key.
    +            assert -1 <= lastofs < ofs <= a.len
     
    -    def gallop(self, key, a, hint, rightmost):
    -        assert 0 <= hint < a.len
    -        if rightmost:
    -            lower = self.le   # search for the largest k for which a[k] <= key
    -        else:
    -            lower = self.lt   # search for the largest k for which a[k] < key
    +            # Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
    +            # right of lastofs but no farther right than ofs.  Do a binary
    +            # search, with invariant a[lastofs-1] < key <= a[ofs].
     
    -        p = a.base + hint
    -        lastofs = 0
    -        ofs = 1
    -        if lower(a.list[p], key):
    -            # a[hint] < key -- gallop right, until
    -            #     a[hint + lastofs] < key <= a[hint + ofs]
    +            lastofs += 1
    +            while lastofs < ofs:
    +                m = lastofs + ((ofs - lastofs) >> 1)
    +                if lower(a.list[a.base + m], key):
    +                    lastofs = m+1   # a[m] < key
    +                else:
    +                    ofs = m         # key <= a[m]
     
    -            maxofs = a.len - hint     # a[a.len-1] is highest
    -            while ofs < maxofs:
    -                if lower(a.list[p + ofs], key):
    -                    lastofs = ofs
    -                    try:
    -                        ofs = ovfcheck_lshift(ofs, 1)
    -                    except OverflowError:
    -                        ofs = maxofs
    -                    else:
    -                        ofs = ofs + 1
    -                else:  # key <= a[hint + ofs]
    -                    break
    +            assert lastofs == ofs         # so a[ofs-1] < key <= a[ofs]
    +            return ofs
     
    -            if ofs > maxofs:
    -                ofs = maxofs
    -            # Translate back to offsets relative to a.
    -            lastofs += hint
    -            ofs += hint
    +        # hint for the annotator: the argument 'rightmost' is always passed in as
    +        # a constant (either True or False), so we can specialize the function for
    +        # the two cases.  (This is actually needed for technical reasons: the
    +        # variable 'lower' must contain a known method, which is the case in each
    +        # specialized version but not in the unspecialized one.)
    +        gallop._annspecialcase_ = "specialize:arg(4)"
     
    -        else:
    -            # key <= a[hint] -- gallop left, until
    -            #     a[hint - ofs] < key <= a[hint - lastofs]
    -            maxofs = hint + 1   # a[0] is lowest
    -            while ofs < maxofs:
    -                if lower(a.list[p - ofs], key):
    -                    break
    -                else:
    -                    # key <= a[hint - ofs]
    -                    lastofs = ofs
    -                    try:
    -                        ofs = ovfcheck_lshift(ofs, 1)
    -                    except OverflowError:
    -                        ofs = maxofs
    -                    else:
    -                        ofs = ofs + 1
    -            if ofs > maxofs:
    -                ofs = maxofs
    -            # Translate back to positive offsets relative to a.
    -            lastofs, ofs = hint-ofs, hint-lastofs
    +        # ____________________________________________________________
     
    -        assert -1 <= lastofs < ofs <= a.len
    +        # When we get into galloping mode, we stay there until both runs win less
    +        # often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
    +        MIN_GALLOP = 7
     
    -        # Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
    -        # right of lastofs but no farther right than ofs.  Do a binary
    -        # search, with invariant a[lastofs-1] < key <= a[ofs].
    -        
    -        lastofs += 1
    -        while lastofs < ofs:
    -            m = lastofs + ((ofs - lastofs) >> 1)
    -            if lower(a.list[a.base + m], key):
    -                lastofs = m+1   # a[m] < key
    -            else:
    -                ofs = m         # key <= a[m]
    +        def merge_init(self):
    +            # This controls when we get *into* galloping mode.  It's initialized
    +            # to MIN_GALLOP.  merge_lo and merge_hi tend to nudge it higher for
    +            # random data, and lower for highly structured data.
    +            self.min_gallop = self.MIN_GALLOP
     
    -        assert lastofs == ofs         # so a[ofs-1] < key <= a[ofs]
    -        return ofs
    +            # A stack of n pending runs yet to be merged.  Run #i starts at
    +            # address pending[i].base and extends for pending[i].len elements.
    +            # It's always true (so long as the indices are in bounds) that
    +            #
    +            #     pending[i].base + pending[i].len == pending[i+1].base
    +            #
    +            # so we could cut the storage for this, but it's a minor amount,
    +            # and keeping all the info explicit simplifies the code.
    +            self.pending = []
     
    -    # hint for the annotator: the argument 'rightmost' is always passed in as
    -    # a constant (either True or False), so we can specialize the function for
    -    # the two cases.  (This is actually needed for technical reasons: the
    -    # variable 'lower' must contain a known method, which is the case in each
    -    # specialized version but not in the unspecialized one.)
    -    gallop._annspecialcase_ = "specialize:arg(4)"
    +        # Merge the slice "a" with the slice "b" in a stable way, in-place.
    +        # a.len and b.len must be > 0, and a.base + a.len == b.base.
    +        # Must also have that b.list[b.base] < a.list[a.base], that
    +        # a.list[a.base+a.len-1] belongs at the end of the merge, and should have
    +        # a.len <= b.len.  See listsort.txt for more info.
     
    -    # ____________________________________________________________
    +        def merge_lo(self, a, b):
    +            assert a.len > 0 and b.len > 0 and a.base + a.len == b.base
    +            min_gallop = self.min_gallop
    +            dest = a.base
    +            a = a.copyitems()
     
    -    # When we get into galloping mode, we stay there until both runs win less
    -    # often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
    -    MIN_GALLOP = 7
    +            # Invariant: elements in "a" are waiting to be reinserted into the list
    +            # at "dest".  They should be merged with the elements of "b".
    +            # b.base == dest + a.len.
    +            # We use a finally block to ensure that the elements remaining in
    +            # the copy "a" are reinserted back into self.list in all cases.
    +            try:
    +                self.list[dest] = b.popleft()
    +                dest += 1
    +                if a.len == 1 or b.len == 0:
    +                    return
     
    -    def merge_init(self):
    -        # This controls when we get *into* galloping mode.  It's initialized
    -        # to MIN_GALLOP.  merge_lo and merge_hi tend to nudge it higher for
    -        # random data, and lower for highly structured data.
    -        self.min_gallop = self.MIN_GALLOP
    +                while True:
    +                    acount = 0   # number of times A won in a row
    +                    bcount = 0   # number of times B won in a row
     
    -        # A stack of n pending runs yet to be merged.  Run #i starts at
    -        # address pending[i].base and extends for pending[i].len elements.
    -        # It's always true (so long as the indices are in bounds) that
    -        #
    -        #     pending[i].base + pending[i].len == pending[i+1].base
    -        #
    -        # so we could cut the storage for this, but it's a minor amount,
    -        # and keeping all the info explicit simplifies the code.
    -        self.pending = []
    +                    # Do the straightforward thing until (if ever) one run
    +                    # appears to win consistently.
    +                    while True:
    +                        if self.lt(b.list[b.base], a.list[a.base]):
    +                            self.list[dest] = b.popleft()
    +                            dest += 1
    +                            if b.len == 0:
    +                                return
    +                            bcount += 1
    +                            acount = 0
    +                            if bcount >= min_gallop:
    +                                break
    +                        else:
    +                            self.list[dest] = a.popleft()
    +                            dest += 1
    +                            if a.len == 1:
    +                                return
    +                            acount += 1
    +                            bcount = 0
    +                            if acount >= min_gallop:
    +                                break
     
    -    # Merge the slice "a" with the slice "b" in a stable way, in-place.
    -    # a.len and b.len must be > 0, and a.base + a.len == b.base.
    -    # Must also have that b.list[b.base] < a.list[a.base], that
    -    # a.list[a.base+a.len-1] belongs at the end of the merge, and should have
    -    # a.len <= b.len.  See listsort.txt for more info.
    +                    # One run is winning so consistently that galloping may
    +                    # be a huge win.  So try that, and continue galloping until
    +                    # (if ever) neither run appears to be winning consistently
    +                    # anymore.
    +                    min_gallop += 1
     
    -    def merge_lo(self, a, b):
    -        assert a.len > 0 and b.len > 0 and a.base + a.len == b.base
    -        min_gallop = self.min_gallop
    -        dest = a.base
    -        a = a.copyitems()
    +                    while True:
    +                        min_gallop -= min_gallop > 1
    +                        self.min_gallop = min_gallop
     
    -        # Invariant: elements in "a" are waiting to be reinserted into the list
    -        # at "dest".  They should be merged with the elements of "b".
    -        # b.base == dest + a.len.
    -        # We use a finally block to ensure that the elements remaining in
    -        # the copy "a" are reinserted back into self.list in all cases.
    -        try:
    -            self.list[dest] = b.popleft()
    -            dest += 1
    -            if a.len == 1 or b.len == 0:
    -                return
    +                        acount = self.gallop(b.list[b.base], a, hint=0,
    +                                             rightmost=True)
    +                        for p in xrange(a.base, a.base + acount):
    +                            self.list[dest] = a.list[p]
    +                            dest += 1
    +                        a.advance(acount)
    +                        # a.len==0 is impossible now if the comparison
    +                        # function is consistent, but we can't assume
    +                        # that it is.
    +                        if a.len <= 1:
    +                            return
     
    -            while True:
    -                acount = 0   # number of times A won in a row
    -                bcount = 0   # number of times B won in a row
    -
    -                # Do the straightforward thing until (if ever) one run
    -                # appears to win consistently.
    -                while True:
    -                    if self.lt(b.list[b.base], a.list[a.base]):
                             self.list[dest] = b.popleft()
                             dest += 1
                             if b.len == 0:
                                 return
    -                        bcount += 1
    -                        acount = 0
    -                        if bcount >= min_gallop:
    -                            break
    -                    else:
    +
    +                        bcount = self.gallop(a.list[a.base], b, hint=0,
    +                                             rightmost=False)
    +                        for p in xrange(b.base, b.base + bcount):
    +                            self.list[dest] = b.list[p]
    +                            dest += 1
    +                        b.advance(bcount)
    +                        if b.len == 0:
    +                            return
    +
                             self.list[dest] = a.popleft()
                             dest += 1
                             if a.len == 1:
                                 return
    -                        acount += 1
    -                        bcount = 0
    -                        if acount >= min_gallop:
    +
    +                        if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP:
                                 break
     
    -                # One run is winning so consistently that galloping may
    -                # be a huge win.  So try that, and continue galloping until
    -                # (if ever) neither run appears to be winning consistently
    -                # anymore.
    -                min_gallop += 1
    +                    min_gallop += 1  # penalize it for leaving galloping mode
    +                    self.min_gallop = min_gallop
    +
    +            finally:
    +                # The last element of a belongs at the end of the merge, so we copy
    +                # the remaining elements of b before the remaining elements of a.
    +                assert a.len >= 0 and b.len >= 0
    +                for p in xrange(b.base, b.base + b.len):
    +                    self.list[dest] = b.list[p]
    +                    dest += 1
    +                for p in xrange(a.base, a.base + a.len):
    +                    self.list[dest] = a.list[p]
    +                    dest += 1
    +
    +        # Same as merge_lo(), but should have a.len >= b.len.
    +
    +        def merge_hi(self, a, b):
    +            assert a.len > 0 and b.len > 0 and a.base + a.len == b.base
    +            min_gallop = self.min_gallop
    +            dest = b.base + b.len
    +            b = b.copyitems()
    +
    +            # Invariant: elements in "b" are waiting to be reinserted into the list
    +            # before "dest".  They should be merged with the elements of "a".
    +            # a.base + a.len == dest - b.len.
    +            # We use a finally block to ensure that the elements remaining in
    +            # the copy "b" are reinserted back into self.list in all cases.
    +            try:
    +                dest -= 1
    +                self.list[dest] = a.popright()
    +                if a.len == 0 or b.len == 1:
    +                    return
     
                     while True:
    -                    min_gallop -= min_gallop > 1
    +                    acount = 0   # number of times A won in a row
    +                    bcount = 0   # number of times B won in a row
    +
    +                    # Do the straightforward thing until (if ever) one run
    +                    # appears to win consistently.
    +                    while True:
    +                        nexta = a.list[a.base + a.len - 1]
    +                        nextb = b.list[b.base + b.len - 1]
    +                        if self.lt(nextb, nexta):
    +                            dest -= 1
    +                            self.list[dest] = nexta
    +                            a.len -= 1
    +                            if a.len == 0:
    +                                return
    +                            acount += 1
    +                            bcount = 0
    +                            if acount >= min_gallop:
    +                                break
    +                        else:
    +                            dest -= 1
    +                            self.list[dest] = nextb
    +                            b.len -= 1
    +                            if b.len == 1:
    +                                return
    +                            bcount += 1
    +                            acount = 0
    +                            if bcount >= min_gallop:
    +                                break
    +
    +                    # One run is winning so consistently that galloping may
    +                    # be a huge win.  So try that, and continue galloping until
    +                    # (if ever) neither run appears to be winning consistently
    +                    # anymore.
    +                    min_gallop += 1
    +
    +                    while True:
    +                        min_gallop -= min_gallop > 1
    +                        self.min_gallop = min_gallop
    +
    +                        nextb = b.list[b.base + b.len - 1]
    +                        k = self.gallop(nextb, a, hint=a.len-1, rightmost=True)
    +                        acount = a.len - k
    +                        for p in xrange(a.base + a.len - 1, a.base + k - 1, -1):
    +                            dest -= 1
    +                            self.list[dest] = a.list[p]
    +                        a.len -= acount
    +                        if a.len == 0:
    +                            return
    +
    +                        dest -= 1
    +                        self.list[dest] = b.popright()
    +                        if b.len == 1:
    +                            return
    +
    +                        nexta = a.list[a.base + a.len - 1]
    +                        k = self.gallop(nexta, b, hint=b.len-1, rightmost=False)
    +                        bcount = b.len - k
    +                        for p in xrange(b.base + b.len - 1, b.base + k - 1, -1):
    +                            dest -= 1
    +                            self.list[dest] = b.list[p]
    +                        b.len -= bcount
    +                        # b.len==0 is impossible now if the comparison
    +                        # function is consistent, but we can't assume
    +                        # that it is.
    +                        if b.len <= 1:
    +                            return
    +
    +                        dest -= 1
    +                        self.list[dest] = a.popright()
    +                        if a.len == 0:
    +                            return
    +
    +                        if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP:
    +                            break
    +
    +                    min_gallop += 1  # penalize it for leaving galloping mode
                         self.min_gallop = min_gallop
     
    -                    acount = self.gallop(b.list[b.base], a, hint=0,
    -                                         rightmost=True)
    -                    for p in xrange(a.base, a.base + acount):
    -                        self.list[dest] = a.list[p]
    -                        dest += 1
    -                    a.advance(acount)
    -                    # a.len==0 is impossible now if the comparison
    -                    # function is consistent, but we can't assume
    -                    # that it is.
    -                    if a.len <= 1:
    -                        return
    +            finally:
    +                # The last element of a belongs at the end of the merge, so we copy
    +                # the remaining elements of a and then the remaining elements of b.
    +                assert a.len >= 0 and b.len >= 0
    +                for p in xrange(a.base + a.len - 1, a.base - 1, -1):
    +                    dest -= 1
    +                    self.list[dest] = a.list[p]
    +                for p in xrange(b.base + b.len - 1, b.base - 1, -1):
    +                    dest -= 1
    +                    self.list[dest] = b.list[p]
     
    -                    self.list[dest] = b.popleft()
    -                    dest += 1
    -                    if b.len == 0:
    -                        return
    +        # Merge the two runs at stack indices i and i+1.
     
    -                    bcount = self.gallop(a.list[a.base], b, hint=0,
    -                                         rightmost=False)
    -                    for p in xrange(b.base, b.base + bcount):
    -                        self.list[dest] = b.list[p]
    -                        dest += 1
    -                    b.advance(bcount)
    -                    if b.len == 0:
    -                        return
    +        def merge_at(self, i):
    +            a = self.pending[i]
    +            b = self.pending[i+1]
    +            assert a.len > 0 and b.len > 0
    +            assert a.base + a.len == b.base
     
    -                    self.list[dest] = a.popleft()
    -                    dest += 1
    -                    if a.len == 1:
    -                        return
    +            # Record the length of the combined runs and remove the run b
    +            self.pending[i] = ListSlice(self.list, a.base, a.len + b.len)
    +            del self.pending[i+1]
     
    -                    if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP:
    -                        break
    -
    -                min_gallop += 1  # penalize it for leaving galloping mode
    -                self.min_gallop = min_gallop
    -
    -        finally:
    -            # The last element of a belongs at the end of the merge, so we copy
    -            # the remaining elements of b before the remaining elements of a.
    -            assert a.len >= 0 and b.len >= 0
    -            for p in xrange(b.base, b.base + b.len):
    -                self.list[dest] = b.list[p]
    -                dest += 1
    -            for p in xrange(a.base, a.base + a.len):
    -                self.list[dest] = a.list[p]
    -                dest += 1
    -
    -    # Same as merge_lo(), but should have a.len >= b.len.
    -
    -    def merge_hi(self, a, b):
    -        assert a.len > 0 and b.len > 0 and a.base + a.len == b.base
    -        min_gallop = self.min_gallop
    -        dest = b.base + b.len
    -        b = b.copyitems()
    -
    -        # Invariant: elements in "b" are waiting to be reinserted into the list
    -        # before "dest".  They should be merged with the elements of "a".
    -        # a.base + a.len == dest - b.len.
    -        # We use a finally block to ensure that the elements remaining in
    -        # the copy "b" are reinserted back into self.list in all cases.
    -        try:
    -            dest -= 1
    -            self.list[dest] = a.popright()
    -            if a.len == 0 or b.len == 1:
    +            # Where does b start in a?  Elements in a before that can be
    +            # ignored (already in place).
    +            k = self.gallop(b.list[b.base], a, hint=0, rightmost=True)
    +            a.advance(k)
    +            if a.len == 0:
                     return
     
    -            while True:
    -                acount = 0   # number of times A won in a row
    -                bcount = 0   # number of times B won in a row
    +            # Where does a end in b?  Elements in b after that can be
    +            # ignored (already in place).
    +            b.len = self.gallop(a.list[a.base+a.len-1], b, hint=b.len-1,
    +                                rightmost=False)
    +            if b.len == 0:
    +                return
     
    -                # Do the straightforward thing until (if ever) one run
    -                # appears to win consistently.
    -                while True:
    -                    nexta = a.list[a.base + a.len - 1]
    -                    nextb = b.list[b.base + b.len - 1]
    -                    if self.lt(nextb, nexta):
    -                        dest -= 1
    -                        self.list[dest] = nexta
    -                        a.len -= 1
    -                        if a.len == 0:
    -                            return
    -                        acount += 1
    -                        bcount = 0
    -                        if acount >= min_gallop:
    -                            break
    +            # Merge what remains of the runs.  The direction is chosen to
    +            # minimize the temporary storage needed.
    +            if a.len <= b.len:
    +                self.merge_lo(a, b)
    +            else:
    +                self.merge_hi(a, b)
    +
    +        # Examine the stack of runs waiting to be merged, merging adjacent runs
    +        # until the stack invariants are re-established:
    +        #
    +        # 1. len[-3] > len[-2] + len[-1]
    +        # 2. len[-2] > len[-1]
    +        #
    +        # See listsort.txt for more info.
    +
    +        def merge_collapse(self):
    +            p = self.pending
    +            while len(p) > 1:
    +                if len(p) >= 3 and p[-3].len <= p[-2].len + p[-1].len:
    +                    if p[-3].len < p[-1].len:
    +                        self.merge_at(-3)
                         else:
    -                        dest -= 1
    -                        self.list[dest] = nextb
    -                        b.len -= 1
    -                        if b.len == 1:
    -                            return
    -                        bcount += 1
    -                        acount = 0
    -                        if bcount >= min_gallop:
    -                            break
    +                        self.merge_at(-2)
    +                elif p[-2].len <= p[-1].len:
    +                    self.merge_at(-2)
    +                else:
    +                    break
     
    -                # One run is winning so consistently that galloping may
    -                # be a huge win.  So try that, and continue galloping until
    -                # (if ever) neither run appears to be winning consistently
    -                # anymore.
    -                min_gallop += 1
    +        # Regardless of invariants, merge all runs on the stack until only one
    +        # remains.  This is used at the end of the mergesort.
     
    -                while True:
    -                    min_gallop -= min_gallop > 1
    -                    self.min_gallop = min_gallop
    -
    -                    nextb = b.list[b.base + b.len - 1]
    -                    k = self.gallop(nextb, a, hint=a.len-1, rightmost=True)
    -                    acount = a.len - k
    -                    for p in xrange(a.base + a.len - 1, a.base + k - 1, -1):
    -                        dest -= 1
    -                        self.list[dest] = a.list[p]
    -                    a.len -= acount
    -                    if a.len == 0:
    -                        return
    -
    -                    dest -= 1
    -                    self.list[dest] = b.popright()
    -                    if b.len == 1:
    -                        return
    -
    -                    nexta = a.list[a.base + a.len - 1]
    -                    k = self.gallop(nexta, b, hint=b.len-1, rightmost=False)
    -                    bcount = b.len - k
    -                    for p in xrange(b.base + b.len - 1, b.base + k - 1, -1):
    -                        dest -= 1
    -                        self.list[dest] = b.list[p]
    -                    b.len -= bcount
    -                    # b.len==0 is impossible now if the comparison
    -                    # function is consistent, but we can't assume
    -                    # that it is.
    -                    if b.len <= 1:
    -                        return
    -
    -                    dest -= 1
    -                    self.list[dest] = a.popright()
    -                    if a.len == 0:
    -                        return
    -
    -                    if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP:
    -                        break
    -
    -                min_gallop += 1  # penalize it for leaving galloping mode
    -                self.min_gallop = min_gallop
    -
    -        finally:
    -            # The last element of a belongs at the end of the merge, so we copy
    -            # the remaining elements of a and then the remaining elements of b.
    -            assert a.len >= 0 and b.len >= 0
    -            for p in xrange(a.base + a.len - 1, a.base - 1, -1):
    -                dest -= 1
    -                self.list[dest] = a.list[p]
    -            for p in xrange(b.base + b.len - 1, b.base - 1, -1):
    -                dest -= 1
    -                self.list[dest] = b.list[p]
    -
    -    # Merge the two runs at stack indices i and i+1.
    -
    -    def merge_at(self, i):
    -        a = self.pending[i]
    -        b = self.pending[i+1]
    -        assert a.len > 0 and b.len > 0
    -        assert a.base + a.len == b.base
    -
    -        # Record the length of the combined runs and remove the run b
    -        self.pending[i] = ListSlice(self.list, a.base, a.len + b.len)
    -        del self.pending[i+1]
    -
    -        # Where does b start in a?  Elements in a before that can be
    -        # ignored (already in place).
    -        k = self.gallop(b.list[b.base], a, hint=0, rightmost=True)
    -        a.advance(k)
    -        if a.len == 0:
    -            return
    -
    -        # Where does a end in b?  Elements in b after that can be
    -        # ignored (already in place).
    -        b.len = self.gallop(a.list[a.base+a.len-1], b, hint=b.len-1,
    -                            rightmost=False)
    -        if b.len == 0:
    -            return
    -
    -        # Merge what remains of the runs.  The direction is chosen to
    -        # minimize the temporary storage needed.
    -        if a.len <= b.len:
    -            self.merge_lo(a, b)
    -        else:
    -            self.merge_hi(a, b)
    -
    -    # Examine the stack of runs waiting to be merged, merging adjacent runs
    -    # until the stack invariants are re-established:
    -    #
    -    # 1. len[-3] > len[-2] + len[-1]
    -    # 2. len[-2] > len[-1]
    -    #
    -    # See listsort.txt for more info.
    -
    -    def merge_collapse(self):
    -        p = self.pending
    -        while len(p) > 1:
    -            if len(p) >= 3 and p[-3].len <= p[-2].len + p[-1].len:
    -                if p[-3].len < p[-1].len:
    +        def merge_force_collapse(self):
    +            p = self.pending
    +            while len(p) > 1:
    +                if len(p) >= 3 and p[-3].len < p[-1].len:
                         self.merge_at(-3)
                     else:
                         self.merge_at(-2)
    -            elif p[-2].len <= p[-1].len:
    -                self.merge_at(-2)
    -            else:
    -                break
     
    -    # Regardless of invariants, merge all runs on the stack until only one
    -    # remains.  This is used at the end of the mergesort.
    +        # Compute a good value for the minimum run length; natural runs shorter
    +        # than this are boosted artificially via binary insertion.
    +        #
    +        # If n < 64, return n (it's too small to bother with fancy stuff).
    +        # Else if n is an exact power of 2, return 32.
    +        # Else return an int k, 32 <= k <= 64, such that n/k is close to, but
    +        # strictly less than, an exact power of 2.
    +        #
    +        # See listsort.txt for more info.
     
    -    def merge_force_collapse(self):
    -        p = self.pending
    -        while len(p) > 1:
    -            if len(p) >= 3 and p[-3].len < p[-1].len:
    -                self.merge_at(-3)
    -            else:
    -                self.merge_at(-2)
    +        def merge_compute_minrun(self, n):
    +            r = 0    # becomes 1 if any 1 bits are shifted off
    +            while n >= 64:
    +                r |= n & 1
    +                n >>= 1
    +            return n + r
     
    -    # Compute a good value for the minimum run length; natural runs shorter
    -    # than this are boosted artificially via binary insertion.
    -    #
    -    # If n < 64, return n (it's too small to bother with fancy stuff).
    -    # Else if n is an exact power of 2, return 32.
    -    # Else return an int k, 32 <= k <= 64, such that n/k is close to, but
    -    # strictly less than, an exact power of 2.
    -    #
    -    # See listsort.txt for more info.
    +        # ____________________________________________________________
    +        # Entry point.
     
    -    def merge_compute_minrun(self, n):
    -        r = 0    # becomes 1 if any 1 bits are shifted off
    -        while n >= 64:
    -            r |= n & 1
    -            n >>= 1
    -        return n + r
    +        def sort(self):
    +            remaining = ListSlice(self.list, 0, self.listlength)
    +            if remaining.len < 2:
    +                return
     
    -    # ____________________________________________________________
    -    # Entry point.
    +            # March over the array once, left to right, finding natural runs,
    +            # and extending short natural runs to minrun elements.
    +            self.merge_init()
    +            minrun = self.merge_compute_minrun(remaining.len)
     
    -    def sort(self):
    -        remaining = ListSlice(self.list, 0, self.listlength)
    -        if remaining.len < 2:
    -            return
    +            while remaining.len > 0:
    +                # Identify next run.
    +                run, descending = self.count_run(remaining)
    +                if descending:
    +                    run.reverse()
    +                # If short, extend to min(minrun, nremaining).
    +                if run.len < minrun:
    +                    sorted = run.len
    +                    run.len = min(minrun, remaining.len)
    +                    self.binarysort(run, sorted)
    +                # Advance remaining past this run.
    +                remaining.advance(run.len)
    +                # Push run onto pending-runs stack, and maybe merge.
    +                self.pending.append(run)
    +                self.merge_collapse()
     
    -        # March over the array once, left to right, finding natural runs,
    -        # and extending short natural runs to minrun elements.
    -        self.merge_init()
    -        minrun = self.merge_compute_minrun(remaining.len)
    +            assert remaining.base == self.listlength
     
    -        while remaining.len > 0:
    -            # Identify next run.
    -            run, descending = self.count_run(remaining)
    -            if descending:
    -                run.reverse()
    -            # If short, extend to min(minrun, nremaining).
    -            if run.len < minrun:
    -                sorted = run.len
    -                run.len = min(minrun, remaining.len)
    -                self.binarysort(run, sorted)
    -            # Advance remaining past this run.
    -            remaining.advance(run.len)
    -            # Push run onto pending-runs stack, and maybe merge.
    -            self.pending.append(run)
    -            self.merge_collapse()
    +            self.merge_force_collapse()
    +            assert len(self.pending) == 1
    +            assert self.pending[0].base == 0
    +            assert self.pending[0].len == self.listlength
     
    -        assert remaining.base == self.listlength
     
    -        self.merge_force_collapse()
    -        assert len(self.pending) == 1
    -        assert self.pending[0].base == 0
    -        assert self.pending[0].len == self.listlength
    +    class ListSlice:
    +        "A sublist of a list."
     
    +        def __init__(self, list, base, len):
    +            self.list = list
    +            self.base = base
    +            self.len  = len
     
    -class ListSlice:
    -    "A sublist of a list."
    +        def copyitems(self):
    +            "Make a copy of the slice of the original list."
    +            start = self.base
    +            stop  = self.base + self.len
    +            assert 0 <= start <= stop     # annotator hint
    +            return ListSlice(self.list[start:stop], 0, self.len)
     
    -    def __init__(self, list, base, len):
    -        self.list = list
    -        self.base = base
    -        self.len  = len
    +        def advance(self, n):
    +            self.base += n
    +            self.len -= n
     
    -    def copyitems(self):
    -        "Make a copy of the slice of the original list."
    -        start = self.base
    -        stop  = self.base + self.len
    -        assert 0 <= start <= stop     # annotator hint
    -        return ListSlice(self.list[start:stop], 0, self.len)
    +        def popleft(self):
    +            result = self.list[self.base]
    +            self.base += 1
    +            self.len -= 1
    +            return result
     
    -    def advance(self, n):
    -        self.base += n
    -        self.len -= n
    +        def popright(self):
    +            self.len -= 1
    +            return self.list[self.base + self.len]
     
    -    def popleft(self):
    -        result = self.list[self.base]
    -        self.base += 1
    -        self.len -= 1
    -        return result
    +        def reverse(self):
    +            "Reverse the slice in-place."
    +            list = self.list
    +            lo = self.base
    +            hi = lo + self.len - 1
    +            while lo < hi:
    +                list[lo], list[hi] = list[hi], list[lo]
    +                lo += 1
    +                hi -= 1
    +    return TimSort
     
    -    def popright(self):
    -        self.len -= 1
    -        return self.list[self.base + self.len]
    -
    -    def reverse(self):
    -        "Reverse the slice in-place."
    -        list = self.list
    -        lo = self.base
    -        hi = lo + self.len - 1
    -        while lo < hi:
    -            list[lo], list[hi] = list[hi], list[lo]
    -            lo += 1
    -            hi -= 1
    +TimSort = make_timsort_class() #backward compatible interface
    
    From noreply at buildbot.pypy.org  Thu Sep  8 14:52:12 2011
    From: noreply at buildbot.pypy.org (l.diekmann)
    Date: Thu,  8 Sep 2011 14:52:12 +0200 (CEST)
    Subject: [pypy-commit] pypy default: merge
    Message-ID: <20110908125212.EF38E8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Lukas Diekmann 
    Branch: 
    Changeset: r47162:e6ecaadbcaf8
    Date: 2011-09-08 14:51 +0200
    http://bitbucket.org/pypy/pypy/changeset/e6ecaadbcaf8/
    
    Log:	merge
    
    diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
    --- a/pypy/module/micronumpy/interp_dtype.py
    +++ b/pypy/module/micronumpy/interp_dtype.py
    @@ -317,6 +317,17 @@
     class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype):
         pass
     
    +W_UInt8Dtype = create_low_level_dtype(
    +    num = 1, kind = SIGNEDLTR, name = "uint8",
    +    aliases = ["uint8"],
    +    applevel_types = [],
    +    T = rffi.UCHAR,
    +    valtype = rffi.UCHAR._type,
    +    expected_size = 1,
    +)
    +class W_UInt8Dtype(IntegerArithmeticDtype, W_UInt8Dtype):
    +    pass
    +
     W_Int16Dtype = create_low_level_dtype(
         num = 3, kind = SIGNEDLTR, name = "int16",
         aliases = ["int16"],
    @@ -368,6 +379,7 @@
     ALL_DTYPES = [
         W_BoolDtype,
         W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype,
    +    W_UInt8Dtype,
         W_Float64Dtype
     ]
     
    diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
    --- a/pypy/module/micronumpy/test/test_dtypes.py
    +++ b/pypy/module/micronumpy/test/test_dtypes.py
    @@ -12,6 +12,7 @@
             assert dtype(d) is d
             assert dtype(None) is dtype(float)
             raises(TypeError, dtype, 1042)
    +        assert dtype('uint8').num == 1
     
         def test_dtype_with_types(self):
             from numpy import dtype
    @@ -90,6 +91,15 @@
             for i in range(5):
                 assert b[i] == i * 2
     
    +    def test_add_uint8(self):
    +        from numpy import array, dtype
    +
    +        a = array(range(5), dtype="uint8")
    +        b = a + a
    +        assert b.dtype is dtype("uint8")
    +        for i in range(5):
    +            assert b[i] == i * 2
    +
         def test_add_int16(self):
             from numpy import array, dtype
     
    @@ -109,3 +119,15 @@
     
             # You can't subclass dtype
             raises(TypeError, type, "Foo", (dtype,), {})
    +
    +    def test_int_ranges(self):
    +        from numpy import array
    +        for dtype, minval, maxval in [("int8", -128, 127),
    +                                      ("uint8", 0, 255),
    +                                      ("int16", -32768, 32767)]:
    +            a = array([minval, maxval, minval-1, maxval+1], dtype)
    +            assert a[0] == minval
    +            assert a[1] == maxval
    +            assert a[2] == maxval
    +            assert a[3] == minval
    +            
    
    From noreply at buildbot.pypy.org  Thu Sep  8 18:39:42 2011
    From: noreply at buildbot.pypy.org (hager)
    Date: Thu,  8 Sep 2011 18:39:42 +0200 (CEST)
    Subject: [pypy-commit] pypy ppc-jit-backend: Added a basic test for register
    	allocation.
    Message-ID: <20110908163942.333BE8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: hager 
    Branch: ppc-jit-backend
    Changeset: r47163:5dbba4ebe9c8
    Date: 2011-09-08 15:22 +0200
    http://bitbucket.org/pypy/pypy/changeset/5dbba4ebe9c8/
    
    Log:	Added a basic test for register allocation.
    
    diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
    --- a/pypy/jit/backend/test/runner_test.py
    +++ b/pypy/jit/backend/test/runner_test.py
    @@ -113,6 +113,25 @@
             assert res == 3
             assert fail.identifier == 1
     
    +    def test_basic_register_allocator(self):
    +        i0 = BoxInt()
    +        operations = []
    +        prev_box = i0
    +        for _ in range(500):
    +            next_box = BoxInt()
    +            operations.append(
    +                ResOperation(rop.INT_ADD, [prev_box, ConstInt(1)], next_box))
    +            prev_box = next_box
    +        operations.append(ResOperation(rop.FINISH, [prev_box], None, descr=BasicFailDescr(1)))
    +        inputargs = [i0]
    +        looptoken = LoopToken()
    +        self.cpu.compile_loop(inputargs, operations, looptoken)
    +        self.cpu.set_future_value_int(0, 20)
    +        fail = self.cpu.execute_token(looptoken)
    +        res = self.cpu.get_latest_value_int(0)
    +        assert res == 520
    +        assert fail.identifier == 1
    +
         def test_compile_linear_float_loop(self):
             if not self.cpu.supports_floats:
                 py.test.skip("floats not supported")
    
    From noreply at buildbot.pypy.org  Thu Sep  8 18:39:43 2011
    From: noreply at buildbot.pypy.org (hager)
    Date: Thu,  8 Sep 2011 18:39:43 +0200 (CEST)
    Subject: [pypy-commit] pypy ppc-jit-backend: (arigo, hager):
    Message-ID: <20110908163943.6BEED8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: hager 
    Branch: ppc-jit-backend
    Changeset: r47164:f1da4802e995
    Date: 2011-09-08 15:29 +0200
    http://bitbucket.org/pypy/pypy/changeset/f1da4802e995/
    
    Log:	(arigo, hager): Started refactoring of the PPC cpu and assembler.
    	The goal is to have code which is similar to the ARM backend. Most
    	important: Use register allocator.
    
    diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
    --- a/pypy/jit/backend/llsupport/regalloc.py
    +++ b/pypy/jit/backend/llsupport/regalloc.py
    @@ -1,4 +1,3 @@
    -
     from pypy.jit.metainterp.history import Const, Box, REF
     from pypy.rlib.objectmodel import we_are_translated
     from pypy.jit.metainterp.resoperation import rop
    diff --git a/pypy/jit/backend/ppc/ppcgen/arch.py b/pypy/jit/backend/ppc/ppcgen/arch.py
    --- a/pypy/jit/backend/ppc/ppcgen/arch.py
    +++ b/pypy/jit/backend/ppc/ppcgen/arch.py
    @@ -10,5 +10,10 @@
         IS_PPC_32 = False
         IS_PPC_64 = True
     
    +ALL_REGS        = range(32)
     NONVOLATILES    = [2] + range(13, 32)
     VOLATILES       = [0] + range(3, 13)
    +
    +MY_COPY_OF_REGS = 0
    +
    +GPR_SAVE_AREA   = len(NONVOLATILES) * WORD
    diff --git a/pypy/jit/backend/ppc/ppcgen/locations.py b/pypy/jit/backend/ppc/ppcgen/locations.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/jit/backend/ppc/ppcgen/locations.py
    @@ -0,0 +1,62 @@
    +from pypy.jit.metainterp.history import INT, FLOAT, REF
    +from pypy.jit.backend.arm.arch import WORD
    +class AssemblerLocation(object):
    +    _immutable_ = True
    +    type = INT
    +
    +    def is_imm(self):
    +        return False
    +
    +    def is_stack(self):
    +        return False
    +
    +    def is_reg(self):
    +        return False
    +
    +    def is_vfp_reg(self):
    +        return False
    +
    +    def is_imm_float(self):
    +        return False
    +
    +    def as_key(self):
    +        raise NotImplementedError
    +
    +class RegisterLocation(AssemblerLocation):
    +    _immutable_ = True
    +    width = WORD
    +
    +    def __init__(self, value):
    +        self.value = value
    +
    +    def __repr__(self):
    +        return 'r%d' % self.value
    +
    +    def is_reg(self):
    +        return True
    +
    +    def as_key(self):
    +        return self.value
    +
    +class ImmLocation(AssemblerLocation):
    +    _immutable_ = True
    +    width = WORD
    +
    +
    +    def __init__(self, value):
    +        self.value = value
    +
    +    def getint(self):
    +        return self.value
    +
    +    def __repr__(self):
    +        return "imm(%d)" % (self.value)
    +
    +    def is_imm(self):
    +        return True
    +
    +    def as_key(self):
    +        return self.value + 40
    +
    +def imm(val):
    +    return ImmLocation(val)
    diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
    --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
    +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
    @@ -2,16 +2,25 @@
     import struct
     from pypy.jit.backend.ppc.ppcgen.ppc_form import PPCForm as Form
     from pypy.jit.backend.ppc.ppcgen.ppc_field import ppc_fields
    +from pypy.jit.backend.ppc.ppcgen.regalloc import (TempInt, PPCFrameManager,
    +                                                  Regalloc)
     from pypy.jit.backend.ppc.ppcgen.assembler import Assembler
     from pypy.jit.backend.ppc.ppcgen.symbol_lookup import lookup
    -from pypy.jit.backend.ppc.ppcgen.arch import IS_PPC_32, WORD, NONVOLATILES
    +from pypy.jit.backend.ppc.ppcgen.arch import (IS_PPC_32, WORD, NONVOLATILES,
    +                                              GPR_SAVE_AREA)
    +import pypy.jit.backend.ppc.ppcgen.register as r
     from pypy.jit.metainterp.history import Const, ConstPtr
     from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin
     from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager
    +from pypy.jit.backend.llsupport.regalloc import (RegisterManager, 
    +                                                 compute_vars_longevity)
     from pypy.jit.backend.llsupport import symbolic
    +from pypy.jit.backend.model import CompiledLoopToken
     from pypy.rpython.lltypesystem import lltype, rffi, rstr
     from pypy.jit.metainterp.resoperation import rop
    -from pypy.jit.metainterp.history import BoxInt, ConstInt, Box
    +from pypy.jit.metainterp.history import (BoxInt, ConstInt, ConstPtr,
    +                                         ConstFloat, Box, INT, REF, FLOAT)
    +from pypy.jit.backend.x86.support import values_array
     
     A = Form("frD", "frA", "frB", "XO3", "Rc")
     A1 = Form("frD", "frB", "XO3", "Rc")
    @@ -906,10 +915,13 @@
         return (w >> 16) & 0x0000FFFF
     
     class PPCBuilder(PPCAssembler):
    -    def __init__(self):
    +    def __init__(self, cpu, failargs_limit=1000):
             PPCAssembler.__init__(self)
    +        self.cpu = cpu
    +        self.fail_boxes_int = values_array(lltype.Signed, failargs_limit)
     
    -    def load_word(self, rD, word):
    +    def load_imm(self, rD, word):
    +        rD = rD.as_key()
             if word <= 32767 and word >= -32768:
                 self.li(rD, word)
             elif IS_PPC_32 or (word <= 2147483647 and word >= -2147483648):
    @@ -923,7 +935,7 @@
                 self.oris(rD, rD, high(word))
                 self.ori(rD, rD, lo(word))
     
    -    def load_from(self, rD, addr):
    +    def load_from_addr(self, rD, addr):
             if IS_PPC_32:
                 self.addis(rD, 0, ha(addr))
                 self.lwz(rD, rD, la(addr))
    @@ -932,26 +944,78 @@
                 self.ld(rD, rD, 0)
     
         def store_reg(self, source_reg, addr):
    -        self.load_word(0, addr)
    +        self.load_imm(r.r0, addr)
             if IS_PPC_32:
    -            self.stwx(source_reg, 0, 0)
    +            self.stwx(source_reg.value, 0, 0)
             else:
    -            self.stdx(source_reg, 0, 0)
    +            # ? 
    +            self.std(source_reg.value, 0, 10)
     
    -    def save_nonvolatiles(self, framesize):
    +    def _save_nonvolatiles(self):
             for i, reg in enumerate(NONVOLATILES):
    -            if IS_PPC_32:
    -                self.stw(reg, 1, framesize - WORD * i)
    +            self.stw(reg, 1, self.framesize - 4 * i)
    +
    +    def _restore_nonvolatiles(self):
    +        for i, reg in enumerate(NONVOLATILES):
    +            self.lwz(reg, 1, self.framesize - i * 4)
    +
    +    def _make_prologue(self):
    +        self.stwu(1, 1, -self.framesize)
    +        self.mflr(0)
    +        self.stw(0, 1, self.framesize + 4)
    +        self._save_nonvolatiles()
    +
    +    def _make_epilogue(self):
    +        self._restore_nonvolatiles()
    +
    +    def gen_bootstrap_code(self, nonfloatlocs, inputargs):
    +        for i in range(len(nonfloatlocs)):
    +            loc = nonfloatlocs[i]
    +            arg = inputargs[i]
    +            assert arg.type != FLOAT
    +            if arg.type == INT:
    +                addr = self.fail_boxes_int.get_addr_for_num(i)
    +            elif args.type == REF:
    +                addr = self.fail_boxes_ptr.get_addr_for_num(i)
                 else:
    -                self.std(reg, 1, framesize - WORD * i)
    +                assert 0, "%s not supported" % arg.type
    +            if loc.is_reg():
    +                reg = loc
    +            else:
    +                assert 0, "FIX LATER"
    +            self.load_from_addr(reg.value, addr)
     
    -    def restore_nonvolatiles(self, framesize):
    -        for i, reg in enumerate(NONVOLATILES):
    -            if IS_PPC_32:
    -                self.lwz(reg, 1, framesize - WORD * i)
    +    def assemble_loop(self, inputargs, operations, looptoken, log):
    +        self.framesize = 256 + GPR_SAVE_AREA
    +        clt = CompiledLoopToken(self.cpu, looptoken.number)
    +        looptoken.compiled_loop_token = clt
    +
    +        longevity = compute_vars_longevity(inputargs, operations)
    +        regalloc = Regalloc(longevity, assembler=self,
    +                            frame_manager=PPCFrameManager())
    +
    +        self._make_prologue()
    +        nonfloatlocs = regalloc.prepare_loop(inputargs, operations, looptoken)
    +        self.gen_bootstrap_code(nonfloatlocs, inputargs)
    +        self._walk_operations(operations, regalloc)
    +        looptoken.ppc_code = self.assemble(True)
    +
    +    def _walk_operations(self, operations, regalloc):
    +        while regalloc.position() < len(operations) - 1:
    +            regalloc.next_instruction()
    +            pos = regalloc.position()
    +            op = operations[pos]
    +            opnum = op.getopnum()
    +            if op.has_no_side_effect() and op.result not in regalloc.longevity:
    +                regalloc.possibly_free_vars_for_op(op)
                 else:
    -                self.ld(reg, 1, framesize - WORD * i)
    -        
    +                arglocs = regalloc.operations[opnum](regalloc, op)
    +                if arglocs is not None:
    +                    self.operations[opnum](self, op, arglocs, regalloc)
    +            if op.result:
    +                regalloc.possibly_free_var(op.result)
    +            regalloc.possibly_free_vars_for_op(op)
    +            regalloc._check_invariants()
     
         # translate a trace operation to corresponding machine code
         def build_op(self, trace_op, cpu):
    @@ -994,14 +1058,23 @@
             if isinstance(arg0, Box):
                 reg0 = cpu.reg_map[arg0]
             else:
    -            reg0 = cpu.get_next_register()
    +            #reg0 = cpu.get_next_register()
    +            box = TempInt()
    +            reg0 = cpu.rm.force_allocate_reg(box)
                 self.load_word(reg0, arg0.value)
             if isinstance(arg1, Box):
                 reg1 = cpu.reg_map[arg1]
             else:
    -            reg1 = cpu.get_next_register()
    +            #reg1 = cpu.get_next_register()
    +            #reg1 = cpu.rm.force_allocate_reg(arg1)
    +            box = TempInt()
    +            reg1 = cpu.rm.force_allocate_reg(box)
    +            boxed = cpu.rm.make_sure_var_in_reg(box)
                 self.load_word(reg1, arg1.value)
    -        free_reg = cpu.next_free_register
    +            import pdb; pdb.set_trace()
    +        #free_reg = cpu.next_free_register
    +        free_reg = cpu.rm.force_allocate_reg(op.result)
    +
             return free_reg, reg0, reg1
     
         def _int_op_epilog(self, op, cpu, result_reg):
    @@ -1045,8 +1118,14 @@
         #             CODE GENERATION             #
         # --------------------------------------- #
     
    -    def emit_int_add(self, op, cpu, reg0, reg1, free_reg):
    -        self.add(free_reg, reg0, reg1)
    +    def emit_int_add(self, op, arglocs, regalloc):
    +        l0, l1, res = arglocs
    +        if l0.is_imm():
    +            self.addi(res.value, l1.value, l0.value)
    +        elif l1.is_imm():
    +            self.addi(res.value, l0.value, l1.value)
    +        else:
    +            self.add(res.value, l0.value, l1.value)
     
         def emit_int_add_ovf(self, op, cpu, reg0, reg1, free_reg):
             self.addo(free_reg, reg0, reg1)
    @@ -1419,7 +1498,10 @@
             arg_reg = 3
             for arg in args:
                 if isinstance(arg, Box):
    -                self.mr(arg_reg, cpu.reg_map[arg])
    +                try:
    +                    self.mr(arg_reg, cpu.reg_map[arg])
    +                except KeyError:
    +                    self.lwz(arg_reg, 1, cpu.mem_map[arg])
                 elif isinstance(arg, Const):
                     self.load_word(arg_reg, arg.value)
                 else:
    @@ -1435,16 +1517,15 @@
                 for i, arg in enumerate(remaining_args):
                     if isinstance(arg, Box):
                         #self.mr(0, cpu.reg_map[arg])
    -                    if IS_PPC_32:
    +                    try:
                             self.stw(cpu.reg_map[arg], 1, 8 + WORD * i)
    -                    else:
    -                        self.std(cpu.reg_map[arg], 1, 8 + WORD * i)
    +                    except KeyError:
    +                        self.load_word(0, cpu.mem_map[arg])
    +                        self.lwzx(0, 1, 0)
    +                        self.stw(0, 1, 8 + WORD * i)
                     elif isinstance(arg, Const):
                         self.load_word(0, arg.value)
    -                    if IS_PPC_32:
    -                        self.stw(0, 1, 8 + WORD * i)
    -                    else:
    -                        self.std(0, 1, 8 + WORD * i)
    +                    self.stw(0, 1, 8 + WORD * i)
                     else:
                         assert 0, "%s not supported yet" % arg
     
    @@ -1584,34 +1665,23 @@
     
         #_____________________________________
     
    -    def emit_finish(self, op, cpu):
    +    def emit_finish(self, op, arglocs, regalloc):
             descr = op.getdescr()
    -        identifier = self._get_identifier_from_descr(descr, cpu)
    -        cpu.saved_descr[identifier] = descr
    +        identifier = self._get_identifier_from_descr(descr, self.cpu)
    +        self.cpu.saved_descr[identifier] = descr
             args = op.getarglist()
    -        for index, arg in enumerate(args):
    -            if isinstance(arg, Box):
    -                regnum = cpu.reg_map[arg]
    -                addr = cpu.fail_boxes_int.get_addr_for_num(index)
    -                self.store_reg(regnum, addr)
    -            elif isinstance(arg, ConstInt):
    -                addr = cpu.fail_boxes_int.get_addr_for_num(index)
    -                self.load_word(cpu.next_free_register, arg.value)
    -                self.store_reg(cpu.next_free_register, addr)
    -            else:
    -                assert 0, "arg type not suported"
    +        for index, arg in enumerate(arglocs):
    +            addr = self.fail_boxes_int.get_addr_for_num(index)
    +            self.store_reg(arg, addr)
     
    -        framesize = 16 * WORD + 20 * WORD
    +        framesize = 256 + GPR_SAVE_AREA
     
    -        self.restore_nonvolatiles(framesize)
    +        self._restore_nonvolatiles()
     
    -        if IS_PPC_32:
    -            self.lwz(0, 1, framesize + WORD) # 36
    -        else:
    -            self.ld(0, 1, framesize + WORD) # 36
    +        self.lwz(0, 1, framesize + 4) # 36
             self.mtlr(0)
             self.addi(1, 1, framesize)
    -        self.load_word(3, identifier)
    +        self.load_imm(r.r3, identifier)
             self.blr()
     
         def emit_jump(self, op, cpu):
    @@ -1694,7 +1764,7 @@
                 oplist[val] = not_implemented
         return oplist
     
    -PPCBuilder.oplist = make_operations()
    +PPCBuilder.operations = make_operations()
     
     if __name__ == '__main__':
         main()
    diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py
    @@ -0,0 +1,218 @@
    +from pypy.jit.backend.llsupport.regalloc import (RegisterManager, FrameManager,
    +                                                 TempBox, compute_vars_longevity,
    +                                                 compute_loop_consts)
    +from pypy.jit.backend.ppc.ppcgen.arch import (WORD, MY_COPY_OF_REGS)
    +from pypy.jit.metainterp.history import INT, REF, Const, ConstInt, ConstPtr
    +from pypy.jit.metainterp.resoperation import rop
    +from pypy.jit.backend.ppc.ppcgen import locations
    +from pypy.rpython.lltypesystem import rffi, lltype
    +import pypy.jit.backend.ppc.ppcgen.register as r
    +
    +class TempInt(TempBox):
    +    type = INT
    +
    +    def __repr__(self):
    +        return "" % (id(self),)
    +
    +class TempPtr(TempBox):
    +    type = REF
    +
    +    def __repr__(self):
    +        return "" % (id(self),)
    +
    +class PPCRegisterManager(RegisterManager):
    +    all_regs              = r.ALL_REGS
    +    box_types             = None       # or a list of acceptable types
    +    no_lower_byte_regs    = all_regs
    +    save_around_call_regs = r.VOLATILES
    +
    +    REGLOC_TO_COPY_AREA_OFS = {
    +        r.r0:   MY_COPY_OF_REGS + 0 * WORD,
    +        r.r2:   MY_COPY_OF_REGS + 1 * WORD,
    +        r.r3:   MY_COPY_OF_REGS + 2 * WORD,
    +        r.r4:   MY_COPY_OF_REGS + 3 * WORD,
    +        r.r5:   MY_COPY_OF_REGS + 4 * WORD,
    +        r.r6:   MY_COPY_OF_REGS + 5 * WORD,
    +        r.r7:   MY_COPY_OF_REGS + 6 * WORD,
    +        r.r8:   MY_COPY_OF_REGS + 7 * WORD,
    +        r.r9:   MY_COPY_OF_REGS + 8 * WORD,
    +        r.r10:  MY_COPY_OF_REGS + 9 * WORD,
    +        r.r11:  MY_COPY_OF_REGS + 10 * WORD,
    +        r.r12:  MY_COPY_OF_REGS + 11 * WORD,
    +        r.r13:  MY_COPY_OF_REGS + 12 * WORD,
    +        r.r14:  MY_COPY_OF_REGS + 13 * WORD,
    +        r.r15:  MY_COPY_OF_REGS + 14 * WORD,
    +        r.r16:  MY_COPY_OF_REGS + 15 * WORD,
    +        r.r17:  MY_COPY_OF_REGS + 16 * WORD,
    +        r.r18:  MY_COPY_OF_REGS + 17 * WORD,
    +        r.r19:  MY_COPY_OF_REGS + 18 * WORD,
    +        r.r20:  MY_COPY_OF_REGS + 19 * WORD,
    +        r.r21:  MY_COPY_OF_REGS + 20 * WORD,
    +        r.r22:  MY_COPY_OF_REGS + 21 * WORD,
    +        r.r23:  MY_COPY_OF_REGS + 22 * WORD,
    +        r.r24:  MY_COPY_OF_REGS + 23 * WORD,
    +        r.r25:  MY_COPY_OF_REGS + 24 * WORD,
    +        r.r26:  MY_COPY_OF_REGS + 25 * WORD,
    +        r.r27:  MY_COPY_OF_REGS + 26 * WORD,
    +        r.r28:  MY_COPY_OF_REGS + 27 * WORD,
    +        r.r29:  MY_COPY_OF_REGS + 28 * WORD,
    +        r.r30:  MY_COPY_OF_REGS + 29 * WORD,
    +        r.r31:  MY_COPY_OF_REGS + 30 * WORD,
    +    }
    +
    +    def __init__(self, longevity, frame_manager=None, assembler=None):
    +        RegisterManager.__init__(self, longevity, frame_manager, assembler)
    +
    +    def call_result_location(self, v):
    +        return r.r3
    +
    +    def convert_to_imm(self, c):
    +        if isinstance(c, ConstInt):
    +            return locations.ImmLocation(c.value)
    +        else:
    +            assert isinstance(c, ConstPtr)
    +            return locations.ImmLocation(rffi.cast(lltype.Signed, c.value))
    +
    +class PPCFrameManager(FrameManager):
    +    def __init__(self):
    +        FrameManager.__init__(self)
    +        self.frame_depth = 1
    +
    +class Regalloc(object):
    +    def __init__(self, longevity, frame_manager=None, assembler=None):
    +        self.cpu = assembler.cpu
    +        self.longevity = longevity
    +        self.frame_manager = frame_manager
    +        self.assembler = assembler
    +        self.rm = PPCRegisterManager(longevity, frame_manager, assembler)
    +
    +    def prepare_loop(self, inputargs, operations, looptoken):
    +        loop_consts = compute_loop_consts(inputargs, operations[-1], looptoken)
    +        inputlen = len(inputargs)
    +        nonfloatlocs = [None] * len(inputargs)
    +        for i in range(inputlen):
    +            arg = inputargs[i]
    +            assert not isinstance(arg, Const)
    +            if arg not in loop_consts and self.longevity[arg][1] > -1:
    +                self.try_allocate_reg(arg)
    +            loc = self.loc(arg)
    +            nonfloatlocs[i] = loc
    +        self.possibly_free_vars(inputargs)
    +        return nonfloatlocs
    +
    +    def possibly_free_var(self, var):
    +        self.rm.possibly_free_var(var)
    +
    +    def possibly_free_vars(self, vars):
    +        for var in vars:
    +            self.possibly_free_var(var)
    +
    +    def possibly_free_vars_for_op(self, op):
    +        for i in range(op.numargs()):
    +            var = op.getarg(i)
    +            if var is not None:
    +                self.possibly_free_var(var)
    +
    +    def try_allocate_reg(self, v, selected_reg=None, need_lower_byte=False):
    +        return self.rm.try_allocate_reg(v, selected_reg, need_lower_byte)
    +
    +    def force_allocate_reg(self, var, forbidden_vars=[], selected_reg=None, 
    +            need_lower_byte=False):
    +        return self.rm.force_allocate_reg(var, forbidden_vars, selected_reg,
    +                need_lower_byte)
    +
    +    def _check_invariants(self):
    +        self.rm._check_invariants()
    +
    +    def loc(self, var):
    +        return self.rm.loc(var)
    +
    +    def position(self):
    +        return self.rm.position
    +
    +    def next_instruction(self):
    +        self.rm.next_instruction()
    +
    +    def _check_imm_arg(self, arg):
    +        return isinstance(arg, ConstInt)
    +
    +    def _ensure_value_is_boxed(self, thing, forbidden_vars=[]):
    +        box = None
    +        loc = None
    +        if isinstance(thing, Const):
    +            if isinstance(thing, ConstPtr):
    +                box = TempPtr()
    +            else:
    +                box = TempInt()
    +            loc = self.force_allocate_reg(box, forbidden_vars=forbidden_vars)
    +            imm = self.rm.convert_to_imm(thing)
    +            self.assembler.load_word(loc, imm)
    +        else:
    +            loc = self.make_sure_var_in_reg(thing,
    +                    forbidden_vars=forbidden_vars)
    +            box = thing
    +        return loc, box
    +
    +    def make_sure_var_in_reg(self, var, forbidden_vars=[],
    +                             selected_reg=None, need_lower_byte=False):
    +        return self.rm.make_sure_var_in_reg(var, forbidden_vars,
    +                selected_reg, need_lower_byte)
    +
    +    # ******************************************************
    +    # *         P R E P A R E  O P E R A T I O N S         * 
    +    # ******************************************************
    +
    +    def prepare_int_add(self, op):
    +        boxes = op.getarglist()
    +        b0, b1 = boxes
    +        imm_b0 = self._check_imm_arg(b0)
    +        imm_b1 = self._check_imm_arg(b1)
    +        if not imm_b0 and imm_b1:
    +            l0, box = self._ensure_value_is_boxed(b0)
    +            l1 = self.make_sure_var_in_reg(b1, [b0])
    +            boxes.append(box)
    +        elif imm_b0 and not imm_b1:
    +            l0 = self.make_sure_var_in_reg(b0)
    +            l1, box = self._ensure_value_is_boxed(b1, [b0])
    +            boxes.append(box)
    +        else:
    +            l0, box = self._ensure_value_is_boxed(b0)
    +            boxes.append(box)
    +            l1, box = self._ensure_value_is_boxed(b1, [box])
    +            boxes.append(box)
    +        #return [l0, l1], boxes
    +        locs = [l0, l1]
    +        self.possibly_free_vars(boxes)
    +        res = self.force_allocate_reg(op.result)
    +        return locs + [res]
    +
    +    def prepare_finish(self, op):
    +        #args = [locations.imm(self.frame_manager.frame_depth)]
    +        args = []
    +        for i in range(op.numargs()):
    +            arg = op.getarg(i)
    +            if arg:
    +                args.append(self.loc(arg))
    +                self.possibly_free_var(arg)
    +            else:
    +                args.append(None)
    +        return args
    +
    +def make_operation_list():
    +    def not_implemented(self, op, *args):
    +        raise NotImplementedError, op
    +
    +    operations = [None] * (rop._LAST + 1)
    +    for key, val in rop.__dict__.items():
    +        key = key.lower()
    +        if key.startswith("_"):
    +            continue
    +        methname = "prepare_%s" % key
    +        if hasattr(Regalloc, methname):
    +            func = getattr(Regalloc, methname).im_func
    +        else:
    +            func = not_implemented
    +        operations[val] = func
    +    return operations
    +
    +Regalloc.operations = make_operation_list()
    diff --git a/pypy/jit/backend/ppc/ppcgen/register.py b/pypy/jit/backend/ppc/ppcgen/register.py
    new file mode 100644
    --- /dev/null
    +++ b/pypy/jit/backend/ppc/ppcgen/register.py
    @@ -0,0 +1,11 @@
    +from pypy.jit.backend.ppc.ppcgen.locations import RegisterLocation
    +
    +ALL_REGS = [RegisterLocation(i) for i in range(32)]
    +
    +r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, r16,\
    +    r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, r30, r31\
    +    = ALL_REGS
    +
    +NONVOLATILES    = [r2, r13, r14, r15, r16, r17, r18, r19, r20, r21, r22, r23,
    +                    r24, r25, r26, r27, r28, r29, r30, r31]
    +VOLATILES       = [r0, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12]
    diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py
    --- a/pypy/jit/backend/ppc/runner.py
    +++ b/pypy/jit/backend/ppc/runner.py
    @@ -12,7 +12,8 @@
     from pypy.jit.backend.x86 import regloc
     from pypy.jit.backend.x86.support import values_array
     from pypy.jit.backend.ppc.ppcgen.ppc_assembler import PPCBuilder
    -from pypy.jit.backend.ppc.ppcgen.arch import IS_PPC_32, NONVOLATILES
    +from pypy.jit.backend.ppc.ppcgen.arch import NONVOLATILES, GPR_SAVE_AREA, WORD
    +from pypy.jit.backend.ppc.ppcgen.regalloc import PPCRegisterManager, PPCFrameManager
     import sys
     
     from pypy.tool.ansi_print import ansi_log
    @@ -28,48 +29,15 @@
             AbstractLLCPU.__init__(self, rtyper, stats, opts,
                                    translate_support_code, gcdescr)
     
    -        # pointer to an array of ints
    -        # XXX length of the integer array is 1000 for now
    -        self.fail_boxes_int = values_array(lltype.Signed, 1000)
    -
             # floats are not supported yet
             self.supports_floats = False
             self.total_compiled_loops = 0
             self.total_compiled_bridges = 0
    +        self.asm = PPCBuilder(self)
     
    -    # compile a given trace
    -    def compile_loop(self, inputargs, operations, looptoken, log=True):
    +    def compile_loop(self, inputargs, operations, looptoken, log=False):
             self.saved_descr = {}
    -        self.patch_list = []
    -        self.reg_map = {}
    -        self.fail_box_count = 0
    -
    -        codebuilder = PPCBuilder()
    -        
    -        # function prologue
    -        self._make_prologue(codebuilder)
    -
    -        # initialize registers from memory
    -        self.next_free_register = 3
    -        for index, arg in enumerate(inputargs):
    -            self.reg_map[arg] = self.next_free_register
    -            addr = self.fail_boxes_int.get_addr_for_num(index)
    -            codebuilder.load_from(self.next_free_register, addr)
    -            self.next_free_register += 1
    -        
    -        self.startpos = codebuilder.get_relative_pos()
    -
    -        # generate code for operations
    -        self._walk_trace_ops(codebuilder, operations)
    -
    -        # function epilogue
    -        self._make_epilogue(codebuilder)
    -
    -        f = codebuilder.assemble()
    -        looptoken.ppc_code = f
    -        looptoken.codebuilder = codebuilder
    -        self.total_compiled_loops += 1
    -        self.teardown()
    +        self.asm.assemble_loop(inputargs, operations, looptoken, log)
     
         def compile_bridge(self, descr, inputargs, operations, looptoken):
             self.saved_descr = {}
    @@ -110,18 +78,7 @@
             self.next_free_register += 1
             return reg
     
    -    def _make_prologue(self, codebuilder):
    -        framesize = 16 * WORD + 20 * WORD
    -        if IS_PPC_32:
    -            codebuilder.stwu(1, 1, -framesize)
    -            codebuilder.mflr(0)
    -            codebuilder.stw(0, 1, framesize + WORD)
    -        else:
    -            codebuilder.stdu(1, 1, -framesize)
    -            codebuilder.mflr(0)
    -            codebuilder.std(0, 1, framesize + WORD)
    -        codebuilder.save_nonvolatiles(framesize)
    -
    +    # XXX not used by now, move to ppc_assembler
         def _make_epilogue(self, codebuilder):
             for op_index, fail_index, guard, reglist in self.patch_list:
                 curpos = codebuilder.get_relative_pos()
    @@ -147,22 +104,17 @@
                 descr.patch_pos = patch_pos
                 descr.used_mem_indices = used_mem_indices
     
    -            framesize = 16 * WORD + 20 * WORD
    -            codebuilder.restore_nonvolatiles(framesize)
    +            codebuilder.restore_nonvolatiles(self.framesize)
     
    -            if IS_PPC_32:
    -                codebuilder.lwz(0, 1, framesize + WORD) # 36
    -            else:
    -                codebuilder.ld(0, 1, framesize + WORD) # 36
    +            codebuilder.lwz(0, 1, self.framesize + 4)
                 codebuilder.mtlr(0)
    -            codebuilder.addi(1, 1, framesize)
    -
    +            codebuilder.addi(1, 1, self.framesize)
                 codebuilder.li(3, fail_index)            
                 codebuilder.blr()
     
         # set value in fail_boxes_int
         def set_future_value_int(self, index, value_int):
    -        self.fail_boxes_int.setitem(index, value_int)
    +        self.asm.fail_boxes_int.setitem(index, value_int)
     
         def set_future_value_ref(self, index, pointer):
             sign_ptr = rffi.cast(lltype.Signed, pointer)
    @@ -174,8 +126,9 @@
     
         # executes the stored machine code in the token
         def execute_token(self, looptoken):   
    -        descr_index = looptoken.ppc_code()
    -        return self.saved_descr[descr_index]
    +        addr = looptoken.ppc_code
    +        fail_index = addr()
    +        return self.saved_descr[fail_index]
     
         # return the number of values that can be returned
         def get_latest_value_count(self):
    @@ -183,7 +136,7 @@
     
         # fetch the result of the computation and return it
         def get_latest_value_int(self, index):
    -        value = self.fail_boxes_int.getitem(index)
    +        value = self.asm.fail_boxes_int.getitem(index)
             return value
     
         def get_latest_value_ref(self, index):
    
    From noreply at buildbot.pypy.org  Thu Sep  8 18:39:44 2011
    From: noreply at buildbot.pypy.org (hager)
    Date: Thu,  8 Sep 2011 18:39:44 +0200 (CEST)
    Subject: [pypy-commit] pypy ppc-jit-backend: merge
    Message-ID: <20110908163944.9AA1C8203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: hager 
    Branch: ppc-jit-backend
    Changeset: r47165:1965c8f56c96
    Date: 2011-09-08 15:48 +0200
    http://bitbucket.org/pypy/pypy/changeset/1965c8f56c96/
    
    Log:	merge
    
    diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py
    --- a/pypy/jit/backend/ppc/runner.py
    +++ b/pypy/jit/backend/ppc/runner.py
    @@ -107,6 +107,10 @@
                 codebuilder.restore_nonvolatiles(self.framesize)
     
                 codebuilder.lwz(0, 1, self.framesize + 4)
    +            if IS_PPC_32:
    +                codebuilder.lwz(0, 1, framesize + WORD) # 36
    +            else:
    +                codebuilder.ld(0, 1, framesize + WORD) # 36
                 codebuilder.mtlr(0)
                 codebuilder.addi(1, 1, self.framesize)
                 codebuilder.li(3, fail_index)            
    
    From noreply at buildbot.pypy.org  Thu Sep  8 18:40:42 2011
    From: noreply at buildbot.pypy.org (antocuni)
    Date: Thu,  8 Sep 2011 18:40:42 +0200 (CEST)
    Subject: [pypy-commit] pypy.org extradoc: (vak) add some docs about creating
    	a tarball
    Message-ID: <20110908164042.CB7878203C@wyvern.cs.uni-duesseldorf.de>
    
    Author: Antonio Cuni 
    Branch: extradoc
    Changeset: r257:426ba9681185
    Date: 2011-09-08 16:23 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/426ba9681185/
    
    Log:	(vak) add some docs about creating a tarball
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
    @@ -57,6 +57,7 @@
     
  • Installing (optional)
  • Building from source
  • +
  • Packaging
  • Checksums
@@ -168,6 +169,14 @@
+
+

Packaging

+

Once PyPy is translated from source the binary package similar to those provided in the section Default (with a JIT Compiler) above could be easily created with package.py script as following:

+
+cd ./pypy/pypy/tool/release/
+python package.py ../../.. pypy-my-own-package-name
+
+

Checksums

Here are the checksums for each of the downloads (md5 and sha1):

@@ -238,4 +247,4 @@
- \ No newline at end of file + diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -24,6 +24,7 @@ * `Installing`_ (optional) * `Building from source`_ + * `Packaging`_ * `Checksums`_ .. _`Default (with a JIT Compiler)`: @@ -167,6 +168,15 @@ .. _`nightly binary builds`: http://buildbot.pypy.org/nightly/trunk/ .. _`shadow stack`: http://pypy.readthedocs.org/en/latest/config/translation.gcrootfinder.html + +Packaging +--------- + +Once PyPy is translated from source the binary package similar to those provided in the section `Default (with a JIT Compiler)`_ above could be easily created with ``package.py`` script as following:: + + cd ./pypy/pypy/tool/release/ + python package.py ../../.. pypy-my-own-package-name + Checksums --------- From noreply at buildbot.pypy.org Thu Sep 8 18:41:15 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 8 Sep 2011 18:41:15 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Accidentally killed some of David's patches. Message-ID: <20110908164115.1291A8203C@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r47166:0b04014b188e Date: 2011-09-08 16:46 +0200 http://bitbucket.org/pypy/pypy/changeset/0b04014b188e/ Log: Accidentally killed some of David's patches. Readded them to runner.py and ppc_assembler.py. diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -948,21 +948,31 @@ if IS_PPC_32: self.stwx(source_reg.value, 0, 0) else: - # ? 
- self.std(source_reg.value, 0, 10) + self.std(source_reg.value, 0, 0) def _save_nonvolatiles(self): for i, reg in enumerate(NONVOLATILES): - self.stw(reg, 1, self.framesize - 4 * i) + if IS_PPC_32: + self.stw(reg, 1, self.framesize - WORD * i) + else: + self.ld(reg, 1, self.framesize - WORD * i) def _restore_nonvolatiles(self): for i, reg in enumerate(NONVOLATILES): - self.lwz(reg, 1, self.framesize - i * 4) + if IS_PPC_32: + self.lwz(reg, 1, self.framesize - WORD * i) + else: + self.ld(reg, 1, self.framesize - WORD * i) def _make_prologue(self): - self.stwu(1, 1, -self.framesize) - self.mflr(0) - self.stw(0, 1, self.framesize + 4) + if IS_PPC_32: + self.stwu(1, 1, -self.framesize) + self.mflr(0) + self.stw(0, 1, self.framesize + 4) + else: + self.stdu(1, 1, -self.framesize) + self.mflr(0) + self.std(0, 1, self.framesize + 4) self._save_nonvolatiles() def _make_epilogue(self): @@ -998,7 +1008,7 @@ nonfloatlocs = regalloc.prepare_loop(inputargs, operations, looptoken) self.gen_bootstrap_code(nonfloatlocs, inputargs) self._walk_operations(operations, regalloc) - looptoken.ppc_code = self.assemble(True) + looptoken.ppc_code = self.assemble() def _walk_operations(self, operations, regalloc): while regalloc.position() < len(operations) - 1: @@ -1678,7 +1688,10 @@ self._restore_nonvolatiles() - self.lwz(0, 1, framesize + 4) # 36 + if IS_PPC_32: + self.lwz(0, 1, self.framesize + WORD) + else: + self.ld(0, 1, framesize + WORD) self.mtlr(0) self.addi(1, 1, framesize) self.load_imm(r.r3, identifier) From noreply at buildbot.pypy.org Thu Sep 8 20:03:52 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 8 Sep 2011 20:03:52 +0200 (CEST) Subject: [pypy-commit] pypy default: Backed out changeset 12d91e2900e9 Message-ID: <20110908180352.90B328203C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r47167:c5238eb065e4 Date: 2011-09-08 19:16 +0200 http://bitbucket.org/pypy/pypy/changeset/c5238eb065e4/ Log: Backed out changeset 12d91e2900e9 diff --git 
a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -317,17 +317,6 @@ class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): pass -W_UInt8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "uint8", - aliases = ["uint8"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(IntegerArithmeticDtype, W_UInt8Dtype): - pass - W_Int16Dtype = create_low_level_dtype( num = 3, kind = SIGNEDLTR, name = "int16", aliases = ["int16"], @@ -379,7 +368,6 @@ ALL_DTYPES = [ W_BoolDtype, W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, - W_UInt8Dtype, W_Float64Dtype ] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -12,7 +12,6 @@ assert dtype(d) is d assert dtype(None) is dtype(float) raises(TypeError, dtype, 1042) - assert dtype('uint8').num == 1 def test_dtype_with_types(self): from numpy import dtype @@ -91,15 +90,6 @@ for i in range(5): assert b[i] == i * 2 - def test_add_uint8(self): - from numpy import array, dtype - - a = array(range(5), dtype="uint8") - b = a + a - assert b.dtype is dtype("uint8") - for i in range(5): - assert b[i] == i * 2 - def test_add_int16(self): from numpy import array, dtype @@ -119,15 +109,3 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) - - def test_int_ranges(self): - from numpy import array - for dtype, minval, maxval in [("int8", -128, 127), - ("uint8", 0, 255), - ("int16", -32768, 32767)]: - a = array([minval, maxval, minval-1, maxval+1], dtype) - assert a[0] == minval - assert a[1] == maxval - assert a[2] == maxval - assert a[3] == minval - From noreply at buildbot.pypy.org Thu Sep 8 20:03:53 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 8 Sep 
2011 20:03:53 +0200 (CEST) Subject: [pypy-commit] pypy default: hg merge Message-ID: <20110908180353.C163782213@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r47168:1df75f76fa6b Date: 2011-09-08 19:17 +0200 http://bitbucket.org/pypy/pypy/changeset/1df75f76fa6b/ Log: hg merge diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -317,17 +317,6 @@ class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype): pass -W_UInt8Dtype = create_low_level_dtype( - num = 1, kind = SIGNEDLTR, name = "uint8", - aliases = ["uint8"], - applevel_types = [], - T = rffi.UCHAR, - valtype = rffi.UCHAR._type, - expected_size = 1, -) -class W_UInt8Dtype(IntegerArithmeticDtype, W_UInt8Dtype): - pass - W_Int16Dtype = create_low_level_dtype( num = 3, kind = SIGNEDLTR, name = "int16", aliases = ["int16"], @@ -379,7 +368,6 @@ ALL_DTYPES = [ W_BoolDtype, W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype, - W_UInt8Dtype, W_Float64Dtype ] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -12,7 +12,6 @@ assert dtype(d) is d assert dtype(None) is dtype(float) raises(TypeError, dtype, 1042) - assert dtype('uint8').num == 1 def test_dtype_with_types(self): from numpy import dtype @@ -91,15 +90,6 @@ for i in range(5): assert b[i] == i * 2 - def test_add_uint8(self): - from numpy import array, dtype - - a = array(range(5), dtype="uint8") - b = a + a - assert b.dtype is dtype("uint8") - for i in range(5): - assert b[i] == i * 2 - def test_add_int16(self): from numpy import array, dtype @@ -119,15 +109,3 @@ # You can't subclass dtype raises(TypeError, type, "Foo", (dtype,), {}) - - def test_int_ranges(self): - from numpy import array - for dtype, minval, maxval in [("int8", -128, 127), - ("uint8", 0, 255), 
- ("int16", -32768, 32767)]: - a = array([minval, maxval, minval-1, maxval+1], dtype) - assert a[0] == minval - assert a[1] == maxval - assert a[2] == maxval - assert a[3] == minval - From noreply at buildbot.pypy.org Thu Sep 8 20:51:00 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 8 Sep 2011 20:51:00 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: jit support for uint_mod Message-ID: <20110908185100.5B9FD8203C@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: unsigned-dtypes Changeset: r47169:c13e01ca3ae7 Date: 2011-09-08 20:50 +0200 http://bitbucket.org/pypy/pypy/changeset/c13e01ca3ae7/ Log: jit support for uint_mod diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -440,6 +440,7 @@ rewrite_op_ullong_mod_zer = _do_builtin_call rewrite_op_gc_identityhash = _do_builtin_call rewrite_op_gc_id = _do_builtin_call + rewrite_op_uint_mod = _do_builtin_call # ---------- # getfield/setfield/mallocs etc. 
diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -366,6 +366,9 @@ raise ZeroDivisionError return llop.ullong_mod(lltype.SignedLongLong, xll, yll) +def _ll_2_uint_mod(xll, yll): + return llop.uint_mod(lltype.Unsigned, xll, yll) + # libffi support # -------------- diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2953,6 +2953,19 @@ res = self.meta_interp(f, [32]) assert res == f(32) self.check_loops(arraylen_gc=2) + + def test_ulonglong_mod(self): + myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i']) + def f(n): + sa = i = rffi.cast(rffi.ULONGLONG, 1) + while i < rffi.cast(rffi.ULONGLONG, n): + myjitdriver.jit_merge_point(sa=sa, n=n, i=i) + sa += sa % i + i += 1 + res = self.meta_interp(f, [32]) + assert res == f(32) + + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -2,7 +2,7 @@ from pypy.module.micronumpy import interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject) -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_Int64Dtype +from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_Int64Dtype, W_UInt64Dtype from pypy.module.micronumpy.interp_numarray import (BaseArray, SingleDimArray, SingleDimSlice, scalar_w) from pypy.rlib.nonconst import NonConstant @@ -15,6 +15,7 @@ cls.space = FakeSpace() cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) cls.int64_dtype = cls.space.fromcache(W_Int64Dtype) + cls.uint64_dtype = cls.space.fromcache(W_UInt64Dtype) def test_add(self): def f(i): @@ -303,6 +304,26 @@ 'int_lt': 1, 'guard_true': 1, 'jump': 1}) assert 
result == 11.0 + def test_uint64_mod(self): + space = self.space + float64_dtype = self.float64_dtype + uint64_dtype = self.uint64_dtype + + def f(n): + if NonConstant(False): + dtype = uint64_dtype + else: + dtype = float64_dtype + ar = SingleDimArray(n, dtype=dtype) + for i in range(n): + ar.get_concrete().setitem(i, uint64_dtype.box(7)) + v = ar.descr_mod(space, ar).descr_sum(space) + assert isinstance(v, FloatObject) + return v.floatval + + result = self.meta_interp(f, [5], listops=True, backendopt=True) + assert result == f(5) + class TestTranslation(object): def test_compile(self): x = numpy_compile('aa+f*f/a-', 10) From noreply at buildbot.pypy.org Thu Sep 8 21:34:42 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 8 Sep 2011 21:34:42 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: fix the test I think Message-ID: <20110908193442.9999E8203C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47170:67ef7976413c Date: 2011-09-08 13:34 -0600 http://bitbucket.org/pypy/pypy/changeset/67ef7976413c/ Log: fix the test I think diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -1,7 +1,7 @@ from pypy.jit.metainterp.test.support import LLJitMixin from pypy.module.micronumpy import interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, - FloatObject) + FloatObject, IntObject) from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_Int64Dtype, W_UInt64Dtype from pypy.module.micronumpy.interp_numarray import (BaseArray, SingleDimArray, SingleDimSlice, scalar_w) @@ -311,15 +311,17 @@ def f(n): if NonConstant(False): + dtype = float64_dtype + else: dtype = uint64_dtype - else: - dtype = float64_dtype ar = SingleDimArray(n, dtype=dtype) - for i in range(n): + i = 0 + while i < n: ar.get_concrete().setitem(i, 
uint64_dtype.box(7)) - v = ar.descr_mod(space, ar).descr_sum(space) - assert isinstance(v, FloatObject) - return v.floatval + i += 1 + v = ar.descr_add(space, ar).descr_sum(space) + assert isinstance(v, IntObject) + return v.intval result = self.meta_interp(f, [5], listops=True, backendopt=True) assert result == f(5) From noreply at buildbot.pypy.org Thu Sep 8 23:02:51 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 8 Sep 2011 23:02:51 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: set mod test back to using mod. Still fails, but we can translate on 64-bit right now. Message-ID: <20110908210251.C4D7B8203C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47171:98070aa15afd Date: 2011-09-08 15:02 -0600 http://bitbucket.org/pypy/pypy/changeset/98070aa15afd/ Log: set mod test back to using mod. Still fails, but we can translate on 64-bit right now. diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -319,7 +319,7 @@ while i < n: ar.get_concrete().setitem(i, uint64_dtype.box(7)) i += 1 - v = ar.descr_add(space, ar).descr_sum(space) + v = ar.descr_mod(space, ar).descr_sum(space) assert isinstance(v, IntObject) return v.intval From noreply at buildbot.pypy.org Thu Sep 8 23:44:24 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 8 Sep 2011 23:44:24 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: indexing by tuples done. Added some other tasks. Message-ID: <20110908214424.274038203C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: extradoc Changeset: r3895:0cfdcff602ea Date: 2011-09-08 15:41 -0600 http://bitbucket.org/pypy/extradoc/changeset/0cfdcff602ea/ Log: indexing by tuples done. Added some other tasks. 
diff --git a/planning/micronumpy.txt b/planning/micronumpy.txt --- a/planning/micronumpy.txt +++ b/planning/micronumpy.txt @@ -16,7 +16,11 @@ - a good sort function -- indexing by tuples and lists +- indexing by arrays and lists + +- endianness + +- scalar types like numpy.int8 - add multi-dim arrays From noreply at buildbot.pypy.org Fri Sep 9 01:27:26 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Sep 2011 01:27:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Give meaningful class names in tests Message-ID: <20110908232726.745868203C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r47172:74cb13e6e9d0 Date: 2011-09-08 00:09 +0200 http://bitbucket.org/pypy/pypy/changeset/74cb13e6e9d0/ Log: Give meaningful class names in tests diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -215,36 +215,36 @@ typedef struct { PyUnicodeObject HEAD; int val; -} FuuObject; +} UnicodeSubclassObject; -static int Fuu_init(FuuObject *self, PyObject *args, PyObject *kwargs) { +static int UnicodeSubclass_init(UnicodeSubclassObject *self, PyObject *args, PyObject *kwargs) { self->val = 42; return 0; } static PyObject * -Fuu_escape(PyTypeObject* type, PyObject *args) +UnicodeSubclass_escape(PyTypeObject* type, PyObject *args) { Py_RETURN_TRUE; } static PyObject * -Fuu_get_val(FuuObject *self) { +UnicodeSubclass_get_val(UnicodeSubclassObject *self) { return PyInt_FromLong(self->val); } -static PyMethodDef Fuu_methods[] = { - {"escape", (PyCFunction) Fuu_escape, METH_VARARGS, NULL}, - {"get_val", (PyCFunction) Fuu_get_val, METH_NOARGS, NULL}, +static PyMethodDef UnicodeSubclass_methods[] = { + {"escape", (PyCFunction) UnicodeSubclass_escape, METH_VARARGS, NULL}, + {"get_val", (PyCFunction) UnicodeSubclass_get_val, METH_NOARGS, NULL}, {NULL} /* Sentinel */ }; -PyTypeObject FuuType = { +PyTypeObject UnicodeSubtype = { PyObject_HEAD_INIT(NULL) 0, 
"foo.fuu", - sizeof(FuuObject), + sizeof(UnicodeSubclassObject), 0, 0, /*tp_dealloc*/ 0, /*tp_print*/ @@ -277,7 +277,7 @@ /* Attribute descriptor and subclassing stuff */ - Fuu_methods,/*tp_methods*/ + UnicodeSubclass_methods,/*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ @@ -287,7 +287,7 @@ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ - (initproc) Fuu_init, /*tp_init*/ + (initproc) UnicodeSubclass_init, /*tp_init*/ 0, /*tp_alloc will be set to PyType_GenericAlloc in module init*/ 0, /*tp_new*/ 0, /*tp_free Low-level free-memory routine */ @@ -299,11 +299,11 @@ 0 /*tp_weaklist*/ }; -PyTypeObject Fuu2Type = { +PyTypeObject UnicodeSubtype2 = { PyObject_HEAD_INIT(NULL) 0, "foo.fuu2", - sizeof(FuuObject), + sizeof(UnicodeSubclassObject), 0, 0, /*tp_dealloc*/ 0, /*tp_print*/ @@ -628,15 +628,15 @@ footype.tp_new = PyType_GenericNew; - FuuType.tp_base = &PyUnicode_Type; - Fuu2Type.tp_base = &FuuType; + UnicodeSubtype.tp_base = &PyUnicode_Type; + UnicodeSubtype2.tp_base = &UnicodeSubtype; MetaType.tp_base = &PyType_Type; if (PyType_Ready(&footype) < 0) return; - if (PyType_Ready(&FuuType) < 0) + if (PyType_Ready(&UnicodeSubtype) < 0) return; - if (PyType_Ready(&Fuu2Type) < 0) + if (PyType_Ready(&UnicodeSubtype2) < 0) return; if (PyType_Ready(&MetaType) < 0) return; @@ -655,9 +655,9 @@ return; if (PyDict_SetItemString(d, "fooType", (PyObject *)&footype) < 0) return; - if (PyDict_SetItemString(d, "FuuType", (PyObject *) &FuuType) < 0) + if (PyDict_SetItemString(d, "UnicodeSubtype", (PyObject *) &UnicodeSubtype) < 0) return; - if(PyDict_SetItemString(d, "Fuu2Type", (PyObject *) &Fuu2Type) < 0) + if (PyDict_SetItemString(d, "UnicodeSubtype2", (PyObject *) &UnicodeSubtype2) < 0) return; if (PyDict_SetItemString(d, "MetaType", (PyObject *) &MetaType) < 0) return; diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ 
-119,16 +119,16 @@ module = self.import_module(name='foo') obj = module.new() # call __new__ - newobj = module.FuuType(u"xyz") + newobj = module.UnicodeSubtype(u"xyz") assert newobj == u"xyz" - assert isinstance(newobj, module.FuuType) + assert isinstance(newobj, module.UnicodeSubtype) assert isinstance(module.fooType(), module.fooType) class bar(module.fooType): pass assert isinstance(bar(), bar) - fuu = module.FuuType + fuu = module.UnicodeSubtype class fuu2(fuu): def baz(self): return self @@ -137,20 +137,20 @@ def test_init(self): module = self.import_module(name="foo") - newobj = module.FuuType() + newobj = module.UnicodeSubtype() assert newobj.get_val() == 42 # this subtype should inherit tp_init - newobj = module.Fuu2Type() + newobj = module.UnicodeSubtype2() assert newobj.get_val() == 42 # this subclass redefines __init__ - class Fuu2(module.FuuType): + class UnicodeSubclass2(module.UnicodeSubtype): def __init__(self): self.foobar = 32 - super(Fuu2, self).__init__() + super(UnicodeSubclass2, self).__init__() - newobj = Fuu2() + newobj = UnicodeSubclass2() assert newobj.get_val() == 42 assert newobj.foobar == 32 From noreply at buildbot.pypy.org Fri Sep 9 01:27:27 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Sep 2011 01:27:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Implement PyFunction_GetCode() Message-ID: <20110908232727.A8ED08203C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r47173:0d46b9d3db65 Date: 2011-09-08 00:17 +0200 http://bitbucket.org/pypy/pypy/changeset/0d46b9d3db65/ Log: Implement PyFunction_GetCode() diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -40,6 +40,13 @@ from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) + at cpython_api([PyObject], PyObject) +def PyFunction_GetCode(space, w_func): + """Return the code object associated with 
the function object op.""" + func = space.interp_w(Function, w_func) + w_code = space.wrap(func.code) + return borrow_from(w_func, w_code) + @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyMethod_New(space, w_func, w_self, w_cls): """Return a new method object, with func being any callable object; this is the diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -920,12 +920,6 @@ raise NotImplementedError @cpython_api([PyObject], PyObject) -def PyFunction_GetCode(space, op): - """Return the code object associated with the function object op.""" - borrow_from() - raise NotImplementedError - - at cpython_api([PyObject], PyObject) def PyFunction_GetGlobals(space, op): """Return the globals dictionary associated with the function object op.""" borrow_from() diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py --- a/pypy/module/cpyext/test/test_funcobject.py +++ b/pypy/module/cpyext/test/test_funcobject.py @@ -36,6 +36,14 @@ w_method2 = api.PyMethod_New(w_function, w_self, w_class) assert space.eq_w(w_method, w_method2) + def test_getcode(self, space, api): + w_function = space.appexec([], """(): + def func(x): return x + return func + """) + w_code = api.PyFunction_GetCode(w_function) + assert w_code.co_name == "func" + def test_newcode(self, space, api): filename = rffi.str2charp('filename') funcname = rffi.str2charp('funcname') From noreply at buildbot.pypy.org Fri Sep 9 01:27:28 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Sep 2011 01:27:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Add macro PyFunction_GET_CODE Message-ID: <20110908232728.D76E98203C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r47174:648203ec240e Date: 2011-09-08 00:18 +0200 http://bitbucket.org/pypy/pypy/changeset/648203ec240e/ Log: Add macro PyFunction_GET_CODE diff --git 
a/pypy/module/cpyext/include/funcobject.h b/pypy/module/cpyext/include/funcobject.h --- a/pypy/module/cpyext/include/funcobject.h +++ b/pypy/module/cpyext/include/funcobject.h @@ -12,6 +12,8 @@ PyObject *func_name; /* The __name__ attribute, a string object */ } PyFunctionObject; +#define PyFunction_GET_CODE(obj) PyFunction_GetCode((PyObject*)(obj)) + #define PyMethod_GET_FUNCTION(obj) PyMethod_Function((PyObject*)(obj)) #define PyMethod_GET_SELF(obj) PyMethod_Self((PyObject*)(obj)) #define PyMethod_GET_CLASS(obj) PyMethod_Class((PyObject*)(obj)) From noreply at buildbot.pypy.org Fri Sep 9 01:27:30 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Sep 2011 01:27:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Add Py_TRASHCAN_ macros, I don't know if we really need to implement them Message-ID: <20110908232730.12DF28203C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r47175:3d0b4b06db50 Date: 2011-09-08 00:21 +0200 http://bitbucket.org/pypy/pypy/changeset/3d0b4b06db50/ Log: Add Py_TRASHCAN_ macros, I don't know if we really need to implement them diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -501,6 +501,9 @@ #define PyObject_TypeCheck(ob, tp) \ ((ob)->ob_type == (tp) || PyType_IsSubtype((ob)->ob_type, (tp))) +#define Py_TRASHCAN_SAFE_BEGIN(pyObj) +#define Py_TRASHCAN_SAFE_END(pyObj) + /* Copied from CPython ----------------------------- */ int PyObject_AsReadBuffer(PyObject *, const void **, Py_ssize_t *); int PyObject_AsWriteBuffer(PyObject *, void **, Py_ssize_t *); diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -12,6 +12,7 @@ #define Py_Py3kWarningFlag 0 #define Py_FrozenFlag 0 +#define Py_VerboseFlag 0 typedef struct { int cf_flags; /* bitmask of 
CO_xxx flags relevant to future */ From noreply at buildbot.pypy.org Fri Sep 9 01:27:31 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Sep 2011 01:27:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Expose a few members of the PyCodeObject structure Message-ID: <20110908232731.45A7B8203C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r47176:9962794cb329 Date: 2011-09-08 01:04 +0200 http://bitbucket.org/pypy/pypy/changeset/9962794cb329/ Log: Expose a few members of the PyCodeObject structure diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -10,7 +10,7 @@ from pypy.interpreter.argument import Signature from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import NoneNotWrapped, unwrap_spec -from pypy.interpreter.astcompiler.consts import (CO_OPTIMIZED, +from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_CONTAINSGLOBALS) from pypy.rlib.rarithmetic import intmask diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -4,9 +4,21 @@ cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError from pypy.interpreter.function import Function, Method from pypy.interpreter.pycode import PyCode +from pypy.interpreter import pycode + +CODE_FLAGS = dict( + CO_OPTIMIZED = 0x0001, + CO_NEWLOCALS = 0x0002, + CO_VARARGS = 0x0004, + CO_VARKEYWORDS = 0x0008, + CO_NESTED = 0x0010, + CO_GENERATOR = 0x0020, +) +ALL_CODE_FLAGS = unrolling_iterable(CODE_FLAGS.items()) PyFunctionObjectStruct = lltype.ForwardReference() 
PyFunctionObject = lltype.Ptr(PyFunctionObjectStruct) @@ -16,7 +28,12 @@ PyCodeObjectStruct = lltype.ForwardReference() PyCodeObject = lltype.Ptr(PyCodeObjectStruct) -cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) +PyCodeObjectFields = PyObjectFields + \ + (("co_name", PyObject), + ("co_flags", rffi.INT), + ("co_argcount", rffi.INT), + ) +cpython_struct("PyCodeObject", PyCodeObjectFields, PyCodeObjectStruct) @bootstrap_function def init_functionobject(space): @@ -24,6 +41,10 @@ basestruct=PyFunctionObject.TO, attach=function_attach, dealloc=function_dealloc) + make_typedescr(PyCode.typedef, + basestruct=PyCodeObject.TO, + attach=code_attach, + dealloc=code_dealloc) PyFunction_Check, PyFunction_CheckExact = build_type_checkers("Function", Function) PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method) @@ -40,6 +61,24 @@ from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) +def code_attach(space, py_obj, w_obj): + py_code = rffi.cast(PyCodeObject, py_obj) + assert isinstance(w_obj, PyCode) + py_code.c_co_name = make_ref(space, space.wrap(w_obj.co_name)) + co_flags = 0 + for name, value in ALL_CODE_FLAGS: + if w_obj.co_flags & getattr(pycode, name): + co_flags |= value + rffi.setintfield(py_code, 'c_co_flags', co_flags) + rffi.setintfield(py_code, 'c_co_argcount', w_obj.co_argcount) + + at cpython_api([PyObject], lltype.Void, external=False) +def code_dealloc(space, py_obj): + py_code = rffi.cast(PyCodeObject, py_obj) + Py_DecRef(space, py_code.c_co_name) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + @cpython_api([PyObject], PyObject) def PyFunction_GetCode(space, w_func): """Return the code object associated with the function object op.""" diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h --- a/pypy/module/cpyext/include/code.h +++ b/pypy/module/cpyext/include/code.h @@ -4,7 +4,21 @@ extern "C" { #endif -typedef 
PyObject PyCodeObject; +typedef struct { + PyObject_HEAD + PyObject *co_name; + int co_argcount; + int co_flags; +} PyCodeObject; + +/* Masks for co_flags above */ +/* These values are also in funcobject.py */ +#define CO_OPTIMIZED 0x0001 +#define CO_NEWLOCALS 0x0002 +#define CO_VARARGS 0x0004 +#define CO_VARKEYWORDS 0x0008 +#define CO_NESTED 0x0010 +#define CO_GENERATOR 0x0020 #ifdef __cplusplus } diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py --- a/pypy/module/cpyext/test/test_funcobject.py +++ b/pypy/module/cpyext/test/test_funcobject.py @@ -2,8 +2,12 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref -from pypy.module.cpyext.funcobject import PyFunctionObject +from pypy.module.cpyext.funcobject import ( + PyFunctionObject, PyCodeObject, CODE_FLAGS) from pypy.interpreter.function import Function, Method +from pypy.interpreter.pycode import PyCode + +globals().update(CODE_FLAGS) class TestFunctionObject(BaseApiTest): def test_function(self, space, api): @@ -38,12 +42,36 @@ def test_getcode(self, space, api): w_function = space.appexec([], """(): - def func(x): return x + def func(x, y, z): return x return func """) w_code = api.PyFunction_GetCode(w_function) assert w_code.co_name == "func" + ref = make_ref(space, w_code) + assert (from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is + space.gettypeobject(PyCode.typedef)) + assert "func" == space.unwrap( + from_ref(space, rffi.cast(PyCodeObject, ref).c_co_name)) + assert 3 == rffi.cast(PyCodeObject, ref).c_co_argcount + api.Py_DecRef(ref) + + def test_co_flags(self, space, api): + def get_flags(signature, body="pass"): + w_code = space.appexec([], """(): + def func(%s): %s + return func.__code__ + """ % (signature, body)) + ref = make_ref(space, w_code) + co_flags = rffi.cast(PyCodeObject, ref).c_co_flags + 
api.Py_DecRef(ref) + return co_flags + assert get_flags("x") == CO_NESTED | CO_OPTIMIZED | CO_NEWLOCALS + assert get_flags("x", "exec x") == CO_NESTED | CO_NEWLOCALS + assert get_flags("x, *args") & CO_VARARGS + assert get_flags("x, **kw") & CO_VARKEYWORDS + assert get_flags("x", "yield x") & CO_GENERATOR + def test_newcode(self, space, api): filename = rffi.str2charp('filename') funcname = rffi.str2charp('funcname') From noreply at buildbot.pypy.org Fri Sep 9 01:27:32 2011 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Sep 2011 01:27:32 +0200 (CEST) Subject: [pypy-commit] pypy default: A ll2ctypes bug that depends on the order of the fields in the structure. Message-ID: <20110908232732.798268203C@wyvern.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r47177:d5b0b000a97e Date: 2011-09-09 01:11 +0200 http://bitbucket.org/pypy/pypy/changeset/d5b0b000a97e/ Log: A ll2ctypes bug that depends on the order of the fields in the structure. Found while trying to implement PyHeapTypeObject in cpyext, the equivalent of CPython:: PyHeapTypeObject *type = malloc(); type->ht_type.tp_as_number = &type->as_number; diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -81,6 +81,26 @@ lltype.free(s, flavor='raw') assert not ALLOCATED # detects memory leaks in the test + def test_get_pointer(self): + py.test.skip("FIXME") + # Equivalent of the C code:: + # struct S1 { struct S2 *ptr; struct S2 buf; }; + # struct S1 s1; + # s1.ptr = & s1.buf; + S2 = lltype.Struct('S2', ('y', lltype.Signed)) + S1 = lltype.Struct('S', + ('sub', lltype.Struct('SUB', + ('ptr', lltype.Ptr(S2)))), + ('ptr', lltype.Ptr(S2)), + ('buf', S2), # Works when this field is first! 
+ ) + s1 = lltype.malloc(S1, flavor='raw') + s1.ptr = s1.buf + s1.sub.ptr = s1.buf + + x = rffi.cast(rffi.CCHARP, s1) + lltype.free(s1, flavor='raw') + def test_struct_ptrs(self): S2 = lltype.Struct('S2', ('y', lltype.Signed)) S1 = lltype.Struct('S', ('x', lltype.Signed), ('p', lltype.Ptr(S2))) From noreply at buildbot.pypy.org Fri Sep 9 02:49:24 2011 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 9 Sep 2011 02:49:24 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix link issue with CINT backend Message-ID: <20110909004924.7F6D78203C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r47178:b3c360704811 Date: 2011-09-08 17:50 -0700 http://bitbucket.org/pypy/pypy/changeset/b3c360704811/ Log: fix link issue with CINT backend diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -25,12 +25,15 @@ _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) with rffi.scoped_str2charp('libCore.so') as ll_libname: _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) +with rffi.scoped_str2charp('libMathCore.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("cintcwrapper.cxx")], include_dirs=[incpath] + rootincpath, includes=["cintcwrapper.h"], library_dirs=rootlibpath, + link_extra=["-lMathCore", "-lCore", "-lCint"], use_cpp_linker=True, ) From noreply at buildbot.pypy.org Fri Sep 9 11:53:18 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 11:53:18 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: refactor the interface for struct_{get, set}field. Now we pass the ffitype at runtime, and the values are handled as lltype.Signed. 
struct_{get, set}field take care of doing cast of the correct LLTYPE Message-ID: <20110909095318.487DC8203C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47179:4003bd45c5ff Date: 2011-09-09 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/4003bd45c5ff/ Log: refactor the interface for struct_{get,set}field. Now we pass the ffitype at runtime, and the values are handled as lltype.Signed. struct_{get,set}field take care of doing cast of the correct LLTYPE diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -231,6 +231,9 @@ lltype.Bool : _unsigned_type_for(lltype.Bool), } +ffitype_map = unrolling_iterable(TYPE_MAP.iteritems()) + + def external(name, args, result, **kwds): return rffi.llexternal(name, args, result, compilation_info=eci, **kwds) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -414,9 +414,33 @@ # ====================================================================== +def struct_getfield_int(ffitype, addr, offset): + """ + Return the field of type ``ffitype`` at ``addr+offset``, widened to + lltype.Signed. + """ + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype is ffitype2: + value = _struct_getfield(TYPE, addr, offset) + return rffi.cast(lltype.Signed, value) + assert False, "cannot find the given ffitype" + +def struct_setfield_int(ffitype, addr, offset, value): + """ + Set the field of type ``ffitype`` at ``addr+offset``. ``value`` is of + type lltype.Signed, and it's automatically converted to the right type. 
+ """ + for TYPE, ffitype2 in clibffi.ffitype_map: + if ffitype is ffitype2: + value = rffi.cast(TYPE, value) + _struct_setfield(TYPE, addr, offset, value) + return + assert False, "cannot find the given ffitype" + + @jit.dont_look_inside @specialize.arg(0) -def struct_getfield(TYPE, addr, offset): +def _struct_getfield(TYPE, addr, offset): """ Read the field of type TYPE at addr+offset. addr is of type rffi.VOIDP, offset is an int. @@ -428,9 +452,9 @@ @jit.dont_look_inside @specialize.arg(0) -def struct_setfield(TYPE, addr, offset, value): +def _struct_setfield(TYPE, addr, offset, value): """ - Read the field of type TYPE at addr+offset. + Write the field of type TYPE at addr+offset. addr is of type rffi.VOIDP, offset is an int. """ addr = rffi.ptradd(addr, offset) diff --git a/pypy/rlib/test/test_libffi.py b/pypy/rlib/test/test_libffi.py --- a/pypy/rlib/test/test_libffi.py +++ b/pypy/rlib/test/test_libffi.py @@ -5,7 +5,7 @@ from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong from pypy.rlib.test.test_clibffi import BaseFfiTest, get_libm_name, make_struct_ffitype_e from pypy.rlib.libffi import CDLL, Func, get_libc_name, ArgChain, types -from pypy.rlib.libffi import IS_32_BIT, struct_getfield, struct_setfield +from pypy.rlib.libffi import IS_32_BIT, struct_getfield_int, struct_setfield_int class TestLibffiMisc(BaseFfiTest): @@ -54,18 +54,22 @@ def test_struct_fields(self): longsize = 4 if IS_32_BIT else 8 - POINT = lltype.Struct('POINT', ('x', rffi.LONG), ('y', rffi.LONG)) + POINT = lltype.Struct('POINT', + ('x', rffi.LONG), + ('y', rffi.SHORT) + ) + y_ofs = longsize p = lltype.malloc(POINT, flavor='raw') p.x = 42 - p.y = 43 + p.y = rffi.cast(rffi.SHORT, -1) addr = rffi.cast(rffi.VOIDP, p) - assert struct_getfield(rffi.LONG, addr, 0) == 42 - assert struct_getfield(rffi.LONG, addr, longsize) == 43 + assert struct_getfield_int(types.slong, addr, 0) == 42 + assert struct_getfield_int(types.sshort, addr, y_ofs) == -1 # - struct_setfield(rffi.LONG, 
addr, 0, 123) - struct_setfield(rffi.LONG, addr, longsize, 321) - assert p.x == 123 - assert p.y == 321 + struct_setfield_int(types.slong, addr, 0, 43) + struct_setfield_int(types.sshort, addr, y_ofs, 0x1234FFFE) # 0x1234 is masked out + assert p.x == 43 + assert p.y == -2 # lltype.free(p, flavor='raw') From noreply at buildbot.pypy.org Fri Sep 9 11:53:19 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 11:53:19 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: fix the _ffi module to use the new interface exposed by libffi Message-ID: <20110909095319.78C1D8203C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47180:1d749ac82a53 Date: 2011-09-09 11:51 +0200 http://bitbucket.org/pypy/pypy/changeset/1d749ac82a53/ Log: fix the _ffi module to use the new interface exposed by libffi diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py --- a/pypy/module/_ffi/interp_struct.py +++ b/pypy/module/_ffi/interp_struct.py @@ -114,7 +114,7 @@ assert w_ffitype is app_types.slong # XXX: handle all cases FIELD_TYPE = rffi.LONG # - value = libffi.struct_getfield(FIELD_TYPE, self.rawmem, offset) + value = libffi.struct_getfield_int(w_ffitype.ffitype, self.rawmem, offset) return space.wrap(value) @unwrap_spec(name=str) @@ -124,7 +124,7 @@ FIELD_TYPE = rffi.LONG value = space.int_w(w_value) # - libffi.struct_setfield(FIELD_TYPE, self.rawmem, offset, value) + libffi.struct_setfield_int(w_ffitype.ffitype, self.rawmem, offset, value) From noreply at buildbot.pypy.org Fri Sep 9 11:53:20 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 11:53:20 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: merge heads Message-ID: <20110909095320.A79DF8203C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47181:7bcdee76d84b Date: 2011-09-09 11:52 +0200 http://bitbucket.org/pypy/pypy/changeset/7bcdee76d84b/ Log: merge heads diff --git 
a/pypy/module/_ffi/interp_struct.py b/pypy/module/_ffi/interp_struct.py --- a/pypy/module/_ffi/interp_struct.py +++ b/pypy/module/_ffi/interp_struct.py @@ -46,7 +46,7 @@ def allocate(self, space): return W__StructInstance(self) - @jit.elidable_promote() + @jit.elidable_promote('0') def get_type_and_offset_for_field(self, name): try: w_field = self.name2w_field[name] From noreply at buildbot.pypy.org Fri Sep 9 15:36:13 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 15:36:13 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: implement fielddescrof_dynamic for the llgraph backend Message-ID: <20110909133613.43AF58203C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47182:f86c80625b41 Date: 2011-09-09 11:49 +0200 http://bitbucket.org/pypy/pypy/changeset/f86c80625b41/ Log: implement fielddescrof_dynamic for the llgraph backend diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -764,7 +764,9 @@ op_getfield_gc_pure = op_getfield_gc def op_getfield_raw(self, fielddescr, struct): - if fielddescr.typeinfo == REF: + if fielddescr.arg_types == 'dynamic': # abuse of .arg_types + return do_getfield_raw_dynamic(struct, fielddescr) + elif fielddescr.typeinfo == REF: return do_getfield_raw_ptr(struct, fielddescr.ofs) elif fielddescr.typeinfo == INT: return do_getfield_raw_int(struct, fielddescr.ofs) @@ -817,7 +819,9 @@ raise NotImplementedError def op_setfield_raw(self, fielddescr, struct, newvalue): - if fielddescr.typeinfo == REF: + if fielddescr.arg_types == 'dynamic': # abuse of .arg_types + do_setfield_raw_dynamic(struct, fielddescr, newvalue) + elif fielddescr.typeinfo == REF: do_setfield_raw_ptr(struct, fielddescr.ofs, newvalue) elif fielddescr.typeinfo == INT: do_setfield_raw_int(struct, fielddescr.ofs, newvalue) @@ -1370,6 +1374,17 @@ def do_getfield_raw_ptr(struct, fieldnum): return 
cast_to_ptr(_getfield_raw(struct, fieldnum)) +def do_getfield_raw_dynamic(struct, fielddescr): + from pypy.rlib import libffi + addr = cast_from_int(rffi.VOIDP, struct) + ofs = fielddescr.ofs + if fielddescr.is_pointer_field(): + assert False, 'fixme' + elif fielddescr.is_float_field(): + assert False, 'fixme' + else: + return libffi._struct_getfield(lltype.Signed, addr, ofs) + def do_new(size): TYPE = symbolic.Size2Type[size] x = lltype.malloc(TYPE, zero=True) @@ -1453,6 +1468,17 @@ newvalue = cast_from_ptr(FIELDTYPE, newvalue) setattr(ptr, fieldname, newvalue) +def do_setfield_raw_dynamic(struct, fielddescr, newvalue): + from pypy.rlib import libffi + addr = cast_from_int(rffi.VOIDP, struct) + ofs = fielddescr.ofs + if fielddescr.is_pointer_field(): + assert False, 'fixme' + elif fielddescr.is_float_field(): + assert False, 'fixme' + else: + libffi._struct_setfield(lltype.Signed, addr, ofs, newvalue) + def do_newstr(length): x = rstr.mallocstr(length) return cast_to_ptr(x) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -316,6 +316,16 @@ token = history.getkind(getattr(S, fieldname)) return self.getdescr(ofs, token[0], name=fieldname) + def fielddescrof_dynamic(self, offset, fieldsize, is_pointer, is_float, is_signed): + if is_pointer: + typeinfo = REF + elif is_float: + typeinfo = FLOAT + else: + typeinfo = INT + # we abuse the arg_types field to distinguish dynamic and static descrs + return self.getdescr(offset, typeinfo, arg_types='dynamic', name='') + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): arg_types = [] for ARG in ARGS: diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1509,20 +1509,38 @@ assert s.x == chr(190) assert s.y == chr(150) - def test_field_raw_pure(self): - # This is really testing the 
same thing as test_field_basic but can't - # hurt... - S = lltype.Struct('S', ('x', lltype.Signed)) + def test_fielddescrof_dynamic(self): + S = lltype.Struct('S', + ('x', lltype.Signed), + ('y', lltype.Signed), + ) + longsize = rffi.sizeof(lltype.Signed) + y_ofs = longsize s = lltype.malloc(S, flavor='raw') sa = llmemory.cast_ptr_to_adr(s) s_box = BoxInt(heaptracker.adr2int(sa)) + # + field = self.cpu.fielddescrof(S, 'y') + field_dyn = self.cpu.fielddescrof_dynamic(offset=y_ofs, + fieldsize=longsize, + is_pointer=False, + is_float=False, + is_signed=True) + assert field.is_pointer_field() == field_dyn.is_pointer_field() + assert field.is_float_field() == field_dyn.is_float_field() + if 'llgraph' not in str(self.cpu): + assert field.is_signed_field() == field_dyn.is_signed_field() + assert field.get_field_size() == field_dyn.get_field_size() + + # for get_op, set_op in ((rop.GETFIELD_RAW, rop.SETFIELD_RAW), (rop.GETFIELD_RAW_PURE, rop.SETFIELD_RAW)): - fd = self.cpu.fielddescrof(S, 'x') - self.execute_operation(set_op, [s_box, BoxInt(32)], 'void', - descr=fd) - res = self.execute_operation(get_op, [s_box], 'int', descr=fd) - assert res.getint() == 32 + for descr in (field, field_dyn): + self.execute_operation(set_op, [s_box, BoxInt(32)], 'void', + descr=descr) + res = self.execute_operation(get_op, [s_box], 'int', descr=descr) + assert res.getint() == 32 + lltype.free(s, flavor='raw') def test_new_with_vtable(self): From noreply at buildbot.pypy.org Fri Sep 9 15:36:14 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 15:36:14 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: add a JIT optimization which turns calls to libffi.struct_{get, set}field_* into {SET, GET}FIELD_RAW. 
optimizeopt unit test are coming in the next checkin Message-ID: <20110909133614.7D94D82213@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47183:a86930b6e9ff Date: 2011-09-08 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/a86930b6e9ff/ Log: add a JIT optimization which turns calls to libffi.struct_{get,set}field_* into {SET,GET}FIELD_RAW. optimizeopt unit test are coming in the next checkin diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -48,6 +48,8 @@ OS_LIBFFI_PREPARE = 60 OS_LIBFFI_PUSH_ARG = 61 OS_LIBFFI_CALL = 62 + OS_LIBFFI_STRUCT_GETFIELD = 63 + OS_LIBFFI_STRUCT_SETFIELD = 64 # OS_LLONG_INVERT = 69 OS_LLONG_ADD = 70 diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1483,6 +1483,12 @@ elif oopspec_name.startswith('libffi_call_'): oopspecindex = EffectInfo.OS_LIBFFI_CALL extraeffect = EffectInfo.EF_RANDOM_EFFECTS + elif oopspec_name == 'libffi_struct_getfield': + oopspecindex = EffectInfo.OS_LIBFFI_STRUCT_GETFIELD + extraeffect = EffectInfo.EF_CANNOT_RAISE + elif oopspec_name == 'libffi_struct_setfield': + oopspecindex = EffectInfo.OS_LIBFFI_STRUCT_SETFIELD + extraeffect = EffectInfo.EF_CANNOT_RAISE else: assert False, 'unsupported oopspec: %s' % oopspec_name return self._handle_oopspec_call(op, args, oopspecindex, extraeffect) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -392,7 +392,6 @@ def _ll_3_libffi_call_void(llfunc, funcsym, ll_args): return func(llfunc)._do_call(funcsym, ll_args, lltype.Void) - # in the following calls to builtins, the JIT is allowed to look inside: inline_calls_to = [ ('int_floordiv_ovf_zer', [lltype.Signed, lltype.Signed], lltype.Signed), diff --git 
a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -1,7 +1,9 @@ from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.libffi import Func from pypy.rlib.debug import debug_print +from pypy.rlib import libffi, clibffi from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method @@ -116,6 +118,9 @@ ops = self.do_push_arg(op) elif oopspec == EffectInfo.OS_LIBFFI_CALL: ops = self.do_call(op) + elif (oopspec == EffectInfo.OS_LIBFFI_STRUCT_GETFIELD or + oopspec == EffectInfo.OS_LIBFFI_STRUCT_SETFIELD): + ops = self.do_struct_getsetfield(op, oopspec) # for op in ops: self.emit_operation(op) @@ -190,6 +195,46 @@ ops.append(newop) return ops + def do_struct_getsetfield(self, op, oopspec): + ffitypeval = self.getvalue(op.getarg(1)) + addrval = self.getvalue(op.getarg(2)) + offsetval = self.getvalue(op.getarg(3)) + if not ffitypeval.is_constant() or not offsetval.is_constant(): + return [op] + # + ffitypeaddr = ffitypeval.box.getaddr() + ffitype = llmemory.cast_adr_to_ptr(ffitypeaddr, clibffi.FFI_TYPE_P) + offset = offsetval.box.getint() + descr = self._get_field_descr(ffitype, offset) + # + arglist = [addrval.force_box()] + if oopspec == EffectInfo.OS_LIBFFI_STRUCT_GETFIELD: + opnum = rop.GETFIELD_RAW + else: + opnum = rop.SETFIELD_RAW + newval = self.getvalue(op.getarg(4)) + arglist.append(newval.force_box()) + # + newop = ResOperation(opnum, arglist, op.result, descr=descr) + return [newop] + + def _get_field_descr(self, ffitype, offset): + kind = libffi.types.getkind(ffitype) + is_pointer = is_float = is_signed = False + if ffitype is libffi.types.pointer: + is_pointer = True + 
elif kind == 'i': + is_signed = True + elif kind == 'f' or kind == 'I' or kind == 'U': + # longlongs are treated as floats, see e.g. llsupport/descr.py:getDescrClass + is_float = True + else: + assert False, "unsupported ffitype or kind" + # + fieldsize = ffitype.c_size + return self.optimizer.cpu.fielddescrof_dynamic(offset, fieldsize, + is_pointer, is_float, is_signed) + def propagate_forward(self, op): if self.logops is not None: debug_print(self.logops.repr_of_resop(op)) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -3,8 +3,8 @@ from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong from pypy.rlib.jit import JitDriver, promote, dont_look_inside from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.libffi import ArgChain -from pypy.rlib.libffi import IS_32_BIT +from pypy.rlib.libffi import ArgChain, types +from pypy.rlib.libffi import IS_32_BIT, struct_setfield_int, struct_getfield_int from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.objectmodel import specialize @@ -93,5 +93,26 @@ test_byval_result.dont_track_allocations = True + class TestFfiCallSupportAll(TestFfiCall): supports_all = True # supports_{floats,longlong,singlefloats} + + + def test_struct_getfield(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'addr']) + + def f(n): + i = 0 + addr = lltype.malloc(rffi.VOIDP.TO, 10, flavor='raw') + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, addr=addr) + struct_setfield_int(types.slong, addr, 0, 1) + i += struct_getfield_int(types.slong, addr, 0) + lltype.free(addr, flavor='raw') + return i + assert self.meta_interp(f, [20]) == f(20) + self.check_loops( + setfield_raw=1, + getfield_raw=1, + call=0) + diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- 
a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -414,6 +414,7 @@ # ====================================================================== + at jit.oopspec('libffi_struct_getfield(ffitype, addr, offset)') def struct_getfield_int(ffitype, addr, offset): """ Return the field of type ``ffitype`` at ``addr+offset``, widened to @@ -425,6 +426,8 @@ return rffi.cast(lltype.Signed, value) assert False, "cannot find the given ffitype" + + at jit.oopspec('libffi_struct_setfield(ffitype, addr, offset, value)') def struct_setfield_int(ffitype, addr, offset, value): """ Set the field of type ``ffitype`` at ``addr+offset``. ``value`` is of @@ -438,7 +441,6 @@ assert False, "cannot find the given ffitype" - at jit.dont_look_inside @specialize.arg(0) def _struct_getfield(TYPE, addr, offset): """ @@ -450,7 +452,6 @@ return rffi.cast(PTR_FIELD, addr)[0] - at jit.dont_look_inside @specialize.arg(0) def _struct_setfield(TYPE, addr, offset, value): """ From noreply at buildbot.pypy.org Fri Sep 9 15:36:15 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 15:36:15 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: add optimizeopt unit test for the struct_{get, set}field optimization Message-ID: <20110909133615.B0C73822AB@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47184:00cacb3f4be4 Date: 2011-09-09 15:32 +0200 http://bitbucket.org/pypy/pypy/changeset/00cacb3f4be4/ Log: add optimizeopt unit test for the struct_{get,set}field optimization diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -56,6 +56,13 @@ restype=types.sint, flags=43) # + ffi_slong = types.slong + dyn_123_field = cpu.fielddescrof_dynamic(offset=123, + fieldsize=types.slong.c_size, + is_pointer=False, + is_float=False, + is_signed=True) + # 
def calldescr(cpu, FUNC, oopspecindex, extraeffect=None): if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: f = None # means "can force all" really @@ -69,6 +76,8 @@ libffi_push_arg = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PUSH_ARG) libffi_call = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_CALL, EffectInfo.EF_RANDOM_EFFECTS) + libffi_struct_getfield = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_STRUCT_GETFIELD) + libffi_struct_setfield = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_STRUCT_SETFIELD) namespace = namespace.__dict__ @@ -277,3 +286,20 @@ jump(i3, f1, p2) """ loop = self.optimize_loop(ops, expected) + + def test_ffi_struct_fields(self): + ops = """ + [i0] + i1 = call(0, ConstClass(ffi_slong), i0, 123, descr=libffi_struct_getfield) + i2 = int_add(i1, 1) + call(0, ConstClass(ffi_slong), i0, 123, i2, descr=libffi_struct_setfield) + jump(i1) + """ + expected = """ + [i0] + i1 = getfield_raw(i0, descr=dyn_123_field) + i2 = int_add(i1, 1) + setfield_raw(i0, i2, descr=dyn_123_field) + jump(i1) + """ + loop = self.optimize_loop(ops, expected) From noreply at buildbot.pypy.org Fri Sep 9 15:36:16 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 15:36:16 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: another unit test for when not to optimize Message-ID: <20110909133616.E2ED5822B3@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47185:3acdc8abbdc4 Date: 2011-09-09 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/3acdc8abbdc4/ Log: another unit test for when not to optimize diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -303,3 +303,13 @@ jump(i1) """ loop = self.optimize_loop(ops, expected) + + def test_ffi_struct_fields_nonconst(self): + ops = """ + [i0, i1] + i2 = call(0, 
ConstClass(ffi_slong), i0, i1, descr=libffi_struct_getfield) + i3 = call(0, i1 , i0, 123, descr=libffi_struct_getfield) + jump(i1) + """ + expected = ops + loop = self.optimize_loop(ops, expected) From noreply at buildbot.pypy.org Fri Sep 9 16:51:10 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 16:51:10 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: implement fielddescrof_dynamic for the ll backends Message-ID: <20110909145110.5DFB88203C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47186:4e3a2c7b46cb Date: 2011-09-09 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/4e3a2c7b46cb/ Log: implement fielddescrof_dynamic for the ll backends diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -113,6 +113,17 @@ def repr_of_descr(self): return '<%s %s %s>' % (self._clsname, self.name, self.offset) +class DynamicFieldDescr(BaseFieldDescr): + + def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): + self.offset = offset + self._fieldsize = fieldsize + self._is_pointer_field = is_pointer + self._is_float_field = is_float + self._is_field_signed = is_signed + + def get_field_size(self, translate_support_code): + return self._fieldsize class NonGcPtrFieldDescr(BaseFieldDescr): _clsname = 'NonGcPtrFieldDescr' diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -13,6 +13,7 @@ from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes from pypy.jit.backend.llsupport.descr import get_size_descr, BaseSizeDescr from pypy.jit.backend.llsupport.descr import get_field_descr, BaseFieldDescr +from pypy.jit.backend.llsupport.descr import DynamicFieldDescr from pypy.jit.backend.llsupport.descr import get_array_descr, BaseArrayDescr from 
pypy.jit.backend.llsupport.descr import get_call_descr from pypy.jit.backend.llsupport.descr import BaseIntCallDescr, GcPtrCallDescr @@ -225,6 +226,9 @@ def fielddescrof(self, STRUCT, fieldname): return get_field_descr(self.gc_ll_descr, STRUCT, fieldname) + def fielddescrof_dynamic(self, offset, fieldsize, is_pointer, is_float, is_signed): + return DynamicFieldDescr(offset, fieldsize, is_pointer, is_float, is_signed) + def unpack_fielddescr(self, fielddescr): assert isinstance(fielddescr, BaseFieldDescr) return fielddescr.offset diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1529,8 +1529,7 @@ assert field.is_pointer_field() == field_dyn.is_pointer_field() assert field.is_float_field() == field_dyn.is_float_field() if 'llgraph' not in str(self.cpu): - assert field.is_signed_field() == field_dyn.is_signed_field() - assert field.get_field_size() == field_dyn.get_field_size() + assert field.is_field_signed() == field_dyn.is_field_signed() # for get_op, set_op in ((rop.GETFIELD_RAW, rop.SETFIELD_RAW), diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -115,4 +115,3 @@ setfield_raw=1, getfield_raw=1, call=0) - From noreply at buildbot.pypy.org Fri Sep 9 16:51:49 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 16:51:49 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: fix translation by iterating only on the types which are known to be integers Message-ID: <20110909145149.2017C8203C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47187:108d00ff4edd Date: 2011-09-09 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/108d00ff4edd/ Log: fix translation by iterating only on the types which are known to be integers diff --git 
a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py --- a/pypy/rlib/clibffi.py +++ b/pypy/rlib/clibffi.py @@ -210,10 +210,7 @@ elif sz == 8: return ffi_type_uint64 else: raise ValueError("unsupported type size for %r" % (TYPE,)) -TYPE_MAP = { - rffi.DOUBLE : ffi_type_double, - rffi.FLOAT : ffi_type_float, - rffi.LONGDOUBLE : ffi_type_longdouble, +TYPE_MAP_INT = { rffi.UCHAR : ffi_type_uchar, rffi.CHAR : ffi_type_schar, rffi.SHORT : ffi_type_sshort, @@ -226,11 +223,24 @@ rffi.LONG : _signed_type_for(rffi.LONG), rffi.ULONGLONG : _unsigned_type_for(rffi.ULONGLONG), rffi.LONGLONG : _signed_type_for(rffi.LONGLONG), - lltype.Void : ffi_type_void, lltype.UniChar : _unsigned_type_for(lltype.UniChar), lltype.Bool : _unsigned_type_for(lltype.Bool), +} + +TYPE_MAP_FLOAT = { + rffi.DOUBLE : ffi_type_double, + rffi.FLOAT : ffi_type_float, + rffi.LONGDOUBLE : ffi_type_longdouble, } +TYPE_MAP = { + lltype.Void : ffi_type_void, + } +TYPE_MAP.update(TYPE_MAP_INT) +TYPE_MAP.update(TYPE_MAP_FLOAT) + +ffitype_map_int = unrolling_iterable(TYPE_MAP_INT.iteritems()) +ffitype_map_float = unrolling_iterable(TYPE_MAP_FLOAT.iteritems()) ffitype_map = unrolling_iterable(TYPE_MAP.iteritems()) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -419,7 +419,7 @@ Return the field of type ``ffitype`` at ``addr+offset``, widened to lltype.Signed. """ - for TYPE, ffitype2 in clibffi.ffitype_map: + for TYPE, ffitype2 in clibffi.ffitype_map_int: if ffitype is ffitype2: value = _struct_getfield(TYPE, addr, offset) return rffi.cast(lltype.Signed, value) @@ -430,7 +430,7 @@ Set the field of type ``ffitype`` at ``addr+offset``. ``value`` is of type lltype.Signed, and it's automatically converted to the right type. 
""" - for TYPE, ffitype2 in clibffi.ffitype_map: + for TYPE, ffitype2 in clibffi.ffitype_map_int: if ffitype is ffitype2: value = rffi.cast(TYPE, value) _struct_setfield(TYPE, addr, offset, value) From noreply at buildbot.pypy.org Fri Sep 9 16:51:50 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 9 Sep 2011 16:51:50 +0200 (CEST) Subject: [pypy-commit] pypy ffistruct: merge heads Message-ID: <20110909145150.658138203C@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ffistruct Changeset: r47188:5219d5921d77 Date: 2011-09-09 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/5219d5921d77/ Log: merge heads diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -764,7 +764,9 @@ op_getfield_gc_pure = op_getfield_gc def op_getfield_raw(self, fielddescr, struct): - if fielddescr.typeinfo == REF: + if fielddescr.arg_types == 'dynamic': # abuse of .arg_types + return do_getfield_raw_dynamic(struct, fielddescr) + elif fielddescr.typeinfo == REF: return do_getfield_raw_ptr(struct, fielddescr.ofs) elif fielddescr.typeinfo == INT: return do_getfield_raw_int(struct, fielddescr.ofs) @@ -817,7 +819,9 @@ raise NotImplementedError def op_setfield_raw(self, fielddescr, struct, newvalue): - if fielddescr.typeinfo == REF: + if fielddescr.arg_types == 'dynamic': # abuse of .arg_types + do_setfield_raw_dynamic(struct, fielddescr, newvalue) + elif fielddescr.typeinfo == REF: do_setfield_raw_ptr(struct, fielddescr.ofs, newvalue) elif fielddescr.typeinfo == INT: do_setfield_raw_int(struct, fielddescr.ofs, newvalue) @@ -1370,6 +1374,17 @@ def do_getfield_raw_ptr(struct, fieldnum): return cast_to_ptr(_getfield_raw(struct, fieldnum)) +def do_getfield_raw_dynamic(struct, fielddescr): + from pypy.rlib import libffi + addr = cast_from_int(rffi.VOIDP, struct) + ofs = fielddescr.ofs + if fielddescr.is_pointer_field(): + assert False, 'fixme' + elif 
fielddescr.is_float_field(): + assert False, 'fixme' + else: + return libffi._struct_getfield(lltype.Signed, addr, ofs) + def do_new(size): TYPE = symbolic.Size2Type[size] x = lltype.malloc(TYPE, zero=True) @@ -1453,6 +1468,17 @@ newvalue = cast_from_ptr(FIELDTYPE, newvalue) setattr(ptr, fieldname, newvalue) +def do_setfield_raw_dynamic(struct, fielddescr, newvalue): + from pypy.rlib import libffi + addr = cast_from_int(rffi.VOIDP, struct) + ofs = fielddescr.ofs + if fielddescr.is_pointer_field(): + assert False, 'fixme' + elif fielddescr.is_float_field(): + assert False, 'fixme' + else: + libffi._struct_setfield(lltype.Signed, addr, ofs, newvalue) + def do_newstr(length): x = rstr.mallocstr(length) return cast_to_ptr(x) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -316,6 +316,16 @@ token = history.getkind(getattr(S, fieldname)) return self.getdescr(ofs, token[0], name=fieldname) + def fielddescrof_dynamic(self, offset, fieldsize, is_pointer, is_float, is_signed): + if is_pointer: + typeinfo = REF + elif is_float: + typeinfo = FLOAT + else: + typeinfo = INT + # we abuse the arg_types field to distinguish dynamic and static descrs + return self.getdescr(offset, typeinfo, arg_types='dynamic', name='') + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): arg_types = [] for ARG in ARGS: diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -113,6 +113,17 @@ def repr_of_descr(self): return '<%s %s %s>' % (self._clsname, self.name, self.offset) +class DynamicFieldDescr(BaseFieldDescr): + + def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed): + self.offset = offset + self._fieldsize = fieldsize + self._is_pointer_field = is_pointer + self._is_float_field = is_float + self._is_field_signed = is_signed + + def 
get_field_size(self, translate_support_code): + return self._fieldsize class NonGcPtrFieldDescr(BaseFieldDescr): _clsname = 'NonGcPtrFieldDescr' diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -13,6 +13,7 @@ from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes from pypy.jit.backend.llsupport.descr import get_size_descr, BaseSizeDescr from pypy.jit.backend.llsupport.descr import get_field_descr, BaseFieldDescr +from pypy.jit.backend.llsupport.descr import DynamicFieldDescr from pypy.jit.backend.llsupport.descr import get_array_descr, BaseArrayDescr from pypy.jit.backend.llsupport.descr import get_call_descr from pypy.jit.backend.llsupport.descr import BaseIntCallDescr, GcPtrCallDescr @@ -225,6 +226,9 @@ def fielddescrof(self, STRUCT, fieldname): return get_field_descr(self.gc_ll_descr, STRUCT, fieldname) + def fielddescrof_dynamic(self, offset, fieldsize, is_pointer, is_float, is_signed): + return DynamicFieldDescr(offset, fieldsize, is_pointer, is_float, is_signed) + def unpack_fielddescr(self, fielddescr): assert isinstance(fielddescr, BaseFieldDescr) return fielddescr.offset diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1509,20 +1509,37 @@ assert s.x == chr(190) assert s.y == chr(150) - def test_field_raw_pure(self): - # This is really testing the same thing as test_field_basic but can't - # hurt... 
- S = lltype.Struct('S', ('x', lltype.Signed)) + def test_fielddescrof_dynamic(self): + S = lltype.Struct('S', + ('x', lltype.Signed), + ('y', lltype.Signed), + ) + longsize = rffi.sizeof(lltype.Signed) + y_ofs = longsize s = lltype.malloc(S, flavor='raw') sa = llmemory.cast_ptr_to_adr(s) s_box = BoxInt(heaptracker.adr2int(sa)) + # + field = self.cpu.fielddescrof(S, 'y') + field_dyn = self.cpu.fielddescrof_dynamic(offset=y_ofs, + fieldsize=longsize, + is_pointer=False, + is_float=False, + is_signed=True) + assert field.is_pointer_field() == field_dyn.is_pointer_field() + assert field.is_float_field() == field_dyn.is_float_field() + if 'llgraph' not in str(self.cpu): + assert field.is_field_signed() == field_dyn.is_field_signed() + + # for get_op, set_op in ((rop.GETFIELD_RAW, rop.SETFIELD_RAW), (rop.GETFIELD_RAW_PURE, rop.SETFIELD_RAW)): - fd = self.cpu.fielddescrof(S, 'x') - self.execute_operation(set_op, [s_box, BoxInt(32)], 'void', - descr=fd) - res = self.execute_operation(get_op, [s_box], 'int', descr=fd) - assert res.getint() == 32 + for descr in (field, field_dyn): + self.execute_operation(set_op, [s_box, BoxInt(32)], 'void', + descr=descr) + res = self.execute_operation(get_op, [s_box], 'int', descr=descr) + assert res.getint() == 32 + lltype.free(s, flavor='raw') def test_new_with_vtable(self): diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -48,6 +48,8 @@ OS_LIBFFI_PREPARE = 60 OS_LIBFFI_PUSH_ARG = 61 OS_LIBFFI_CALL = 62 + OS_LIBFFI_STRUCT_GETFIELD = 63 + OS_LIBFFI_STRUCT_SETFIELD = 64 # OS_LLONG_INVERT = 69 OS_LLONG_ADD = 70 diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1483,6 +1483,12 @@ elif oopspec_name.startswith('libffi_call_'): oopspecindex = EffectInfo.OS_LIBFFI_CALL extraeffect = EffectInfo.EF_RANDOM_EFFECTS + 
elif oopspec_name == 'libffi_struct_getfield': + oopspecindex = EffectInfo.OS_LIBFFI_STRUCT_GETFIELD + extraeffect = EffectInfo.EF_CANNOT_RAISE + elif oopspec_name == 'libffi_struct_setfield': + oopspecindex = EffectInfo.OS_LIBFFI_STRUCT_SETFIELD + extraeffect = EffectInfo.EF_CANNOT_RAISE else: assert False, 'unsupported oopspec: %s' % oopspec_name return self._handle_oopspec_call(op, args, oopspecindex, extraeffect) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -392,7 +392,6 @@ def _ll_3_libffi_call_void(llfunc, funcsym, ll_args): return func(llfunc)._do_call(funcsym, ll_args, lltype.Void) - # in the following calls to builtins, the JIT is allowed to look inside: inline_calls_to = [ ('int_floordiv_ovf_zer', [lltype.Signed, lltype.Signed], lltype.Signed), diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -1,7 +1,9 @@ from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.libffi import Func from pypy.rlib.debug import debug_print +from pypy.rlib import libffi, clibffi from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method @@ -116,6 +118,9 @@ ops = self.do_push_arg(op) elif oopspec == EffectInfo.OS_LIBFFI_CALL: ops = self.do_call(op) + elif (oopspec == EffectInfo.OS_LIBFFI_STRUCT_GETFIELD or + oopspec == EffectInfo.OS_LIBFFI_STRUCT_SETFIELD): + ops = self.do_struct_getsetfield(op, oopspec) # for op in ops: self.emit_operation(op) @@ -190,6 +195,46 @@ ops.append(newop) return ops + def do_struct_getsetfield(self, op, oopspec): + ffitypeval = 
self.getvalue(op.getarg(1)) + addrval = self.getvalue(op.getarg(2)) + offsetval = self.getvalue(op.getarg(3)) + if not ffitypeval.is_constant() or not offsetval.is_constant(): + return [op] + # + ffitypeaddr = ffitypeval.box.getaddr() + ffitype = llmemory.cast_adr_to_ptr(ffitypeaddr, clibffi.FFI_TYPE_P) + offset = offsetval.box.getint() + descr = self._get_field_descr(ffitype, offset) + # + arglist = [addrval.force_box()] + if oopspec == EffectInfo.OS_LIBFFI_STRUCT_GETFIELD: + opnum = rop.GETFIELD_RAW + else: + opnum = rop.SETFIELD_RAW + newval = self.getvalue(op.getarg(4)) + arglist.append(newval.force_box()) + # + newop = ResOperation(opnum, arglist, op.result, descr=descr) + return [newop] + + def _get_field_descr(self, ffitype, offset): + kind = libffi.types.getkind(ffitype) + is_pointer = is_float = is_signed = False + if ffitype is libffi.types.pointer: + is_pointer = True + elif kind == 'i': + is_signed = True + elif kind == 'f' or kind == 'I' or kind == 'U': + # longlongs are treated as floats, see e.g. 
llsupport/descr.py:getDescrClass + is_float = True + else: + assert False, "unsupported ffitype or kind" + # + fieldsize = ffitype.c_size + return self.optimizer.cpu.fielddescrof_dynamic(offset, fieldsize, + is_pointer, is_float, is_signed) + def propagate_forward(self, op): if self.logops is not None: debug_print(self.logops.repr_of_resop(op)) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -56,6 +56,13 @@ restype=types.sint, flags=43) # + ffi_slong = types.slong + dyn_123_field = cpu.fielddescrof_dynamic(offset=123, + fieldsize=types.slong.c_size, + is_pointer=False, + is_float=False, + is_signed=True) + # def calldescr(cpu, FUNC, oopspecindex, extraeffect=None): if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: f = None # means "can force all" really @@ -69,6 +76,8 @@ libffi_push_arg = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PUSH_ARG) libffi_call = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_CALL, EffectInfo.EF_RANDOM_EFFECTS) + libffi_struct_getfield = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_STRUCT_GETFIELD) + libffi_struct_setfield = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_STRUCT_SETFIELD) namespace = namespace.__dict__ @@ -277,3 +286,30 @@ jump(i3, f1, p2) """ loop = self.optimize_loop(ops, expected) + + def test_ffi_struct_fields(self): + ops = """ + [i0] + i1 = call(0, ConstClass(ffi_slong), i0, 123, descr=libffi_struct_getfield) + i2 = int_add(i1, 1) + call(0, ConstClass(ffi_slong), i0, 123, i2, descr=libffi_struct_setfield) + jump(i1) + """ + expected = """ + [i0] + i1 = getfield_raw(i0, descr=dyn_123_field) + i2 = int_add(i1, 1) + setfield_raw(i0, i2, descr=dyn_123_field) + jump(i1) + """ + loop = self.optimize_loop(ops, expected) + + def test_ffi_struct_fields_nonconst(self): + ops = """ + [i0, i1] + i2 = call(0, 
ConstClass(ffi_slong), i0, i1, descr=libffi_struct_getfield) + i3 = call(0, i1 , i0, 123, descr=libffi_struct_getfield) + jump(i1) + """ + expected = ops + loop = self.optimize_loop(ops, expected) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -3,8 +3,8 @@ from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong from pypy.rlib.jit import JitDriver, promote, dont_look_inside from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.libffi import ArgChain -from pypy.rlib.libffi import IS_32_BIT +from pypy.rlib.libffi import ArgChain, types +from pypy.rlib.libffi import IS_32_BIT, struct_setfield_int, struct_getfield_int from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib.objectmodel import specialize @@ -93,5 +93,25 @@ test_byval_result.dont_track_allocations = True + class TestFfiCallSupportAll(TestFfiCall): supports_all = True # supports_{floats,longlong,singlefloats} + + + def test_struct_getfield(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'addr']) + + def f(n): + i = 0 + addr = lltype.malloc(rffi.VOIDP.TO, 10, flavor='raw') + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, addr=addr) + struct_setfield_int(types.slong, addr, 0, 1) + i += struct_getfield_int(types.slong, addr, 0) + lltype.free(addr, flavor='raw') + return i + assert self.meta_interp(f, [20]) == f(20) + self.check_loops( + setfield_raw=1, + getfield_raw=1, + call=0) diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -414,6 +414,7 @@ # ====================================================================== + at jit.oopspec('libffi_struct_getfield(ffitype, addr, offset)') def struct_getfield_int(ffitype, addr, offset): """ Return the field of type ``ffitype`` at 
``addr+offset``, widened to @@ -425,6 +426,8 @@ return rffi.cast(lltype.Signed, value) assert False, "cannot find the given ffitype" + + at jit.oopspec('libffi_struct_setfield(ffitype, addr, offset, value)') def struct_setfield_int(ffitype, addr, offset, value): """ Set the field of type ``ffitype`` at ``addr+offset``. ``value`` is of @@ -438,7 +441,6 @@ assert False, "cannot find the given ffitype" - at jit.dont_look_inside @specialize.arg(0) def _struct_getfield(TYPE, addr, offset): """ @@ -450,7 +452,6 @@ return rffi.cast(PTR_FIELD, addr)[0] - at jit.dont_look_inside @specialize.arg(0) def _struct_setfield(TYPE, addr, offset, value): """ From noreply at buildbot.pypy.org Fri Sep 9 18:00:31 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 9 Sep 2011 18:00:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Make getwindowsversion return a sructseq, rather than a tuple. (untested, not ata windows machine) Message-ID: <20110909160031.C0F2D8203C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47189:101130586a1a Date: 2011-09-09 09:00 -0700 http://bitbucket.org/pypy/pypy/changeset/101130586a1a/ Log: Make getwindowsversion return a sructseq, rather than a tuple. (untested, not ata windows machine) diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -6,11 +6,11 @@ import sys def test_stdin_exists(space): - space.sys.get('stdin') + space.sys.get('stdin') space.sys.get('__stdin__') def test_stdout_exists(space): - space.sys.get('stdout') + space.sys.get('stdout') space.sys.get('__stdout__') class AppTestAppSysTests: @@ -25,7 +25,7 @@ assert 'sys' in modules, ( "An entry for sys " "is not in sys.modules.") sys2 = sys.modules['sys'] - assert sys is sys2, "import sys is not sys.modules[sys]." + assert sys is sys2, "import sys is not sys.modules[sys]." 
def test_builtin_in_modules(self): import sys modules = sys.modules @@ -89,12 +89,12 @@ else: raise AssertionError, "ZeroDivisionError not caught" - def test_io(self): + def test_io(self): import sys assert isinstance(sys.__stdout__, file) assert isinstance(sys.__stderr__, file) assert isinstance(sys.__stdin__, file) - + if self.appdirect and not isinstance(sys.stdin, file): return @@ -324,7 +324,7 @@ import sys if self.appdirect: skip("not worth running appdirect") - + encoding = sys.getdefaultencoding() try: sys.setdefaultencoding("ascii") @@ -334,11 +334,11 @@ sys.setdefaultencoding("latin-1") assert sys.getdefaultencoding() == 'latin-1' assert unicode('\x80') == u'\u0080' - + finally: sys.setdefaultencoding(encoding) - + # testing sys.settrace() is done in test_trace.py # testing sys.setprofile() is done in test_profile.py @@ -372,6 +372,16 @@ assert isinstance(v[3], int) assert isinstance(v[4], str) + assert v[0] == v.major + assert v[1] == v.minor + assert v[2] == v.build + assert v[3] == v.platform + assert v[4] == v.service_pack + + # This is how platform.py calls it. Make sure tuple still has 5 + # elements + maj, min, buildno, plat, csd = sys.getwindowsversion() + def test_winver(self): import sys if hasattr(sys, "winver"): @@ -564,7 +574,7 @@ if self.ready: break time.sleep(0.1) return sys._current_frames() - + frames = f() thisframe = frames.pop(thread_id) assert thisframe.f_code.co_name == 'f' diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -1,11 +1,13 @@ """ Implementation of interpreter-level 'sys' routines. 
""" +import sys + +from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec, NoneNotWrapped +from pypy.rlib import jit from pypy.rlib.runicode import MAXUNICODE -from pypy.rlib import jit -import sys # ____________________________________________________________ @@ -58,7 +60,7 @@ space.setitem(w_result, space.wrap(thread_ident), space.wrap(f)) - return w_result + return w_result def setrecursionlimit(space, w_new_limit): """setrecursionlimit() sets the maximum number of nested calls that @@ -124,7 +126,7 @@ """Set the global debug tracing function. It will be called on each function call. See the debugger chapter in the library manual.""" space.getexecutioncontext().settrace(w_func) - + def setprofile(space, w_func): """Set the profiling function. It will be called on each function call and return. See the profiler chapter in the library manual.""" @@ -145,14 +147,35 @@ a debugger from a checkpoint, to recursively debug some other code.""" return space.getexecutioncontext().call_tracing(w_func, w_args) + +app = gateway.applevel(''' +"NOT_RPYTHON" +from _structseq import structseqtype, structseqfield + +class windows_version_info: + __metaclass__ = structseqtype + + name = "sys.getwindowsversion" + + major = structseqfield(0, "Major version number") + minor = structseqfield(1, "Minor version number") + build = structseqfield(2, "Build number") + platform = structseqfield(3, "Operating system platform") + service_pack = structseqfield(4, "Latest Service Pack installed on the system") +''') + def getwindowsversion(space): from pypy.rlib import rwin32 info = rwin32.GetVersionEx() - return space.newtuple([space.wrap(info[0]), - space.wrap(info[1]), - space.wrap(info[2]), - space.wrap(info[3]), - space.wrap(info[4])]) + w_windows_version_info = app.wget(space, "windows_version_info") + raw_version = space.newtuple([ + space.wrap(info[0]), + space.wrap(info[1]), + space.wrap(info[2]), + 
space.wrap(info[3]), + space.wrap(info[4]) + ]) + return space.call_function(w_windows_version_info, raw_version) @jit.dont_look_inside def get_dllhandle(space): From noreply at buildbot.pypy.org Fri Sep 9 22:02:39 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Fri, 9 Sep 2011 22:02:39 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: fixed jit codewriter problem, but blackhole still complains about there not being a cast from ulonglong to float Message-ID: <20110909200239.9C4D28203C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47190:a74952455bd7 Date: 2011-09-09 20:01 +0000 http://bitbucket.org/pypy/pypy/changeset/a74952455bd7/ Log: fixed jit codewriter problem, but blackhole still complains about there not being a cast from ulonglong to float diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -884,9 +884,15 @@ v = v_arg oplist = [] if unsigned1: - opname = 'cast_uint_to_longlong' + if unsigned2: + opname = 'cast_uint_to_ulonglong' + else: + opname = 'cast_uint_to_longlong' else: - opname = 'cast_int_to_longlong' + if unsigned2: + opname = 'cast_int_to_ulonglong' + else: + opname = 'cast_int_to_longlong' op2 = self.rewrite_operation( SpaceOperation(opname, [v], v_result) ) @@ -996,6 +1002,19 @@ return op2 ''' % (_op, _oopspec.lower(), _oopspec, _oopspec)).compile() + for _op, _oopspec in [('cast_int_to_ulonglong', 'FROM_INT'), + ('cast_uint_to_ulonglong', 'FROM_UINT'), + ]: + exec py.code.Source(''' + def rewrite_op_%s(self, op): + args = op.args + op1 = self.prepare_builtin_call(op, "ullong_%s", args) + op2 = self._handle_oopspec_call(op1, args, + EffectInfo.OS_LLONG_%s, + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + return op2 + ''' % (_op, _oopspec.lower(), _oopspec)).compile() + def _normalize(self, oplist): if isinstance(oplist, SpaceOperation): return [oplist] diff --git 
a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -315,9 +315,15 @@ def _ll_1_llong_from_int(x): return r_longlong(intmask(x)) +def _ll_1_ullong_from_int(x): + return r_ulonglong(intmask(x)) + def _ll_1_llong_from_uint(x): return r_longlong(r_uint(x)) +def _ll_1_ullong_from_uint(x): + return r_ulonglong(r_uint(x)) + def _ll_1_llong_to_int(xll): return intmask(xll) From noreply at buildbot.pypy.org Fri Sep 9 22:45:58 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Fri, 9 Sep 2011 22:45:58 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: put in cast_ulonglong_to_float. Now translates on 32-bit as well. Message-ID: <20110909204558.CA89A8203C@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47191:0a94e1a139da Date: 2011-09-09 20:45 +0000 http://bitbucket.org/pypy/pypy/changeset/0a94e1a139da/ Log: put in cast_ulonglong_to_float. Now translates on 32-bit as well. 
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1004,6 +1004,7 @@ for _op, _oopspec in [('cast_int_to_ulonglong', 'FROM_INT'), ('cast_uint_to_ulonglong', 'FROM_UINT'), + ('cast_ulonglong_to_float', 'TO_FLOAT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -333,6 +333,9 @@ def _ll_1_llong_to_float(xll): return float(rffi.cast(lltype.SignedLongLong, xll)) +def _ll_1_ullong_to_float(xull): + return float(rffi.cast(lltype.UnsignedLongLong, xull)) + def _ll_1_llong_abs(xll): if xll < 0: From noreply at buildbot.pypy.org Sat Sep 10 02:21:11 2011 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 10 Sep 2011 02:21:11 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: object identity conservation Message-ID: <20110910002111.665828203C@wyvern.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r47192:2aee950f746f Date: 2011-09-09 14:25 -0700 http://bitbucket.org/pypy/pypy/changeset/2aee950f746f/ Log: object identity conservation diff --git a/pypy/module/cppyy/capi/__init__.py b/pypy/module/cppyy/capi/__init__.py --- a/pypy/module/cppyy/capi/__init__.py +++ b/pypy/module/cppyy/capi/__init__.py @@ -5,8 +5,12 @@ #import cint_capi as backend +C_NULL_VOIDP = lltype.nullptr(rffi.VOIDP.TO) + C_TYPEHANDLE = rffi.LONG +C_NULL_TYPEHANDLE = rffi.cast(C_TYPEHANDLE, C_NULL_VOIDP) C_OBJECT = rffi.VOIDP +C_NULL_OBJECT = C_NULL_VOIDP C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP) C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER) diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -266,6 +266,10 @@ ptr_result = rffi.cast(rffi.VOIDP, long_result) return 
interp_cppyy.new_instance(space, w_returntype, self.cpptype, ptr_result, True) + def execute_libffi(self, space, w_returntype, libffifunc, argchain): + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + _executors = {} def get_executor(space, name): diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -7,16 +7,15 @@ from pypy.rpython.lltypesystem import rffi, lltype -from pypy.rlib import libffi, rdynload +from pypy.rlib import libffi, rdynload, rweakref from pypy.rlib import jit, debug from pypy.module.cppyy import converter, executor, helper + class FastCallNotPossible(Exception): pass -NULL_VOIDP = lltype.nullptr(rffi.VOIDP.TO) - def _direct_ptradd(ptr, offset): # TODO: factor out with convert.py address = rffi.cast(rffi.CCHARP, ptr) return rffi.cast(rffi.VOIDP, lltype.direct_ptradd(address, offset)) @@ -32,7 +31,7 @@ class State(object): def __init__(self, space): self.cpptype_cache = { - "void" : W_CPPType(space, "void", rffi.cast(capi.C_TYPEHANDLE, NULL_VOIDP)) } + "void" : W_CPPType(space, "void", capi.C_NULL_TYPEHANDLE) } self.cpptemplatetype_cache = {} @unwrap_spec(name=str) @@ -254,7 +253,7 @@ cppinstance.cppclass.handle, self.scope_handle, cppinstance.rawobject) cppthis = _direct_ptradd(cppinstance.rawobject, offset) else: - cppthis = NULL_VOIDP + cppthis = capi.C_NULL_OBJECT return cppthis @jit.unroll_safe @@ -556,8 +555,9 @@ def destruct(self): assert isinstance(self, W_CPPInstance) if self.rawobject: + memory_regulator.unregister(self) capi.c_destruct(self.cppclass.handle, self.rawobject) - self.rawobject = NULL_VOIDP + self.rawobject = capi.C_NULL_OBJECT def __del__(self): if self.python_owns: @@ -574,13 +574,43 @@ ) W_CPPInstance.typedef.acceptable_as_base_class = True -def new_instance(space, w_type, cpptype, rawptr, owns): + +class MemoryRegulator: + # TODO: (?) 
An object address is not unique if e.g. the class has a + # public data member of class type at the start of its definition and + # has no virtual functions. A _key class that hashes on address and + # type would be better, but my attempt failed in the rtyper, claiming + # a call on None ("None()") and needed a default ctor. (??) + # Note that for now, the associated test carries an m_padding to make + # a difference in the addresses. + def __init__(self): + self.objects = rweakref.RWeakValueDictionary(int, W_CPPInstance) + + def register(self, obj): + int_address = int(rffi.cast(rffi.LONG, obj.rawobject)) + self.objects.set(int_address, obj) + + def unregister(self, obj): + int_address = int(rffi.cast(rffi.LONG, obj.rawobject)) + self.objects.set(int_address, None) + + def retrieve(self, address): + int_address = int(rffi.cast(rffi.LONG, address)) + return self.objects.get(int_address) + +memory_regulator = MemoryRegulator() + + +def new_instance(space, w_type, cpptype, rawobject, python_owns): + obj = memory_regulator.retrieve(rawobject) + if obj and obj.cppclass == cpptype: + return obj w_cppinstance = space.allocate_instance(W_CPPInstance, w_type) cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) - W_CPPInstance.__init__(cppinstance, space, cpptype, rawptr, owns) + W_CPPInstance.__init__(cppinstance, space, cpptype, rawobject, python_owns) + memory_regulator.register(cppinstance) return w_cppinstance - @unwrap_spec(cppinstance=W_CPPInstance) def addressof(space, cppinstance): address = rffi.cast(rffi.LONG, cppinstance.rawobject) @@ -591,4 +621,9 @@ rawobject = rffi.cast(rffi.VOIDP, address) w_cpptype = space.findattr(w_type, space.wrap("_cpp_proxy")) cpptype = space.interp_w(W_CPPType, w_cpptype, can_be_None=False) + + obj = memory_regulator.retrieve(rawobject) + if obj and obj.cppclass == cpptype: + return obj + return new_instance(space, w_type, cpptype, rawobject, owns) diff --git a/pypy/module/cppyy/test/advancedcpp.cxx 
b/pypy/module/cppyy/test/advancedcpp.cxx --- a/pypy/module/cppyy/test/advancedcpp.cxx +++ b/pypy/module/cppyy/test/advancedcpp.cxx @@ -60,7 +60,7 @@ double my_global_array[500]; -// for life-line testing +// for life-line and identity testing int some_class_with_data::some_data::s_num_data = 0; diff --git a/pypy/module/cppyy/test/advancedcpp.h b/pypy/module/cppyy/test/advancedcpp.h --- a/pypy/module/cppyy/test/advancedcpp.h +++ b/pypy/module/cppyy/test/advancedcpp.h @@ -256,7 +256,7 @@ //=========================================================================== -class some_class_with_data { // for life-line testing +class some_class_with_data { // for life-line and identity testing public: class some_data { public: @@ -275,6 +275,7 @@ return m_data; } + int m_padding; some_data m_data; }; diff --git a/pypy/module/cppyy/test/advancedcpp.xml b/pypy/module/cppyy/test/advancedcpp.xml --- a/pypy/module/cppyy/test/advancedcpp.xml +++ b/pypy/module/cppyy/test/advancedcpp.xml @@ -26,6 +26,9 @@ + + + diff --git a/pypy/module/cppyy/test/advancedcpp_LinkDef.h b/pypy/module/cppyy/test/advancedcpp_LinkDef.h --- a/pypy/module/cppyy/test/advancedcpp_LinkDef.h +++ b/pypy/module/cppyy/test/advancedcpp_LinkDef.h @@ -40,6 +40,9 @@ #pragma link C++ class some_abstract_class; #pragma link C++ class some_concrete_class; +#pragma link C++ class some_convertible; +#pragma link C++ class some_class_with_data; +#pragma link C++ class some_class_with_data::some_data; #pragma link C++ class pointer_pass; diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -357,7 +357,38 @@ assert o == cppyy.bind_object(addr, o.__class__) #assert o == cppyy.bind_object(addr, "some_concrete_class") - def test10_multi_methods(self): + def test10_object_identity(self): + """Test object identity""" + + import cppyy + some_concrete_class = 
cppyy.gbl.some_concrete_class + some_class_with_data = cppyy.gbl.some_class_with_data + + o = some_concrete_class() + addr = cppyy.addressof(o) + + o2 = cppyy.bind_object(addr, some_concrete_class) + assert o is o2 + + o3 = cppyy.bind_object(addr, some_class_with_data) + assert not o is o3 + + d1 = some_class_with_data() + d2 = d1.gime_copy() + assert not d1 is d2 + + dd1a = d1.gime_data() + dd1b = d1.gime_data() + assert dd1a is dd1b + + dd2 = d2.gime_data() + assert not dd1a is dd2 + assert not dd1b is dd2 + + d2.destruct() + d1.destruct() + + def test11_multi_methods(self): """Test calling of methods from multiple inheritance""" import cppyy diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -237,6 +237,7 @@ pl_a = example01_class.staticCyclePayload(pl, 66.) pl_a.getData() == 66. assert payload_class.count == 1 + pl_a = None pl = None gc.collect() assert payload_class.count == 0 From noreply at buildbot.pypy.org Sat Sep 10 07:59:51 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 10 Sep 2011 07:59:51 +0200 (CEST) Subject: [pypy-commit] pypy default: More fields for getwindowsversion (can't test, no windows). Message-ID: <20110910055951.D3F868203C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47193:ca11f6ae93ae Date: 2011-09-09 22:59 -0700 http://bitbucket.org/pypy/pypy/changeset/ca11f6ae93ae/ Log: More fields for getwindowsversion (can't test, no windows). 
diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -378,6 +378,11 @@ assert v[3] == v.platform assert v[4] == v.service_pack + assert isinstance(v.service_pack_minor, int) + assert isinstance(v.service_pack_major, int) + assert isinstance(v.suite_mask, int) + assert isinstance(v.product_type, int) + # This is how platform.py calls it. Make sure tuple still has 5 # elements maj, min, buildno, plat, csd = sys.getwindowsversion() diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -162,8 +162,16 @@ build = structseqfield(2, "Build number") platform = structseqfield(3, "Operating system platform") service_pack = structseqfield(4, "Latest Service Pack installed on the system") + + # Because the indices aren't consecutive, they aren't included when + # unpacking and other such operations. + service_pack_major = structseqfield(10, "Service Pack major version number") + service_pack_minor = structseqfield(11, "Service Pack minor version number") + suite_mask = structseqfield(12, "Bit mask identifying available product suites") + product_type = structseqfield(13, "System product type") ''') + def getwindowsversion(space): from pypy.rlib import rwin32 info = rwin32.GetVersionEx() @@ -173,7 +181,11 @@ space.wrap(info[1]), space.wrap(info[2]), space.wrap(info[3]), - space.wrap(info[4]) + space.wrap(info[4]), + space.wrap(info[5]), + space.wrap(info[6]), + space.wrap(info[7]), + space.wrap(info[8]), ]) return space.call_function(w_windows_version_info, raw_version) diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -55,14 +55,19 @@ SYSTEMTIME = rffi_platform.Struct('SYSTEMTIME', []) - OSVERSIONINFO = rffi_platform.Struct( - 'OSVERSIONINFO', + OSVERSIONINFOEX = rffi_platform.Struct( + 'OSVERSIONINFOEX', 
[('dwOSVersionInfoSize', rffi.UINT), ('dwMajorVersion', rffi.UINT), ('dwMinorVersion', rffi.UINT), ('dwBuildNumber', rffi.UINT), ('dwPlatformId', rffi.UINT), - ('szCSDVersion', rffi.CFixedArray(lltype.Char, 1))]) + ('szCSDVersion', rffi.CFixedArray(lltype.Char, 1)), + ('wServicePackMajor', rffi.USHORT), + ('wServicePackMinor', rffi.USHORT), + ('wSuiteMask', rffi.USHORT), + ('wProductType', rffi.UCHAR), + ]) LPSECURITY_ATTRIBUTES = rffi_platform.SimpleType( "LPSECURITY_ATTRIBUTES", rffi.CCHARP) @@ -225,14 +230,14 @@ lltype.free(buf, flavor='raw') _GetVersionEx = winexternal('GetVersionExA', - [lltype.Ptr(OSVERSIONINFO)], + [lltype.Ptr(OSVERSIONINFOEX)], DWORD) @jit.dont_look_inside def GetVersionEx(): info = lltype.malloc(OSVERSIONINFO, flavor='raw') rffi.setintfield(info, 'c_dwOSVersionInfoSize', - rffi.sizeof(OSVERSIONINFO)) + rffi.sizeof(OSVERSIONINFOEX)) try: if not _GetVersionEx(info): raise lastWindowsError() @@ -241,7 +246,11 @@ rffi.cast(lltype.Signed, info.c_dwBuildNumber), rffi.cast(lltype.Signed, info.c_dwPlatformId), rffi.charp2str(rffi.cast(rffi.CCHARP, - info.c_szCSDVersion))) + info.c_szCSDVersion)), + rffi.cast(lltype.Signed, info.c_wServicePackMajor), + rffi.cast(lltype.Signed, info.c_wServicePackMinor), + rffi.cast(lltype.Signed, info.c_wSuiteMask), + rffi.cast(lltype.Signed, info.c_wProductType)) finally: lltype.free(info, flavor='raw') From noreply at buildbot.pypy.org Sat Sep 10 09:47:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Sep 2011 09:47:07 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: 'cast_ulonglong_to_float' is really different from 'cast_longlong_to_float'. Message-ID: <20110910074707.2FD348203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: unsigned-dtypes Changeset: r47194:51bb610254c9 Date: 2011-09-10 09:46 +0200 http://bitbucket.org/pypy/pypy/changeset/51bb610254c9/ Log: 'cast_ulonglong_to_float' is really different from 'cast_longlong_to_float'. 
diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -74,6 +74,7 @@ OS_LLONG_UGE = 91 OS_LLONG_URSHIFT = 92 OS_LLONG_FROM_UINT = 93 + OS_LLONG_UNSIGNED_TO_FLOAT = 94 # OS_MATH_SQRT = 100 diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1004,7 +1004,7 @@ for _op, _oopspec in [('cast_int_to_ulonglong', 'FROM_INT'), ('cast_uint_to_ulonglong', 'FROM_UINT'), - ('cast_ulonglong_to_float', 'TO_FLOAT'), + ('cast_ulonglong_to_float', 'UNSIGNED_TO_FLOAT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): From noreply at buildbot.pypy.org Sat Sep 10 09:50:54 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Sep 2011 09:50:54 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Fix the tests. Message-ID: <20110910075054.7A91B8203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: unsigned-dtypes Changeset: r47195:4a19645b0df3 Date: 2011-09-10 09:50 +0200 http://bitbucket.org/pypy/pypy/changeset/4a19645b0df3/ Log: Fix the tests. 
diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ -829,14 +829,15 @@ self.encoding_test(f, [rffi.cast(FROM, 42)], expectedstr, transform=True) elif TO in (rffi.LONG, rffi.ULONG): + if rffi.cast(FROM, -1) < 0: + fnname = "llong_from_int" + else: + fnname = "llong_from_uint" if TO == rffi.LONG: TO = rffi.LONGLONG else: TO = rffi.ULONGLONG - if rffi.cast(FROM, -1) < 0: - fnname = "llong_from_int" - else: - fnname = "llong_from_uint" + fnname = "u" + fnname expected.pop() # remove int_return expected.append( "residual_call_irf_f $<* fn %s>, , I[%s], R[], F[] -> %%f0" diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -57,7 +57,8 @@ assert op1.opname == 'residual_call_irf_f' else: assert op1.opname == 'residual_call_irf_i' - gotindex = getattr(EffectInfo, 'OS_' + op1.args[0].value.upper()) + gotindex = getattr(EffectInfo, + 'OS_' + op1.args[0].value.upper().lstrip('U')) assert gotindex == oopspecindex assert op1.args[1] == 'calldescr-%d' % oopspecindex assert list(op1.args[2]) == [v for v in vlist From noreply at buildbot.pypy.org Sat Sep 10 10:14:51 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Sep 2011 10:14:51 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: Shorter name. Add cast_float_to_ulonglong. Message-ID: <20110910081451.4DC488203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: unsigned-dtypes Changeset: r47196:a29e7bc98682 Date: 2011-09-10 10:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a29e7bc98682/ Log: Shorter name. Add cast_float_to_ulonglong. 
diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -74,7 +74,7 @@ OS_LLONG_UGE = 91 OS_LLONG_URSHIFT = 92 OS_LLONG_FROM_UINT = 93 - OS_LLONG_UNSIGNED_TO_FLOAT = 94 + OS_LLONG_U_TO_FLOAT = 94 # OS_MATH_SQRT = 100 diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1004,7 +1004,8 @@ for _op, _oopspec in [('cast_int_to_ulonglong', 'FROM_INT'), ('cast_uint_to_ulonglong', 'FROM_UINT'), - ('cast_ulonglong_to_float', 'UNSIGNED_TO_FLOAT'), + ('cast_float_to_ulonglong', 'FROM_FLOAT'), + ('cast_ulonglong_to_float', 'U_TO_FLOAT'), ]: exec py.code.Source(''' def rewrite_op_%s(self, op): From noreply at buildbot.pypy.org Sat Sep 10 10:44:53 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 10 Sep 2011 10:44:53 +0200 (CEST) Subject: [pypy-commit] lang-scheme default: Added .hgignore Message-ID: <20110910084453.324428203C@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r8:fee3904afcff Date: 2011-09-07 20:35 +0200 http://bitbucket.org/pypy/lang-scheme/changeset/fee3904afcff/ Log: Added .hgignore diff --git a/.hgignore b/.hgignore new file mode 100644 --- /dev/null +++ b/.hgignore @@ -0,0 +1,7 @@ +syntax: glob +*.py[co] +*~ +.*.swp + +syntax: regexp +^scheme/ss-c From noreply at buildbot.pypy.org Sat Sep 10 10:44:54 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 10 Sep 2011 10:44:54 +0200 (CEST) Subject: [pypy-commit] lang-scheme default: Stub a character class, parses and implements simple characters Message-ID: <20110910084454.543F88203C@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r9:5c3fc0d3dfb0 Date: 2011-09-08 23:33 +0200 http://bitbucket.org/pypy/lang-scheme/changeset/5c3fc0d3dfb0/ Log: Stub a character class, parses and implements simple characters diff --git 
a/scheme/object.py b/scheme/object.py --- a/scheme/object.py +++ b/scheme/object.py @@ -129,6 +129,16 @@ def __repr__(self): return "" +class W_Character(W_Root): + def __init__(self, val): + self.chrval = val + + def to_string(self): + return self.chrval + + def __repr__(self): + return "" + class W_Real(W_Root): def __init__(self, val): self.exact = False diff --git a/scheme/ssparser.py b/scheme/ssparser.py --- a/scheme/ssparser.py +++ b/scheme/ssparser.py @@ -2,7 +2,7 @@ from pypy.rlib.parsing.makepackrat import BacktrackException, Status from scheme.object import W_Pair, W_Integer, W_String, symbol, \ w_nil, W_Boolean, W_Real, quote, qq, unquote, unquote_splicing, \ - w_ellipsis, SchemeSyntaxError + w_ellipsis, W_Character, SchemeSyntaxError def str_unquote(s): str_lst = [] @@ -31,6 +31,11 @@ IGNORE* return {W_String(str_unquote(c))}; + CHARACTER: + c = `#\\.` + IGNORE* + return {W_Character(c[2])}; + SYMBOL: c = `[\+\-\*\^\?a-zA-Z!<=>_~/$%&:][\+\-\*\^\?a-zA-Z0-9!<=>_~/$%&:]*` IGNORE* @@ -100,6 +105,7 @@ | FIXNUM | BOOLEAN | SYMBOL + | CHARACTER | STRING; list: diff --git a/scheme/test/test_parser.py b/scheme/test/test_parser.py --- a/scheme/test/test_parser.py +++ b/scheme/test/test_parser.py @@ -1,7 +1,7 @@ import py from scheme.ssparser import parse from scheme.object import W_Boolean, W_Real, W_Integer, W_String -from scheme.object import W_Pair, W_Nil, W_Symbol, W_Symbol +from scheme.object import W_Pair, W_Nil, W_Symbol, W_Character from pypy.rlib.parsing.makepackrat import BacktrackException def parse_sexpr(expr): @@ -17,8 +17,8 @@ return w_obj.strval elif isinstance(w_obj, W_Symbol): return w_obj.name - elif isinstance(w_obj, W_Symbol): - return w_obj.name + elif isinstance(w_obj, W_Character): + return w_obj.chrval elif isinstance(w_obj, W_Boolean): return w_obj.boolval elif isinstance(w_obj, W_Pair): @@ -39,6 +39,9 @@ w_fixnum = parse_sexpr('1123') assert unwrap(w_fixnum) == 1123 assert isinstance(w_fixnum, W_Integer) + w_char = parse_sexpr('#\\a') + 
assert isinstance(w_char, W_Character) + assert unwrap(w_char) == 'a' def test_symbol(): w_sym = parse_sexpr('abfa__') From noreply at buildbot.pypy.org Sat Sep 10 10:44:55 2011 From: noreply at buildbot.pypy.org (boemmels) Date: Sat, 10 Sep 2011 10:44:55 +0200 (CEST) Subject: [pypy-commit] lang-scheme default: dot (.) is valid in symbols Message-ID: <20110910084455.6C1588203C@wyvern.cs.uni-duesseldorf.de> Author: Juergen Boemmels Branch: Changeset: r10:61ac600ed18d Date: 2011-09-09 22:00 +0200 http://bitbucket.org/pypy/lang-scheme/changeset/61ac600ed18d/ Log: dot (.) is valid in symbols diff --git a/scheme/ssparser.py b/scheme/ssparser.py --- a/scheme/ssparser.py +++ b/scheme/ssparser.py @@ -37,7 +37,7 @@ return {W_Character(c[2])}; SYMBOL: - c = `[\+\-\*\^\?a-zA-Z!<=>_~/$%&:][\+\-\*\^\?a-zA-Z0-9!<=>_~/$%&:]*` + c = `[\+\-\*\^\?a-zA-Z!<=>_~/$%&:][\+\-\*\^\?a-zA-Z0-9!<=>_~/$%&:.]*` IGNORE* return {symbol(c)}; diff --git a/scheme/test/test_parser.py b/scheme/test/test_parser.py --- a/scheme/test/test_parser.py +++ b/scheme/test/test_parser.py @@ -54,6 +54,7 @@ '-', 'set!', 'eqv?', + 'foo.bar', ] for s in more_syms: w_sym = parse_sexpr(s) From noreply at buildbot.pypy.org Sat Sep 10 18:27:23 2011 From: noreply at buildbot.pypy.org (snus_mumrik) Date: Sat, 10 Sep 2011 18:27:23 +0200 (CEST) Subject: [pypy-commit] pypy numpy-indexing-by-arrays: Initial (unoptimized) impementation of indexing by boolean vectors. Message-ID: <20110910162723.639238203C@wyvern.cs.uni-duesseldorf.de> Author: Ilya Osadchiy Branch: numpy-indexing-by-arrays Changeset: r47197:fc54fc827233 Date: 2011-09-10 19:25 +0300 http://bitbucket.org/pypy/pypy/changeset/fc54fc827233/ Log: Initial (unoptimized) impementation of indexing by boolean vectors. 
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -230,8 +230,12 @@ bool_dtype = space.fromcache(interp_dtype.W_BoolDtype) int_dtype = space.fromcache(interp_dtype.W_Int64Dtype) if w_idx.find_dtype() is bool_dtype: - # TODO: indexing by bool array - raise NotImplementedError("sorry, not yet implemented") + # Indexing by boolean array + new_sig = signature.Signature.find_sig([ + IndexedByBoolArray.signature, self.signature + ]) + res = IndexedByBoolArray(new_sig, bool_dtype, self, w_idx) + return space.wrap(res) else: # Indexing by array @@ -470,6 +474,54 @@ val = self.source.eval(idx).convert_to(self.res_dtype) return val +class IndexedByBoolArray(VirtualArray): + """ + Intermediate class for performing indexing of array by another array + """ + # TODO: override "compute" to optimize (?) + signature = signature.BaseSignature() + def __init__(self, signature, bool_dtype, source, index): + VirtualArray.__init__(self, signature, source.find_dtype()) + self.source = source + self.index = index + self.bool_dtype = bool_dtype + self.size = -1 + self.cur_idx = 0 + + def _del_sources(self): + self.source = None + self.index = None + + def _find_size(self): + # Finding size may be long, so we store the result for reuse. 
+ if self.size != -1: + return self.size + # TODO: avoid index.get_concrete by using "sum" (reduce with "add") + idxs = self.index.get_concrete() + s = 0 + i = 0 + while i < self.index.find_size(): + idx_val = self.bool_dtype.unbox(idxs.eval(i).convert_to(self.bool_dtype)) + assert(isinstance(idx_val, bool)) + if idx_val is True: + s += 1 + i += 1 + self.size = s + return self.size + + def _eval(self, i): + if i == 0: + self.cur_idx = 0 + while True: + idx_val = self.bool_dtype.unbox(self.index.eval(self.cur_idx).convert_to(self.bool_dtype)) + assert(isinstance(idx_val, bool)) + if idx_val is True: + break + self.cur_idx += 1 + val = self.source.eval(self.cur_idx).convert_to(self.res_dtype) + self.cur_idx += 1 + return val + class ViewArray(BaseArray): """ Class for representing views of arrays, they will reflect changes of parent diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -133,6 +133,22 @@ for i in xrange(6): assert a_by_list[i] == range(5)[idx_list[i]] + def test_index_by_bool_array(self): + from numpy import array, dtype + a = array(range(5)) + ind = array([False, True, False, True, False]) + assert ind.dtype is dtype(bool) + # get length before actual calculation + b0 = a[ind] + assert len(b0) == 2 + assert b0[0] == 1 + assert b0[1] == 3 + # get length after actual calculation + b1 = a[ind] + assert b1[0] == 1 + assert b1[1] == 3 + assert len(b1) == 2 + def test_setitem(self): from numpy import array a = array(range(5)) From noreply at buildbot.pypy.org Sat Sep 10 19:52:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Sep 2011 19:52:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Temporarily disable sys._current_frames(). 
The tests are left failing Message-ID: <20110910175219.94C3E8203C@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47198:dec57323f88b Date: 2011-09-10 19:51 +0200 http://bitbucket.org/pypy/pypy/changeset/dec57323f88b/ Log: Temporarily disable sys._current_frames(). The tests are left failing to remind us that it needs to be fixed. diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -52,6 +52,8 @@ current stack frame. This function should be used for specialized purposes only.""" + raise OperationError(space.w_NotImplementedError, + space.wrap("XXX sys._current_frames() incompatible with the JIT")) w_result = space.newdict() ecs = space.threadlocals.getallvalues() for thread_ident, ec in ecs.items(): From noreply at buildbot.pypy.org Sun Sep 11 04:04:30 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 11 Sep 2011 04:04:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix translation (and you know.. functionality) on windows. Message-ID: <20110911020430.D63F58203C@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47199:84ad4c442f43 Date: 2011-09-10 19:04 -0700 http://bitbucket.org/pypy/pypy/changeset/84ad4c442f43/ Log: Fix translation (and you know.. functionality) on windows. diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -235,7 +235,7 @@ @jit.dont_look_inside def GetVersionEx(): - info = lltype.malloc(OSVERSIONINFO, flavor='raw') + info = lltype.malloc(OSVERSIONINFOEX, flavor='raw') rffi.setintfield(info, 'c_dwOSVersionInfoSize', rffi.sizeof(OSVERSIONINFOEX)) try: From noreply at buildbot.pypy.org Sun Sep 11 13:28:43 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 13:28:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Copy the test from af74db5394fb. 
Message-ID: <20110911112843.AAA6B82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47200:60692d2409f7 Date: 2011-09-11 13:28 +0200 http://bitbucket.org/pypy/pypy/changeset/60692d2409f7/ Log: Copy the test from af74db5394fb. diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -241,3 +241,20 @@ g1 = greenlet(f1) raises(ValueError, g1.throw, ValueError) assert g1.dead + + def test_exc_info_save_restore(self): + # sys.exc_info save/restore behaviour is wrong on CPython's greenlet + from greenlet import greenlet + import sys + def f(): + try: + raise ValueError('fun') + except: + exc_info = sys.exc_info() + greenlet(h).switch() + assert exc_info == sys.exc_info() + + def h(): + assert sys.exc_info() == (None, None, None) + + greenlet(f).switch() From noreply at buildbot.pypy.org Sun Sep 11 15:52:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 15:52:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill dead code. Message-ID: <20110911135207.B9F3F82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47201:ba8e63bfe45d Date: 2011-09-11 14:13 +0200 http://bitbucket.org/pypy/pypy/changeset/ba8e63bfe45d/ Log: Kill dead code. 
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -81,58 +81,6 @@ # ________________________________________________________________ - - class Subcontext(object): - # coroutine: subcontext support - - def __init__(self): - self.topframe = None - self.w_tracefunc = None - self.profilefunc = None - self.w_profilefuncarg = None - self.is_tracing = 0 - - def enter(self, ec): - ec.topframeref = jit.non_virtual_ref(self.topframe) - ec.w_tracefunc = self.w_tracefunc - ec.profilefunc = self.profilefunc - ec.w_profilefuncarg = self.w_profilefuncarg - ec.is_tracing = self.is_tracing - ec.space.frame_trace_action.fire() - - def leave(self, ec): - self.topframe = ec.gettopframe() - self.w_tracefunc = ec.w_tracefunc - self.profilefunc = ec.profilefunc - self.w_profilefuncarg = ec.w_profilefuncarg - self.is_tracing = ec.is_tracing - - def clear_framestack(self): - self.topframe = None - - # the following interface is for pickling and unpickling - def getstate(self, space): - if self.topframe is None: - return space.w_None - return self.topframe - - def setstate(self, space, w_state): - from pypy.interpreter.pyframe import PyFrame - if space.is_w(w_state, space.w_None): - self.topframe = None - else: - self.topframe = space.interp_w(PyFrame, w_state) - - def getframestack(self): - lst = [] - f = self.topframe - while f is not None: - lst.append(f) - f = f.f_backref() - lst.reverse() - return lst - # coroutine: I think this is all, folks! 
- def c_call_trace(self, frame, w_func, args=None): "Profile the call of a builtin function" self._c_call_return_trace(frame, w_func, args, 'c_call') From noreply at buildbot.pypy.org Sun Sep 11 15:52:09 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 15:52:09 +0200 (CEST) Subject: [pypy-commit] pypy default: A test for hidden_applevel code objects, and fixing the fact that Message-ID: <20110911135209.00A9C82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47202:4c296bad5568 Date: 2011-09-11 14:48 +0200 http://bitbucket.org/pypy/pypy/changeset/4c296bad5568/ Log: A test for hidden_applevel code objects, and fixing the fact that the 'f_back' attribute of frames might return a hidden frame. diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -614,7 +614,8 @@ return self.get_builtin().getdict(space) def fget_f_back(self, space): - return self.space.wrap(self.f_backref()) + f_backref = ExecutionContext.getnextframe_nohidden(self) + return self.space.wrap(f_backref) def fget_f_lasti(self, space): return self.space.wrap(self.last_instr) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -6,6 +6,14 @@ def setup_class(cls): cls.w_udir = cls.space.wrap(str(udir.udir)) cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1'))) + w_call_further = cls.space.appexec([], """(): + def call_further(f): + return f() + return call_further + """) + assert not w_call_further.code.hidden_applevel + w_call_further.code.hidden_applevel = True # hack + cls.w_call_further = w_call_further # test for the presence of the attributes, not functionality @@ -107,6 +115,20 @@ frame = f() assert frame.f_back.f_code.co_name == 'f' + def test_f_back_hidden(self): + import sys + def f(): + return (sys._getframe(0), + 
sys._getframe(1), + sys._getframe(0).f_back) + def main(): + return self.call_further(f) + f0, f1, f1bis = main() + assert f0.f_code.co_name == 'f' + assert f1.f_code.co_name == 'main' + assert f1bis is f1 + assert f0.f_back is f1 + def test_f_exc_xxx(self): import sys From noreply at buildbot.pypy.org Sun Sep 11 15:52:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 15:52:10 +0200 (CEST) Subject: [pypy-commit] pypy default: 'honor__builtins__' is generally False, but just in case, be safe Message-ID: <20110911135210.3C14582041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47203:b96796f0767d Date: 2011-09-11 15:49 +0200 http://bitbucket.org/pypy/pypy/changeset/b96796f0767d/ Log: 'honor__builtins__' is generally False, but just in case, be safe against corner cases in which w_globals is None. diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -66,7 +66,7 @@ make_sure_not_resized(self.locals_stack_w) check_nonneg(self.nlocals) # - if space.config.objspace.honor__builtins__: + if space.config.objspace.honor__builtins__ and w_globals is not None: self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. From noreply at buildbot.pypy.org Sun Sep 11 15:52:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 15:52:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Proper handling of 'f_back' in continulets. Message-ID: <20110911135211.72B4382041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47204:db9617836395 Date: 2011-09-11 15:51 +0200 http://bitbucket.org/pypy/pypy/changeset/db9617836395/ Log: Proper handling of 'f_back' in continulets. 
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -5,6 +5,7 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.pycode import PyCode class W_Continulet(Wrappable): @@ -30,6 +31,7 @@ start_state.origin = self start_state.w_callable = w_callable start_state.args = __args__ + self.bottomframe = make_fresh_frame(self.space) self.sthread = build_sthread(self.space) try: self.h = self.sthread.new(new_stacklet_callback) @@ -52,7 +54,6 @@ start_state.clear() raise geterror(self.space, "continulet not initialized yet") ec = self.check_sthread() - saved_topframeref = ec.topframeref # start_state.origin = self if to is None: @@ -74,8 +75,6 @@ start_state.clear() raise getmemoryerror(self.space) # - ec = sthread.ec - ec.topframeref = saved_topframeref return get_result() def descr_switch(self, w_value=None, w_to=None): @@ -123,13 +122,21 @@ # ____________________________________________________________ +# Continulet objects maintain a dummy frame object in order to ensure +# that the 'f_back' chain is consistent. We hide this dummy frame +# object by giving it a dummy code object with hidden_applevel=True. 
class State: def __init__(self, space): + from pypy.interpreter.astcompiler.consts import CO_OPTIMIZED self.space = space w_module = space.getbuiltinmodule('_continuation') self.w_error = space.getattr(w_module, space.wrap('error')) self.w_memoryerror = OperationError(space.w_MemoryError, space.w_None) + self.dummy_pycode = PyCode(space, 0, 0, 0, CO_OPTIMIZED, + '', [], [], [], '', + '', 0, '', [], [], + hidden_applevel=True) def geterror(space, message): cs = space.fromcache(State) @@ -139,6 +146,10 @@ cs = space.fromcache(State) return cs.w_memoryerror +def make_fresh_frame(space): + cs = space.fromcache(State) + return space.FrameClass(space, cs.dummy_pycode, None, None) + # ____________________________________________________________ @@ -178,9 +189,8 @@ # space = self.space try: - ec = self.sthread.ec - ec.topframeref = jit.vref_None - + assert self.sthread.ec.topframeref() is None + self.sthread.ec.topframeref = jit.non_virtual_ref(self.bottomframe) if start_state.propagate_exception is not None: raise start_state.propagate_exception # just propagate it further if start_state.w_value is not space.w_None: @@ -193,6 +203,7 @@ start_state.propagate_exception = e else: start_state.w_value = w_result + self.sthread.ec.topframeref = jit.vref_None start_state.origin = self start_state.destination = self return self.h @@ -205,6 +216,11 @@ start_state.origin = None start_state.destination = None self.h, origin.h = origin.h, h + # + current = sthread.ec.topframeref + sthread.ec.topframeref = self.bottomframe.f_backref + self.bottomframe.f_backref = origin.bottomframe.f_backref + origin.bottomframe.f_backref = current def get_result(): if start_state.propagate_exception: @@ -240,6 +256,9 @@ contlist.append(cont) # if len(contlist) > 1: - other = contlist[-1].h + otherh = contlist[-1].h + otherb = contlist[-1].bottomframe.f_backref for cont in contlist: - other, cont.h = cont.h, other + otherh, cont.h = cont.h, otherh + b = cont.bottomframe + otherb, b.f_backref = 
b.f_backref, otherb diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -312,7 +312,7 @@ res = f() assert res == 2002 - def test_f_back_is_None_for_now(self): + def test_f_back(self): import sys from _continuation import continulet # @@ -321,6 +321,7 @@ c.switch(sys._getframe(0).f_back) c.switch(sys._getframe(1)) c.switch(sys._getframe(1).f_back) + assert sys._getframe(2) is f3.f_back c.switch(sys._getframe(2)) def f(c): g(c) @@ -331,10 +332,21 @@ f2 = c.switch() assert f2.f_code.co_name == 'f' f3 = c.switch() - assert f3.f_code.co_name == 'f' - f4 = c.switch() - assert f4 is None - raises(ValueError, c.switch) # "call stack is not deep enough" + assert f3 is f2 + assert f1.f_back is f3 + def main(): + f4 = c.switch() + assert f4.f_code.co_name == 'main', repr(f4.f_code.co_name) + assert f3.f_back is f1 # not running, so a loop + def main2(): + f5 = c.switch() + assert f5.f_code.co_name == 'main2', repr(f5.f_code.co_name) + assert f3.f_back is f1 # not running, so a loop + main() + main2() + res = c.switch() + assert res is None + assert f3.f_back is None def test_traceback_is_complete(self): import sys @@ -609,6 +621,7 @@ assert res == "ok" def test_permute(self): + import sys from _continuation import continulet, permute # def f1(c1): @@ -617,14 +630,18 @@ return "done" # def f2(c2): + assert sys._getframe(1).f_code.co_name == 'main' permute(c1, c2) + assert sys._getframe(1).f_code.co_name == 'f1' return "ok" # c1 = continulet(f1) c2 = continulet(f2) - c1.switch() - res = c2.switch() - assert res == "done" + def main(): + c1.switch() + res = c2.switch() + assert res == "done" + main() def test_various_depths(self): skip("may fail on top of CPython") From noreply at buildbot.pypy.org Sun Sep 11 16:03:07 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 16:03:07 +0200 (CEST) 
Subject: [pypy-commit] pypy default: Translation fix. Message-ID: <20110911140307.ADA8082041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47205:8626ee8b9c1e Date: 2011-09-11 16:02 +0200 http://bitbucket.org/pypy/pypy/changeset/8626ee8b9c1e/ Log: Translation fix. diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -46,6 +46,7 @@ def specialize_call(self, hop): r_generic_object = getinstancerepr(hop.rtyper, None) [v] = hop.inputargs(r_generic_object) # might generate a cast_pointer + hop.exception_cannot_occur() return v def rtype_simple_call(self, hop): From noreply at buildbot.pypy.org Sun Sep 11 17:19:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 17:19:20 +0200 (CEST) Subject: [pypy-commit] pypy default: This has been fixed. Message-ID: <20110911151920.2A6FF82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47206:f18568e244e9 Date: 2011-09-11 16:08 +0200 http://bitbucket.org/pypy/pypy/changeset/f18568e244e9/ Log: This has been fixed. diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -215,11 +215,6 @@ * Support for other CPUs than x86 and x86-64 -* The app-level ``f_back`` field of frames crossing continulet boundaries - is None for now, unlike what I explain in the theoretical overview - above. It mostly means that in a ``pdb.set_trace()`` you cannot go - ``up`` past countinulet boundaries. This could be fixed. - .. __: `recursion depth limit`_ (*) Pickling, as well as changing threads, could be implemented by using From noreply at buildbot.pypy.org Sun Sep 11 17:19:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Sep 2011 17:19:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Skip this test in runappdirect mode. 
Message-ID: <20110911151921.6564282041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47207:7a3965ac609b Date: 2011-09-11 17:18 +0200 http://bitbucket.org/pypy/pypy/changeset/7a3965ac609b/ Log: Skip this test in runappdirect mode. diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -1,4 +1,5 @@ from pypy.tool import udir +from pypy.conftest import option class AppTestPyFrame: @@ -6,14 +7,15 @@ def setup_class(cls): cls.w_udir = cls.space.wrap(str(udir.udir)) cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1'))) - w_call_further = cls.space.appexec([], """(): - def call_further(f): - return f() - return call_further - """) - assert not w_call_further.code.hidden_applevel - w_call_further.code.hidden_applevel = True # hack - cls.w_call_further = w_call_further + if not option.runappdirect: + w_call_further = cls.space.appexec([], """(): + def call_further(f): + return f() + return call_further + """) + assert not w_call_further.code.hidden_applevel + w_call_further.code.hidden_applevel = True # hack + cls.w_call_further = w_call_further # test for the presence of the attributes, not functionality @@ -116,6 +118,8 @@ assert frame.f_back.f_code.co_name == 'f' def test_f_back_hidden(self): + if not hasattr(self, 'call_further'): + skip("not for runappdirect testing") import sys def f(): return (sys._getframe(0), From noreply at buildbot.pypy.org Mon Sep 12 10:09:40 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 12 Sep 2011 10:09:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Added StringBuilder at app level to complement UnicodeBuilder. Also moved from an extra done flag to just setting builder to None. 
Message-ID: <20110912080940.212FE82041@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47208:204a6bf73f36 Date: 2011-09-12 04:09 -0400 http://bitbucket.org/pypy/pypy/changeset/204a6bf73f36/ Log: Added StringBuilder at app level to complement UnicodeBuilder. Also moved from an extra done flag to just setting builder to None. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -8,6 +8,7 @@ appleveldefs = {} interpleveldefs = { + "StringBuilder": "interp_builders.W_StringBuilder", "UnicodeBuilder": "interp_builders.W_UnicodeBuilder", } diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -2,49 +2,53 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.rlib.rstring import UnicodeBuilder +from pypy.rlib.rstring import UnicodeBuilder, StringBuilder -class W_UnicodeBuilder(Wrappable): - def __init__(self, space, size): - if size < 0: - self.builder = UnicodeBuilder() - else: - self.builder = UnicodeBuilder(size) - self.done = False +def create_builder(name, strtype, builder_cls): + class W_Builder(Wrappable): + def __init__(self, space, size): + if size < 0: + self.builder = builder_cls() + else: + self.builder = builder_cls(size) - def _check_done(self, space): - if self.done: - raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder")) + def _check_done(self, space): + if self.builder is None: + raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder")) - @unwrap_spec(size=int) - def descr__new__(space, w_subtype, size=-1): - return W_UnicodeBuilder(space, size) + @unwrap_spec(size=int) + def descr__new__(space, w_subtype, size=-1): + 
return W_Builder(space, size) - @unwrap_spec(s=unicode) - def descr_append(self, space, s): - self._check_done(space) - self.builder.append(s) + @unwrap_spec(s=strtype) + def descr_append(self, space, s): + self._check_done(space) + self.builder.append(s) - @unwrap_spec(s=unicode, start=int, end=int) - def descr_append_slice(self, space, s, start, end): - self._check_done(space) - if not 0 <= start <= end <= len(s): - raise OperationError(space.w_ValueError, space.wrap("bad start/stop")) - self.builder.append_slice(s, start, end) + @unwrap_spec(s=strtype, start=int, end=int) + def descr_append_slice(self, space, s, start, end): + self._check_done(space) + if not 0 <= start <= end <= len(s): + raise OperationError(space.w_ValueError, space.wrap("bad start/stop")) + self.builder.append_slice(s, start, end) - def descr_build(self, space): - self._check_done(space) - w_s = space.wrap(self.builder.build()) - self.done = True - return w_s + def descr_build(self, space): + self._check_done(space) + w_s = space.wrap(self.builder.build()) + self.builder = None + return w_s + W_Builder.__name__ = "W_%s" % name + W_Builder.typedef = TypeDef(name, + __new__ = interp2app(W_Builder.descr__new__.im_func), -W_UnicodeBuilder.typedef = TypeDef("UnicodeBuilder", - __new__ = interp2app(W_UnicodeBuilder.descr__new__.im_func), + append = interp2app(W_Builder.descr_append), + append_slice = interp2app(W_Builder.descr_append_slice), + build = interp2app(W_Builder.descr_build), + ) + W_Builder.typedef.acceptable_as_base_class = False + return W_Builder - append = interp2app(W_UnicodeBuilder.descr_append), - append_slice = interp2app(W_UnicodeBuilder.descr_append_slice), - build = interp2app(W_UnicodeBuilder.descr_build), -) -W_UnicodeBuilder.typedef.acceptable_as_base_class = False +W_StringBuilder = create_builder("StringBuilder", str, StringBuilder) +W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder) \ No newline at end of file diff --git 
a/pypy/module/__pypy__/test/test_builders.py b/pypy/module/__pypy__/test/test_builders.py --- a/pypy/module/__pypy__/test/test_builders.py +++ b/pypy/module/__pypy__/test/test_builders.py @@ -31,4 +31,14 @@ raises(ValueError, b.append_slice, u"1", 2, 1) s = b.build() assert s == "cde" - raises(ValueError, b.append_slice, u"abc", 1, 2) \ No newline at end of file + raises(ValueError, b.append_slice, u"abc", 1, 2) + + def test_stringbuilder(self): + from __pypy__.builders import StringBuilder + b = StringBuilder() + b.append("abc") + b.append("123") + b.append("you and me") + s = b.build() + assert s == "abc123you and me" + raises(ValueError, b.build) \ No newline at end of file From noreply at buildbot.pypy.org Mon Sep 12 10:11:55 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 10:11:55 +0200 (CEST) Subject: [pypy-commit] pypy space-iterator-improvements: oops, forgot Message-ID: <20110912081155.0BE9682041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: space-iterator-improvements Changeset: r47209:4034a4d30bfa Date: 2011-09-12 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/4034a4d30bfa/ Log: oops, forgot diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -203,6 +203,7 @@ r, s = hop.r_s_popfirstarg() if s.is_constant(): v = hop.inputconst(r, s.const) + hop.exception_is_here() return rtype_newlist(hop, v_sizehint=v) # ____________________________________________________________ From noreply at buildbot.pypy.org Mon Sep 12 10:37:49 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 10:37:49 +0200 (CEST) Subject: [pypy-commit] pypy space-iterator-improvements: typo Message-ID: <20110912083749.3ACB182041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: space-iterator-improvements Changeset: r47210:a2b808a9a3eb Date: 2011-09-12 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/a2b808a9a3eb/ Log: typo diff 
--git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -269,7 +269,7 @@ l.items = malloc(LIST.items.TO, lengthhint) return l ll_newlist_hint = typeMethod(ll_newlist_hint) -ll_newlist_hint.oopspec = 'newlist(length)' +ll_newlist_hint.oopspec = 'newlist(lengthhint)' # should empty lists start with no allocated memory, or with a preallocated # minimal number of entries? XXX compare memory usage versus speed, and From noreply at buildbot.pypy.org Mon Sep 12 10:51:34 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 12 Sep 2011 10:51:34 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: - fix XXXs in tests Message-ID: <20110912085134.6052F82041@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47211:6be75d4f91c3 Date: 2011-09-08 22:02 +0200 http://bitbucket.org/pypy/pypy/changeset/6be75d4f91c3/ Log: - fix XXXs in tests - use dict.keys() instead of list(dict) diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -1,4 +1,3 @@ -from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.history import ConstInt from pypy.jit.metainterp.resoperation import rop @@ -45,14 +44,14 @@ return # A special case for ll_arraycopy, because it is so common, and its # effects are so well defined. 
- elif effectinfo.oopspecindex == EffectInfo.OS_ARRAYCOPY: + elif effectinfo.oopspecindex == effectinfo.OS_ARRAYCOPY: # The destination box if argboxes[2] in self.new_boxes: # XXX: no descr here so we invalidate any of them, not just # of the correct type for descr, cache in self.heap_array_cache.iteritems(): for idx, cache in cache.iteritems(): - for frombox in list(cache): + for frombox in cache.keys(): if frombox not in self.new_boxes: del cache[frombox] return diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -25,6 +25,8 @@ EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables EF_RANDOM_EFFECTS = 6 #can do whatever + OS_ARRAYCOPY = 0 + def __init__(self, extraeffect, oopspecindex): self.extraeffect = extraeffect self.oopspecindex = oopspecindex @@ -305,14 +307,13 @@ # Just need the destination box for this call h.invalidate_caches( rop.CALL, - # XXX: hardcoded oopspecindex - FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, 1), + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), [None, None, box2, None, None] ) assert h.getarrayitem(box1, descr1, index1) is box2 h.invalidate_caches( rop.CALL, - FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, 1), + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), [None, None, box3, None, None] ) assert h.getarrayitem(box1, descr1, index1) is None @@ -321,8 +322,7 @@ assert h.getarrayitem(box4, descr1, index1) is box2 h.invalidate_caches( rop.CALL, - # XXX: hardcoded oopspecindex - FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, 1), + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), [None, None, box2, None, None] ) assert h.getarrayitem(box4, descr1, index1) is None From noreply at buildbot.pypy.org Mon Sep 12 10:51:35 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 12 Sep 2011 
10:51:35 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: add slightly theoretical XXX Message-ID: <20110912085135.954A782041@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47212:b4778788d50b Date: 2011-09-08 22:09 +0200 http://bitbucket.org/pypy/pypy/changeset/b4778788d50b/ Log: add slightly theoretical XXX diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -49,6 +49,8 @@ if argboxes[2] in self.new_boxes: # XXX: no descr here so we invalidate any of them, not just # of the correct type + # XXX: in theory the indices of the copy could be looked at + # as well for descr, cache in self.heap_array_cache.iteritems(): for idx, cache in cache.iteritems(): for frombox in cache.keys(): From noreply at buildbot.pypy.org Mon Sep 12 11:27:44 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 11:27:44 +0200 (CEST) Subject: [pypy-commit] buildbot default: Add a commented-out attempt at using manhole Message-ID: <20110912092744.64C7982041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r592:5d9ee93fb629 Date: 2011-09-12 11:26 +0200 http://bitbucket.org/pypy/buildbot/changeset/5d9ee93fb629/ Log: Add a commented-out attempt at using manhole diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -3,6 +3,7 @@ from buildbot.buildslave import BuildSlave from buildbot.status.html import WebStatus from buildbot.process.builder import Builder +#from buildbot import manhole from pypybuildbot.pypylist import PyPyList from pypybuildbot.ircbot import IRC # side effects @@ -330,6 +331,9 @@ }, ], + # http://readthedocs.org/docs/buildbot/en/latest/tour.html#debugging-with-manhole + #'manhole': manhole.PasswordManhole("tcp:1234:interface=127.0.0.1", + # "buildmaster","XndZopHM"), 
'buildbotURL': 'http://buildbot.pypy.org/', # with a trailing '/'! 'projectURL': 'http://pypy.org/', 'projectName': 'PyPy'} From noreply at buildbot.pypy.org Mon Sep 12 11:27:45 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 11:27:45 +0200 (CEST) Subject: [pypy-commit] buildbot default: Win32 compat. Message-ID: <20110912092745.7AC3A82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r593:ebdc074a0d49 Date: 2011-09-12 11:27 +0200 http://bitbucket.org/pypy/buildbot/changeset/ebdc074a0d49/ Log: Win32 compat. diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -58,9 +58,12 @@ self.platform = None def parse_filename(self): - if not self.filename.endswith('.tar.bz2'): + for ext in ['.tar.bz2', '.zip']: + if self.filename.endswith(ext): + break + else: raise ValueError - name = self.filename.replace('.tar.bz2', '') + name = self.filename.replace(ext, '') dashes = name.count('-') if dashes == 4: # svn based From noreply at buildbot.pypy.org Mon Sep 12 11:32:28 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 11:32:28 +0200 (CEST) Subject: [pypy-commit] buildbot default: Use the hg repository instead of the svn one for benchmarks. Message-ID: <20110912093228.1F9AF82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r594:411fd463e2f7 Date: 2011-09-12 11:32 +0200 http://bitbucket.org/pypy/buildbot/changeset/411fd463e2f7/ Log: Use the hg repository instead of the svn one for benchmarks. 
diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -154,14 +154,7 @@ self.build.setProperty('got_revision', got_revision, 'got_revision') self.build.setProperty('final_file_name', final_file_name, 'got_revision') -def setup_steps(platform, factory, workdir=None): - # XXX: this assumes that 'hg' is in the path - import getpass - repourl = 'https://bitbucket.org/pypy/pypy/' - if getpass.getuser() == 'antocuni': - # for debugging - repourl = '/home/antocuni/pypy/default' - # +def update_hg(platform, factory, repourl, workdir, use_branch): if platform == 'win32': command = "if not exist .hg rmdir /q /s ." else: @@ -190,8 +183,23 @@ command = "hg pull", workdir = workdir)) # - factory.addStep(UpdateCheckout(workdir = workdir, - haltOnFailure=True)) + if use_branch: + factory.addStep(UpdateCheckout(workdir = workdir, + haltOnFailure=True)) + else: + factory.addStep(ShellCmd(description="hg update", + command = "hg update --clean", + workdir = workdir)) + +def setup_steps(platform, factory, workdir=None): + # XXX: this assumes that 'hg' is in the path + import getpass + repourl = 'https://bitbucket.org/pypy/pypy/' + if getpass.getuser() == 'antocuni': + # for debugging + repourl = '/home/antocuni/pypy/default' + # + update_hg(platform, factory, repourl, workdir, use_branch=True) # factory.addStep(CheckGotRevision(workdir=workdir)) @@ -309,10 +317,10 @@ factory.BuildFactory.__init__(self) setup_steps(platform, self) - self.addStep(ShellCmd(description="checkout benchmarks", - command=['svn', 'co', 'https://bitbucket.org/pypy/benchmarks/trunk', - 'benchmarks'], - workdir='.')) + # + repourl = 'https://bitbucket.org/pypy/benchmarks' + update_hg(platform, self, repourl, 'benchmarks', use_branch=False) + # self.addStep( Translate( translationArgs=['-Ojit'], From noreply at buildbot.pypy.org Mon Sep 12 11:38:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 
11:38:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Give a more explicit error message. Message-ID: <20110912093821.BF19A82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47213:b8e9360ba188 Date: 2011-09-12 11:37 +0200 http://bitbucket.org/pypy/pypy/changeset/b8e9360ba188/ Log: Give a more explicit error message. diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -242,8 +242,10 @@ # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - assert Function._all.get(identifier, self) is self, ("duplicate " - "function ids") + previous = Function._all.get(identifier, self) + assert previous is self, ( + "duplicate function ids with identifier=%r: %r and %r" % ( + identifier, previous, self)) self.add_to_table() return False From noreply at buildbot.pypy.org Mon Sep 12 11:38:23 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 11:38:23 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110912093823.076A382041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47214:1c76004cedc6 Date: 2011-09-12 11:38 +0200 http://bitbucket.org/pypy/pypy/changeset/1c76004cedc6/ Log: merge heads diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -8,6 +8,7 @@ appleveldefs = {} interpleveldefs = { + "StringBuilder": "interp_builders.W_StringBuilder", "UnicodeBuilder": "interp_builders.W_UnicodeBuilder", } diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -2,49 +2,53 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef 
import TypeDef -from pypy.rlib.rstring import UnicodeBuilder +from pypy.rlib.rstring import UnicodeBuilder, StringBuilder -class W_UnicodeBuilder(Wrappable): - def __init__(self, space, size): - if size < 0: - self.builder = UnicodeBuilder() - else: - self.builder = UnicodeBuilder(size) - self.done = False +def create_builder(name, strtype, builder_cls): + class W_Builder(Wrappable): + def __init__(self, space, size): + if size < 0: + self.builder = builder_cls() + else: + self.builder = builder_cls(size) - def _check_done(self, space): - if self.done: - raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder")) + def _check_done(self, space): + if self.builder is None: + raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder")) - @unwrap_spec(size=int) - def descr__new__(space, w_subtype, size=-1): - return W_UnicodeBuilder(space, size) + @unwrap_spec(size=int) + def descr__new__(space, w_subtype, size=-1): + return W_Builder(space, size) - @unwrap_spec(s=unicode) - def descr_append(self, space, s): - self._check_done(space) - self.builder.append(s) + @unwrap_spec(s=strtype) + def descr_append(self, space, s): + self._check_done(space) + self.builder.append(s) - @unwrap_spec(s=unicode, start=int, end=int) - def descr_append_slice(self, space, s, start, end): - self._check_done(space) - if not 0 <= start <= end <= len(s): - raise OperationError(space.w_ValueError, space.wrap("bad start/stop")) - self.builder.append_slice(s, start, end) + @unwrap_spec(s=strtype, start=int, end=int) + def descr_append_slice(self, space, s, start, end): + self._check_done(space) + if not 0 <= start <= end <= len(s): + raise OperationError(space.w_ValueError, space.wrap("bad start/stop")) + self.builder.append_slice(s, start, end) - def descr_build(self, space): - self._check_done(space) - w_s = space.wrap(self.builder.build()) - self.done = True - return w_s + def descr_build(self, space): + self._check_done(space) + w_s = 
space.wrap(self.builder.build()) + self.builder = None + return w_s + W_Builder.__name__ = "W_%s" % name + W_Builder.typedef = TypeDef(name, + __new__ = interp2app(W_Builder.descr__new__.im_func), -W_UnicodeBuilder.typedef = TypeDef("UnicodeBuilder", - __new__ = interp2app(W_UnicodeBuilder.descr__new__.im_func), + append = interp2app(W_Builder.descr_append), + append_slice = interp2app(W_Builder.descr_append_slice), + build = interp2app(W_Builder.descr_build), + ) + W_Builder.typedef.acceptable_as_base_class = False + return W_Builder - append = interp2app(W_UnicodeBuilder.descr_append), - append_slice = interp2app(W_UnicodeBuilder.descr_append_slice), - build = interp2app(W_UnicodeBuilder.descr_build), -) -W_UnicodeBuilder.typedef.acceptable_as_base_class = False +W_StringBuilder = create_builder("StringBuilder", str, StringBuilder) +W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder) \ No newline at end of file diff --git a/pypy/module/__pypy__/test/test_builders.py b/pypy/module/__pypy__/test/test_builders.py --- a/pypy/module/__pypy__/test/test_builders.py +++ b/pypy/module/__pypy__/test/test_builders.py @@ -31,4 +31,14 @@ raises(ValueError, b.append_slice, u"1", 2, 1) s = b.build() assert s == "cde" - raises(ValueError, b.append_slice, u"abc", 1, 2) \ No newline at end of file + raises(ValueError, b.append_slice, u"abc", 1, 2) + + def test_stringbuilder(self): + from __pypy__.builders import StringBuilder + b = StringBuilder() + b.append("abc") + b.append("123") + b.append("you and me") + s = b.build() + assert s == "abc123you and me" + raises(ValueError, b.build) \ No newline at end of file From noreply at buildbot.pypy.org Mon Sep 12 12:29:36 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 12:29:36 +0200 (CEST) Subject: [pypy-commit] pypy default: fix translation, hopefully Message-ID: <20110912102936.114AB82041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47215:971b55704207 
Date: 2011-09-12 12:29 +0200 http://bitbucket.org/pypy/pypy/changeset/971b55704207/ Log: fix translation, hopefully diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -3,6 +3,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.rlib.rstring import UnicodeBuilder, StringBuilder +from pypy.tool.sourcetools import func_with_new_name def create_builder(name, strtype, builder_cls): @@ -41,8 +42,9 @@ W_Builder.__name__ = "W_%s" % name W_Builder.typedef = TypeDef(name, - __new__ = interp2app(W_Builder.descr__new__.im_func), - + __new__ = interp2app(func_with_new_name( + W_Builder.descr__new__.im_func, + '%s_new' % (name,))), append = interp2app(W_Builder.descr_append), append_slice = interp2app(W_Builder.descr_append_slice), build = interp2app(W_Builder.descr_build), @@ -51,4 +53,4 @@ return W_Builder W_StringBuilder = create_builder("StringBuilder", str, StringBuilder) -W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder) \ No newline at end of file +W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder) From noreply at buildbot.pypy.org Mon Sep 12 12:45:39 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 12:45:39 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add an abstract Message-ID: <20110912104539.6798582041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3896:0304bc44cf9d Date: 2011-09-12 12:45 +0200 http://bitbucket.org/pypy/extradoc/changeset/0304bc44cf9d/ Log: add an abstract diff --git a/talk/pyconar2011/abstract.rst b/talk/pyconar2011/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/abstract.rst @@ -0,0 +1,10 @@ +Little things that PyPy makes possible +====================================== + +PyPy is just a python 
interpreter. However, there are things that were not +quite possible to do with Python, like real time video processing, because +of Python interpreter limitations. This is no longer the case -- I'll present +few demos and explain details of those little things that run on PyPy that +make python an even more awesome language than it was before. + + From noreply at buildbot.pypy.org Mon Sep 12 12:55:56 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 12:55:56 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: "a, the" Message-ID: <20110912105556.5C0AA82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r3897:45189551e038 Date: 2011-09-12 12:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/45189551e038/ Log: "a, the" diff --git a/talk/pyconar2011/abstract.rst b/talk/pyconar2011/abstract.rst --- a/talk/pyconar2011/abstract.rst +++ b/talk/pyconar2011/abstract.rst @@ -4,7 +4,7 @@ PyPy is just a python interpreter. However, there are things that were not quite possible to do with Python, like real time video processing, because of Python interpreter limitations. This is no longer the case -- I'll present -few demos and explain details of those little things that run on PyPy that +a few demos and explain the details of those little things that run on PyPy that make python an even more awesome language than it was before. 
From noreply at buildbot.pypy.org Mon Sep 12 13:20:42 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 13:20:42 +0200 (CEST) Subject: [pypy-commit] pypy default: remove unnecessary imports Message-ID: <20110912112042.09EF982041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47218:4c07862b1f06 Date: 2011-09-12 12:49 +0200 http://bitbucket.org/pypy/pypy/changeset/4c07862b1f06/ Log: remove unnecessary imports diff --git a/pypy/rpython/rbuilder.py b/pypy/rpython/rbuilder.py --- a/pypy/rpython/rbuilder.py +++ b/pypy/rpython/rbuilder.py @@ -1,13 +1,11 @@ from pypy.rpython.rmodel import Repr -from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import lltype from pypy.rlib.rstring import INIT_SIZE from pypy.annotation.model import SomeChar, SomeUnicodeCodePoint class AbstractStringBuilderRepr(Repr): def rtyper_new(self, hop): - repr = hop.r_result if len(hop.args_v) == 0: v_arg = hop.inputconst(lltype.Signed, INIT_SIZE) else: From noreply at buildbot.pypy.org Mon Sep 12 13:20:43 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 13:20:43 +0200 (CEST) Subject: [pypy-commit] pypy default: support builder-or-none Message-ID: <20110912112043.42F1B82041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47219:fa0be6dd8a1b Date: 2011-09-12 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/fa0be6dd8a1b/ Log: support builder-or-none diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -2,7 +2,8 @@ """ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, - SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr) + SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) +from pypy.tool.pairtype import pair, pairtype from pypy.rpython.extregistry import ExtRegistryEntry @@ -170,3 +171,14 @@ class UnicodeBuilderEntry(BaseEntry, ExtRegistryEntry): _about_ = 
UnicodeBuilder use_unicode = True + +class __extend__(pairtype(SomeStringBuilder, SomePBC)): + def union((sb, p)): + assert p.const is None + return SomeStringBuilder(can_be_None=True) + +class __extend__(pairtype(SomePBC, SomeStringBuilder)): + def union((p, sb)): + assert p.const is None + return SomeStringBuilder(can_be_None=True) + diff --git a/pypy/rlib/test/test_rstring.py b/pypy/rlib/test/test_rstring.py --- a/pypy/rlib/test/test_rstring.py +++ b/pypy/rlib/test/test_rstring.py @@ -2,7 +2,6 @@ from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit - def test_split(): assert split("", 'x') == [''] assert split("a", "a", 1) == ['', ''] @@ -42,4 +41,5 @@ assert s.getlength() == len('aabcb') s.append_multiple_char(u'd', 4) assert s.build() == 'aabcbdddd' - assert isinstance(s.build(), unicode) \ No newline at end of file + assert isinstance(s.build(), unicode) + diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -4,7 +4,7 @@ from pypy.rpython.annlowlevel import llstr from pypy.rpython.rptr import PtrRepr from pypy.rpython.lltypesystem import lltype, rstr -from pypy.rpython.lltypesystem.lltype import staticAdtMethod +from pypy.rpython.lltypesystem.lltype import staticAdtMethod, nullptr from pypy.rpython.lltypesystem.rstr import (STR, UNICODE, char_repr, string_repr, unichar_repr, unicode_repr) from pypy.rpython.rbuilder import AbstractStringBuilderRepr @@ -54,6 +54,9 @@ MAX = 16*1024*1024 class BaseStringBuilderRepr(AbstractStringBuilderRepr): + def empty(self): + return nullptr(self.lowleveltype.TO) + @classmethod def ll_new(cls, init_size): if init_size < 0 or init_size > MAX: @@ -123,6 +126,10 @@ return ll_builder.buf return rgc.ll_shrink_array(ll_builder.buf, final_size) + @classmethod + def ll_is_true(cls, ll_builder): + return ll_builder != nullptr(cls.lowleveltype.TO) + class 
StringBuilderRepr(BaseStringBuilderRepr): lowleveltype = lltype.Ptr(STRINGBUILDER) basetp = STR diff --git a/pypy/rpython/ootypesystem/rbuilder.py b/pypy/rpython/ootypesystem/rbuilder.py --- a/pypy/rpython/ootypesystem/rbuilder.py +++ b/pypy/rpython/ootypesystem/rbuilder.py @@ -7,6 +7,9 @@ MAX = 16*1024*1024 class BaseBuilderRepr(AbstractStringBuilderRepr): + def empty(self): + return ootype.null(self.lowleveltype) + @classmethod def ll_new(cls, init_size): if init_size < 0 or init_size > MAX: @@ -36,6 +39,10 @@ def ll_build(builder): return builder.ll_build() + @staticmethod + def ll_is_true(builder): + return bool(builder) + class StringBuilderRepr(BaseBuilderRepr): lowleveltype = ootype.StringBuilder string_repr = string_repr diff --git a/pypy/rpython/rbuilder.py b/pypy/rpython/rbuilder.py --- a/pypy/rpython/rbuilder.py +++ b/pypy/rpython/rbuilder.py @@ -48,3 +48,13 @@ vlist = hop.inputargs(self) hop.exception_cannot_occur() return hop.gendirectcall(self.ll_build, *vlist) + + def rtype_is_true(self, hop): + vlist = hop.inputargs(self) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll_is_true, *vlist) + + def convert_const(self, value): + if not value is None: + raise TypeError("Prebuilt builedrs that are not none unsupported") + return self.empty() diff --git a/pypy/rpython/test/test_rbuilder.py b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -84,6 +84,24 @@ res = self.ll_to_string(self.interpret(func, [5])) assert res == "hello" + def test_builder_or_none(self): + def g(s): + if s: + s.append("3") + return bool(s) + + def func(i): + if i: + s = StringBuilder() + else: + s = None + return g(s) + res = self.interpret(func, [0]) + assert not res + res = self.interpret(func, [1]) + assert res + + class TestLLtype(BaseTestStringBuilder, LLRtypeMixin): pass @@ -93,4 +111,4 @@ def test_unicode_getlength(self): py.test.skip("getlength(): not implemented on ootype") def 
test_append_charpsize(self): - py.test.skip("append_charpsize(): not implemented on ootype") \ No newline at end of file + py.test.skip("append_charpsize(): not implemented on ootype") From noreply at buildbot.pypy.org Mon Sep 12 13:20:44 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 13:20:44 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20110912112044.713DD82041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47220:9d498078c5a1 Date: 2011-09-12 13:20 +0200 http://bitbucket.org/pypy/pypy/changeset/9d498078c5a1/ Log: merge diff --git a/pypy/rlib/rzlib.py b/pypy/rlib/rzlib.py --- a/pypy/rlib/rzlib.py +++ b/pypy/rlib/rzlib.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.rlib.rstring import StringBuilder From noreply at buildbot.pypy.org Mon Sep 12 15:36:12 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 15:36:12 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Pickling and unpickling of continulets. Message-ID: <20110912133612.E64BC82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47221:9c8a5211c7d2 Date: 2011-09-12 14:01 +0200 http://bitbucket.org/pypy/pypy/changeset/9c8a5211c7d2/ Log: Pickling and unpickling of continulets. Based on the same idea as d0a4f767e2f1. From noreply at buildbot.pypy.org Mon Sep 12 15:36:14 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 15:36:14 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Pickling and unpickling of empty continulets. Message-ID: <20110912133614.29EEF82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47222:b3847d33555e Date: 2011-09-12 14:02 +0200 http://bitbucket.org/pypy/pypy/changeset/b3847d33555e/ Log: Pickling and unpickling of empty continulets. 
diff --git a/pypy/module/_continuation/__init__.py b/pypy/module/_continuation/__init__.py --- a/pypy/module/_continuation/__init__.py +++ b/pypy/module/_continuation/__init__.py @@ -37,4 +37,5 @@ interpleveldefs = { 'continulet': 'interp_continuation.W_Continulet', 'permute': 'interp_continuation.permute', + '_p': 'interp_continuation.unpickle', # pickle support } diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -102,12 +102,22 @@ and not self.sthread.is_empty_handle(self.h)) return self.space.newbool(valid) + def descr__reduce__(self): + space = self.space + w_continulet_type = space.type(space.wrap(self)) + return space.newtuple([getunpickle(space), + space.newtuple([w_continulet_type])]) + def W_Continulet___new__(space, w_subtype, __args__): r = space.allocate_instance(W_Continulet, w_subtype) r.__init__(space) return space.wrap(r) +def unpickle(space, w_subtype): + """Pickle support.""" + return W_Continulet___new__(space, w_subtype, None) + W_Continulet.typedef = TypeDef( 'continulet', @@ -117,6 +127,7 @@ switch = interp2app(W_Continulet.descr_switch), throw = interp2app(W_Continulet.descr_throw), is_pending = interp2app(W_Continulet.descr_is_pending), + __reduce__ = interp2app(W_Continulet.descr__reduce__), ) @@ -137,6 +148,7 @@ '', [], [], [], '', '', 0, '', [], [], hidden_applevel=True) + self.w_unpickle = w_module.get('_p') def geterror(space, message): cs = space.fromcache(State) @@ -150,6 +162,10 @@ cs = space.fromcache(State) return space.FrameClass(space, cs.dummy_pycode, None, None) +def getunpickle(space): + cs = space.fromcache(State) + return cs.w_unpickle + # ____________________________________________________________ From noreply at buildbot.pypy.org Mon Sep 12 15:36:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 15:36:15 +0200 (CEST) Subject: 
[pypy-commit] pypy continulet-pickle: Workaround. Message-ID: <20110912133615.590D982041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47223:dc94a2f7f7ee Date: 2011-09-12 15:24 +0200 http://bitbucket.org/pypy/pypy/changeset/dc94a2f7f7ee/ Log: Workaround. diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -72,7 +72,9 @@ space = self.space retval = [] for arg in self.code.getargs(): - w_val = space.getitem(self.w_locals, space.wrap(arg)) + w_val = space.finditem(self.w_locals, space.wrap(arg)) + if w_val is None: + w_val = space.wrap('') retval.append((arg, w_val)) return retval From noreply at buildbot.pypy.org Mon Sep 12 15:36:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 15:36:16 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Fix the tests, with a comment for why it is a bit incomplete. Message-ID: <20110912133616.87EA882041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47224:f5768854286a Date: 2011-09-12 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/f5768854286a/ Log: Fix the tests, with a comment for why it is a bit incomplete. diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -103,10 +103,18 @@ return self.space.newbool(valid) def descr__reduce__(self): + # xxx this is known to be not completely correct with respect + # to subclasses, e.g. no __slots__ support, no looking for a + # __getnewargs__ or __getstate__ defined in the subclass, etc. + # Doing the right thing looks involved, though... 
space = self.space w_continulet_type = space.type(space.wrap(self)) - return space.newtuple([getunpickle(space), - space.newtuple([w_continulet_type])]) + args = [getunpickle(space), + space.newtuple([w_continulet_type])] + w_dict = self.getdict(space) + if w_dict is not None: + args = args + [w_dict] + return space.newtuple(args) def W_Continulet___new__(space, w_subtype, __args__): @@ -116,7 +124,9 @@ def unpickle(space, w_subtype): """Pickle support.""" - return W_Continulet___new__(space, w_subtype, None) + r = space.allocate_instance(W_Continulet, w_subtype) + r.__init__(space) + return space.wrap(r) W_Continulet.typedef = TypeDef( From noreply at buildbot.pypy.org Mon Sep 12 15:44:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 15:44:57 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Rename start_state into global_state. Message-ID: <20110912134457.14ABC82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47225:4c38c0864ce5 Date: 2011-09-12 15:44 +0200 http://bitbucket.org/pypy/pypy/changeset/4c38c0864ce5/ Log: Rename start_state into global_state. 
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -21,16 +21,16 @@ def check_sthread(self): ec = self.space.getexecutioncontext() if ec.stacklet_thread is not self.sthread: - start_state.clear() + global_state.clear() raise geterror(self.space, "inter-thread support is missing") return ec def descr_init(self, w_callable, __args__): if self.sthread is not None: raise geterror(self.space, "continulet already __init__ialized") - start_state.origin = self - start_state.w_callable = w_callable - start_state.args = __args__ + global_state.origin = self + global_state.w_callable = w_callable + global_state.args = __args__ self.bottomframe = make_fresh_frame(self.space) self.sthread = build_sthread(self.space) try: @@ -39,46 +39,46 @@ raise MemoryError except MemoryError: self.sthread = None - start_state.clear() + global_state.clear() raise getmemoryerror(self.space) def switch(self, w_to): to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None: if to.sthread is None: - start_state.clear() + global_state.clear() raise geterror(self.space, "continulet not initialized yet") if self is to: # double-switch to myself: no-op return get_result() if self.sthread is None: - start_state.clear() + global_state.clear() raise geterror(self.space, "continulet not initialized yet") ec = self.check_sthread() # - start_state.origin = self + global_state.origin = self if to is None: # simple switch: going to self.h - start_state.destination = self + global_state.destination = self else: # double switch: the final destination is to.h - start_state.destination = to + global_state.destination = to # - h = start_state.destination.h + h = global_state.destination.h sthread = self.sthread if sthread.is_empty_handle(h): - start_state.clear() + global_state.clear() raise geterror(self.space, 
"continulet already finished") # try: do_switch(sthread, h) except MemoryError: - start_state.clear() + global_state.clear() raise getmemoryerror(self.space) # return get_result() def descr_switch(self, w_value=None, w_to=None): - start_state.w_value = w_value + global_state.w_value = w_value return self.switch(w_to) def descr_throw(self, w_type, w_val=None, w_tb=None, w_to=None): @@ -93,8 +93,8 @@ # operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) - start_state.w_value = None - start_state.propagate_exception = operr + global_state.w_value = None + global_state.propagate_exception = operr return self.switch(w_to) def descr_is_pending(self): @@ -191,7 +191,7 @@ # ____________________________________________________________ -class StartState: # xxx a single global to pass around the function to start +class GlobalState: def clear(self): self.origin = None self.destination = None @@ -199,15 +199,15 @@ self.args = None self.w_value = None self.propagate_exception = None -start_state = StartState() -start_state.clear() +global_state = GlobalState() +global_state.clear() def new_stacklet_callback(h, arg): - self = start_state.origin - w_callable = start_state.w_callable - args = start_state.args - start_state.clear() + self = global_state.origin + w_callable = global_state.w_callable + args = global_state.args + global_state.clear() try: do_switch(self.sthread, h) except MemoryError: @@ -217,30 +217,30 @@ try: assert self.sthread.ec.topframeref() is None self.sthread.ec.topframeref = jit.non_virtual_ref(self.bottomframe) - if start_state.propagate_exception is not None: - raise start_state.propagate_exception # just propagate it further - if start_state.w_value is not space.w_None: + if global_state.propagate_exception is not None: + raise global_state.propagate_exception # just propagate it further + if global_state.w_value is not space.w_None: raise OperationError(space.w_TypeError, space.wrap( "can't send non-None value to a just-started 
continulet")) args = args.prepend(self.space.wrap(self)) w_result = space.call_args(w_callable, args) except Exception, e: - start_state.propagate_exception = e + global_state.propagate_exception = e else: - start_state.w_value = w_result + global_state.w_value = w_result self.sthread.ec.topframeref = jit.vref_None - start_state.origin = self - start_state.destination = self + global_state.origin = self + global_state.destination = self return self.h def do_switch(sthread, h): h = sthread.switch(h) - origin = start_state.origin - self = start_state.destination - start_state.origin = None - start_state.destination = None + origin = global_state.origin + self = global_state.destination + global_state.origin = None + global_state.destination = None self.h, origin.h = origin.h, h # current = sthread.ec.topframeref @@ -249,12 +249,12 @@ origin.bottomframe.f_backref = current def get_result(): - if start_state.propagate_exception: - e = start_state.propagate_exception - start_state.propagate_exception = None + if global_state.propagate_exception: + e = global_state.propagate_exception + global_state.propagate_exception = None raise e - w_value = start_state.w_value - start_state.w_value = None + w_value = global_state.w_value + global_state.w_value = None return w_value def build_sthread(space): From noreply at buildbot.pypy.org Mon Sep 12 15:44:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 15:44:58 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Forgot to add this file. Message-ID: <20110912134458.467D582041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47226:20c84364a06c Date: 2011-09-12 15:44 +0200 http://bitbucket.org/pypy/pypy/changeset/20c84364a06c/ Log: Forgot to add this file. 
diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -0,0 +1,81 @@ +from pypy.conftest import gettestobjspace + + +class AppTestPickle: + version = 0 + + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('_continuation',), + CALL_METHOD=True) + cls.space.appexec([], """(): + global continulet, A, __name__ + + import sys + __name__ = 'test_pickle_continulet' + thismodule = type(sys)(__name__) + sys.modules[__name__] = thismodule + + from _continuation import continulet + class A(continulet): + pass + + thismodule.__dict__.update(globals()) + """) + cls.w_version = cls.space.wrap(cls.version) + + def test_basic_setup(self): + from _continuation import continulet + lst = [4] + co = continulet(lst.append) + assert lst == [4] + res = co.switch() + assert res is None + assert lst == [4, co] + + def test_pickle_continulet_empty(self): + from _continuation import continulet + lst = [4] + co = continulet.__new__(continulet) + import pickle + pckl = pickle.dumps(co, self.version) + print repr(pckl) + co2 = pickle.loads(pckl) + assert co2 is not co + assert not co.is_pending() + assert not co2.is_pending() + # the empty unpickled coroutine can still be used: + result = [5] + co2.__init__(result.append) + res = co2.switch() + assert res is None + assert result == [5, co2] + + def test_pickle_continulet_empty_subclass(self): + from test_pickle_continulet import continulet, A + lst = [4] + co = continulet.__new__(A) + co.foo = 'bar' + co.bar = 'baz' + import pickle + pckl = pickle.dumps(co, self.version) + print repr(pckl) + co2 = pickle.loads(pckl) + assert co2 is not co + assert not co.is_pending() + assert not co2.is_pending() + assert type(co) is type(co2) is A + assert co.foo == co2.foo == 'bar' + assert co.bar == co2.bar == 'baz' + # the empty unpickled coroutine can still be used: + result = [5] + 
co2.__init__(result.append) + res = co2.switch() + assert res is None + assert result == [5, co2] + + +class AppTestPickle_v1(AppTestPickle): + version = 1 + +class AppTestPickle_v2(AppTestPickle): + version = 2 From noreply at buildbot.pypy.org Mon Sep 12 15:48:03 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 15:48:03 +0200 (CEST) Subject: [pypy-commit] pypy default: a bit of copy paste Message-ID: <20110912134803.53A8C82041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47227:2868163b3dc4 Date: 2011-09-12 15:47 +0200 http://bitbucket.org/pypy/pypy/changeset/2868163b3dc4/ Log: a bit of copy paste diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -181,4 +181,14 @@ def union((p, sb)): assert p.const is None return SomeStringBuilder(can_be_None=True) - + +class __extend__(pairtype(SomeUnicodeBuilder, SomePBC)): + def union((sb, p)): + assert p.const is None + return SomeUnicodeBuilder(can_be_None=True) + +class __extend__(pairtype(SomePBC, SomeUnicodeBuilder)): + def union((p, sb)): + assert p.const is None + return SomeUnicodeBuilder(can_be_None=True) + From noreply at buildbot.pypy.org Mon Sep 12 15:58:36 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 15:58:36 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: rewrite Message-ID: <20110912135836.B955182041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3898:b3e443e20951 Date: 2011-09-12 15:58 +0200 http://bitbucket.org/pypy/extradoc/changeset/b3e443e20951/ Log: rewrite diff --git a/talk/pyconar2011/abstract.rst b/talk/pyconar2011/abstract.rst --- a/talk/pyconar2011/abstract.rst +++ b/talk/pyconar2011/abstract.rst @@ -1,10 +1,13 @@ Little things that PyPy makes possible ====================================== -PyPy is just a python interpreter. 
However, there are things that were not -quite possible to do with Python, like real time video processing, because -of Python interpreter limitations. This is no longer the case -- I'll present -a few demos and explain the details of those little things that run on PyPy that -make python an even more awesome language than it was before. +PyPy is just a python interpreter. One of the original goals of the project +were to make existing python programs run faster, and PyPy succeeded in that. +However, the even more exciting part is that optimizations implemented in PyPy +let people do things in Python that were not possible before, like real-time +video processing, numeric array manipulation faster than in C etc. etc. - +During the talk I'll present some demos and talk what things are possible having +a decent optimizing just-in-time compiler and briefly discuss strategies that +we used for achieving this. I'll also discuss how faster-than-C, pypy's original +goal from years ago, was after all not that far off. From noreply at buildbot.pypy.org Mon Sep 12 16:04:37 2011 From: noreply at buildbot.pypy.org (hager) Date: Mon, 12 Sep 2011 16:04:37 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Applied more code of the ARM backend to PPC backend. Message-ID: <20110912140437.69EC382041@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r47228:5fd785b4cb83 Date: 2011-09-12 16:03 +0200 http://bitbucket.org/pypy/pypy/changeset/5fd785b4cb83/ Log: Applied more code of the ARM backend to PPC backend. 
diff --git a/pypy/jit/backend/ppc/ppcgen/condition.py b/pypy/jit/backend/ppc/ppcgen/condition.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/ppcgen/condition.py @@ -0,0 +1,1 @@ +LE = 0 diff --git a/pypy/jit/backend/ppc/ppcgen/helper/__init__.py b/pypy/jit/backend/ppc/ppcgen/helper/__init__.py new file mode 100644 diff --git a/pypy/jit/backend/ppc/ppcgen/helper/assembler.py b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py @@ -0,0 +1,17 @@ +import pypy.jit.backend.ppc.ppcgen.condition as c + +def gen_emit_cmp_op(condition): + def f(self, op, arglocs, regalloc): + l0, l1, res = arglocs + if l1.is_imm(): + self.cmpwi(0, l0.value, l1.value) + else: + self.cmpw(0, l0.value, l1.value) + + if condition == c.LE: + self.cror(0, 0, 2) + + resval = res.value + self.mfcr(resval) + self.rlwinm(resval, resval, 1, 31, 31) + return f diff --git a/pypy/jit/backend/ppc/ppcgen/helper/regalloc.py b/pypy/jit/backend/ppc/ppcgen/helper/regalloc.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/ppcgen/helper/regalloc.py @@ -0,0 +1,23 @@ +from pypy.jit.metainterp.history import ConstInt + +def _check_imm_arg(arg): + return isinstance(arg, ConstInt) + +def prepare_cmp_op(): + def f(self, op): + boxes = op.getarglist() + arg0, arg1 = boxes + imm_a0 = _check_imm_arg(arg0) + imm_a1 = _check_imm_arg(arg1) + l0, box = self._ensure_value_is_boxed(arg0, forbidden_vars=boxes) + boxes.append(box) + if imm_a1 and not imm_a0: + l1 = self.make_sure_var_in_reg(arg1, boxes) + else: + l1, box = self._ensure_value_is_boxed(arg1, forbidden_vars=boxes) + boxes.append(box) + self.possibly_free_vars(boxes) + res = self.force_allocate_reg(op.result) + self.possibly_free_var(op.result) + return [l0, l1, res] + return f diff --git a/pypy/jit/backend/ppc/ppcgen/jump.py b/pypy/jit/backend/ppc/ppcgen/jump.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/ppcgen/jump.py @@ -0,0 
+1,110 @@ +# ../x86/jump.py +# XXX combine with ../x86/jump.py and move to llsupport +import sys +from pypy.tool.pairtype import extendabletype + +def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): + pending_dests = len(dst_locations) + srccount = {} # maps dst_locations to how many times the same + # location appears in src_locations + for dst in dst_locations: + key = dst.as_key() + assert key not in srccount, "duplicate value in dst_locations!" + srccount[key] = 0 + for i in range(len(dst_locations)): + src = src_locations[i] + if src.is_imm(): + continue + key = src.as_key() + if key in srccount: + if key == dst_locations[i].as_key(): + srccount[key] = -sys.maxint # ignore a move "x = x" + pending_dests -= 1 + else: + srccount[key] += 1 + + while pending_dests > 0: + progress = False + for i in range(len(dst_locations)): + dst = dst_locations[i] + key = dst.as_key() + if srccount[key] == 0: + srccount[key] = -1 # means "it's done" + pending_dests -= 1 + src = src_locations[i] + if not src.is_imm(): + key = src.as_key() + if key in srccount: + srccount[key] -= 1 + _move(assembler, src, dst, tmpreg) + progress = True + if not progress: + # we are left with only pure disjoint cycles + sources = {} # maps dst_locations to src_locations + for i in range(len(dst_locations)): + src = src_locations[i] + dst = dst_locations[i] + sources[dst.as_key()] = src + # + for i in range(len(dst_locations)): + dst = dst_locations[i] + originalkey = dst.as_key() + if srccount[originalkey] >= 0: + assembler.regalloc_push(dst) + while True: + key = dst.as_key() + assert srccount[key] == 1 + # ^^^ because we are in a simple cycle + srccount[key] = -1 + pending_dests -= 1 + src = sources[key] + if src.as_key() == originalkey: + break + _move(assembler, src, dst, tmpreg) + dst = src + assembler.regalloc_pop(dst) + assert pending_dests == 0 + +def _move(assembler, src, dst, tmpreg): + if dst.is_stack() and src.is_stack(): + assembler.regalloc_mov(src, tmpreg) + src 
= tmpreg + assembler.regalloc_mov(src, dst) + +def remap_frame_layout_mixed(assembler, + src_locations1, dst_locations1, tmpreg1, + src_locations2, dst_locations2, tmpreg2): + # find and push the xmm stack locations from src_locations2 that + # are going to be overwritten by dst_locations1 + from pypy.jit.backend.ppc.ppcgen.arch import WORD + extrapushes = [] + dst_keys = {} + for loc in dst_locations1: + dst_keys[loc.as_key()] = None + src_locations2red = [] + dst_locations2red = [] + for i in range(len(src_locations2)): + loc = src_locations2[i] + dstloc = dst_locations2[i] + if loc.is_stack(): + key = loc.as_key() + if (key in dst_keys or (loc.width > WORD and + (key + 1) in dst_keys)): + assembler.regalloc_push(loc) + extrapushes.append(dstloc) + continue + src_locations2red.append(loc) + dst_locations2red.append(dstloc) + src_locations2 = src_locations2red + dst_locations2 = dst_locations2red + # + # remap the integer and pointer registers and stack locations + remap_frame_layout(assembler, src_locations1, dst_locations1, tmpreg1) + # + # remap the vfp registers and stack locations + remap_frame_layout(assembler, src_locations2, dst_locations2, tmpreg2) + # + # finally, pop the extra xmm stack locations + while len(extrapushes) > 0: + loc = extrapushes.pop() + assembler.regalloc_pop(loc) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -8,8 +8,10 @@ from pypy.jit.backend.ppc.ppcgen.symbol_lookup import lookup from pypy.jit.backend.ppc.ppcgen.arch import (IS_PPC_32, WORD, NONVOLATILES, GPR_SAVE_AREA) +from pypy.jit.backend.ppc.ppcgen.helper.assembler import gen_emit_cmp_op import pypy.jit.backend.ppc.ppcgen.register as r -from pypy.jit.metainterp.history import Const, ConstPtr +import pypy.jit.backend.ppc.ppcgen.condition as c +from pypy.jit.metainterp.history import Const, ConstPtr, LoopToken from 
pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager from pypy.jit.backend.llsupport.regalloc import (RegisterManager, @@ -976,7 +978,40 @@ self._save_nonvolatiles() def _make_epilogue(self): - self._restore_nonvolatiles() + for op_index, fail_index, guard, reglist in self.patch_list: + curpos = self.get_relative_pos() + offset = curpos - (4 * op_index) + assert (1 << 15) > offset + self.beq(offset) + self.patch_op(op_index) + + # store return parameters in memory + used_mem_indices = [] + for index, reg in enumerate(reglist): + # if reg is None, then there is a hole in the failargs + if reg is not None: + addr = self.fail_boxes_int.get_addr_for_num(index) + self.store_reg(reg, addr) + used_mem_indices.append(index) + + patch_op = self.get_number_of_ops() + patch_pos = self.get_relative_pos() + descr = self.cpu.saved_descr[fail_index] + descr.patch_op = patch_op + descr.patch_pos = patch_pos + descr.used_mem_indices = used_mem_indices + + self._restore_nonvolatiles() + + self.lwz(0, 1, self.framesize + 4) + if IS_PPC_32: + self.lwz(0, 1, self.framesize + WORD) # 36 + else: + self.ld(0, 1, self.framesize + WORD) # 36 + self.mtlr(0) + self.addi(1, 1, self.framesize) + self.li(r.r3.value, fail_index) + self.blr() def gen_bootstrap_code(self, nonfloatlocs, inputargs): for i in range(len(nonfloatlocs)): @@ -997,6 +1032,9 @@ def assemble_loop(self, inputargs, operations, looptoken, log): self.framesize = 256 + GPR_SAVE_AREA + self.patch_list = [] + self.startpos = self.get_relative_pos() + clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt @@ -1007,7 +1045,15 @@ self._make_prologue() nonfloatlocs = regalloc.prepare_loop(inputargs, operations, looptoken) self.gen_bootstrap_code(nonfloatlocs, inputargs) + + looptoken._ppc_loop_code = self.get_relative_pos() + looptoken._ppc_arglocs = [nonfloatlocs] + looptoken._ppc_bootstrap_code = 0 + 
self._walk_operations(operations, regalloc) + self._make_epilogue() + + looptoken.ppc_code = self.assemble() def _walk_operations(self, operations, regalloc): @@ -1068,23 +1114,18 @@ if isinstance(arg0, Box): reg0 = cpu.reg_map[arg0] else: - #reg0 = cpu.get_next_register() box = TempInt() reg0 = cpu.rm.force_allocate_reg(box) self.load_word(reg0, arg0.value) if isinstance(arg1, Box): reg1 = cpu.reg_map[arg1] else: - #reg1 = cpu.get_next_register() - #reg1 = cpu.rm.force_allocate_reg(arg1) box = TempInt() reg1 = cpu.rm.force_allocate_reg(box) boxed = cpu.rm.make_sure_var_in_reg(box) self.load_word(reg1, arg1.value) import pdb; pdb.set_trace() - #free_reg = cpu.next_free_register free_reg = cpu.rm.force_allocate_reg(op.result) - return free_reg, reg0, reg1 def _int_op_epilog(self, op, cpu, result_reg): @@ -1092,34 +1133,18 @@ cpu.reg_map[result] = result_reg cpu.next_free_register += 1 - def _guard_epilog(self, op, cpu): - fail_descr = op.getdescr() - fail_index = self._get_identifier_from_descr(fail_descr, cpu) - fail_descr.index = fail_index - cpu.saved_descr[fail_index] = fail_descr - numops = self.get_number_of_ops() - self.beq(0) - failargs = op.getfailargs() - reglist = [] - for failarg in failargs: - if failarg is None: - reglist.append(None) - else: - reglist.append(cpu.reg_map[failarg]) - cpu.patch_list.append((numops, fail_index, op, reglist)) - # Fetches the identifier from a descr object. # If it has no identifier, then an unused identifier # is generated # XXX could be overwritten later on, better approach? 
- def _get_identifier_from_descr(self, descr, cpu): + def _get_identifier_from_descr(self, descr): try: identifier = descr.identifier except AttributeError: identifier = None if identifier is not None: return identifier - keys = cpu.saved_descr.keys() + keys = self.cpu.saved_descr.keys() if keys == []: return 1 return max(keys) + 1 @@ -1208,6 +1233,12 @@ else: self.divdu(free_reg, reg0, reg1) + # **************************************************** + # * C O M P A R I S O N S T U F F * + # **************************************************** + + emit_int_le = gen_emit_cmp_op(c.LE) + def emit_int_eq(self, op, cpu, reg0, reg1, free_reg): self.xor(free_reg, reg0, reg1) if IS_PPC_32: @@ -1217,15 +1248,6 @@ self.cntlzd(free_reg, free_reg) self.srdi(free_reg, free_reg, 6) - def emit_int_le(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.cmpw(7, reg0, reg1) - else: - self.cmpd(7, reg0, reg1) - self.cror(31, 30, 28) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 0, 31, 31) - def emit_int_lt(self, op, cpu, reg0, reg1, free_reg): if IS_PPC_32: self.cmpw(7, reg0, reg1) @@ -1575,10 +1597,26 @@ # GUARD OPERATIONS * #****************************** - def emit_guard_true(self, op, cpu): - arg0 = op.getarg(0) - regnum = cpu.reg_map[arg0] - self.cmpi(0, 1, regnum, 0) + def _guard_epilogue(self, op, failargs): + fail_descr = op.getdescr() + fail_index = self._get_identifier_from_descr(fail_descr) + fail_descr.index = fail_index + self.cpu.saved_descr[fail_index] = fail_descr + numops = self.get_number_of_ops() + self.beq(0) + reglist = [] + for failarg in failargs: + if failarg is None: + reglist.append(None) + else: + reglist.append(failarg) + self.patch_list.append((numops, fail_index, op, reglist)) + + def emit_guard_true(self, op, arglocs, regalloc): + l0 = arglocs[0] + failargs = arglocs[1:] + self.cmpi(l0.value, 0) + self._guard_epilogue(op, failargs) def emit_guard_false(self, op, cpu): arg0 = op.getarg(0) @@ -1677,7 +1715,7 @@ def emit_finish(self, op, 
arglocs, regalloc): descr = op.getdescr() - identifier = self._get_identifier_from_descr(descr, self.cpu) + identifier = self._get_identifier_from_descr(descr) self.cpu.saved_descr[identifier] = descr args = op.getarglist() for index, arg in enumerate(arglocs): @@ -1697,14 +1735,14 @@ self.load_imm(r.r3, identifier) self.blr() - def emit_jump(self, op, cpu): - for index, arg in enumerate(op.getarglist()): - target = index + 3 - regnum = cpu.reg_map[arg] - self.mr(target, regnum) - - offset = self.get_relative_pos() - self.b(-offset + cpu.startpos) + def emit_jump(self, op, arglocs, regalloc): + descr = op.getdescr() + assert isinstance(descr, LoopToken) + if descr._ppc_bootstrap_code == 0: + curpos = self.get_relative_pos() + self.b(descr._ppc_loop_code - curpos) + else: + assert 0, "case not implemented yet" class BranchUpdater(PPCAssembler): def __init__(self): diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -2,7 +2,11 @@ TempBox, compute_vars_longevity, compute_loop_consts) from pypy.jit.backend.ppc.ppcgen.arch import (WORD, MY_COPY_OF_REGS) -from pypy.jit.metainterp.history import INT, REF, Const, ConstInt, ConstPtr +from pypy.jit.backend.ppc.ppcgen.jump import remap_frame_layout_mixed +from pypy.jit.backend.ppc.ppcgen.helper.regalloc import (_check_imm_arg, + prepare_cmp_op) +from pypy.jit.metainterp.history import (INT, REF, FLOAT, Const, ConstInt, + ConstPtr, LoopToken) from pypy.jit.metainterp.resoperation import rop from pypy.jit.backend.ppc.ppcgen import locations from pypy.rpython.lltypesystem import rffi, lltype @@ -133,9 +137,6 @@ def next_instruction(self): self.rm.next_instruction() - def _check_imm_arg(self, arg): - return isinstance(arg, ConstInt) - def _ensure_value_is_boxed(self, thing, forbidden_vars=[]): box = None loc = None @@ -165,8 +166,8 @@ def prepare_int_add(self, op): boxes = op.getarglist() b0, 
b1 = boxes - imm_b0 = self._check_imm_arg(b0) - imm_b1 = self._check_imm_arg(b1) + imm_b0 = _check_imm_arg(b0) + imm_b1 = _check_imm_arg(b1) if not imm_b0 and imm_b1: l0, box = self._ensure_value_is_boxed(b0) l1 = self.make_sure_var_in_reg(b1, [b0]) @@ -180,7 +181,6 @@ boxes.append(box) l1, box = self._ensure_value_is_boxed(b1, [box]) boxes.append(box) - #return [l0, l1], boxes locs = [l0, l1] self.possibly_free_vars(boxes) res = self.force_allocate_reg(op.result) @@ -198,6 +198,40 @@ args.append(None) return args + def _prepare_guard(self, op, args=None): + if args is None: + args = [] + for arg in op.getfailargs(): + if arg: + args.append(self.loc(arg)) + else: + args.append(None) + return args + + def prepare_guard_true(self, op): + l0, box = self._ensure_value_is_boxed(op.getarg(0)) + args = self._prepare_guard(op, [l0]) + self.possibly_free_var(box) + self.possibly_free_vars(op.getfailargs()) + return args + + def prepare_jump(self, op): + descr = op.getdescr() + assert isinstance(descr, LoopToken) + nonfloatlocs = descr._ppc_arglocs[0] + + tmploc = r.r0 + src_locs1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type != FLOAT] + assert tmploc not in nonfloatlocs + dst_locs1 = [loc for loc in nonfloatlocs if loc is not None] + remap_frame_layout_mixed(self.assembler, + src_locs1, dst_locs1, tmploc, + [], [], None) + return [] + + prepare_int_le = prepare_cmp_op() + def make_operation_list(): def not_implemented(self, op, *args): raise NotImplementedError, op From noreply at buildbot.pypy.org Mon Sep 12 16:10:54 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 16:10:54 +0200 (CEST) Subject: [pypy-commit] pypy default: merge space-iterator-improvements. 
this branch preallocates a correctly size list for unpackiterable Message-ID: <20110912141054.A06A682041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47229:b4ee85d4978b Date: 2011-09-12 16:10 +0200 http://bitbucket.org/pypy/pypy/changeset/b4ee85d4978b/ Log: merge space-iterator-improvements. this branch preallocates a correctly size list for unpackiterable diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -8,13 +8,13 @@ from pypy.interpreter.miscutils import ThreadLocals from pypy.tool.cache import Cache from pypy.tool.uid import HUGEVAL_BYTES -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, newlist from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint from pypy.rlib import jit from pypy.tool.sourcetools import func_with_new_name -import os, sys, py +import os, sys __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root'] @@ -757,7 +757,18 @@ w_iterator = self.iter(w_iterable) # If we know the expected length we can preallocate. 
if expected_length == -1: - items = [] + try: + lgt_estimate = self.len_w(w_iterable) + except OperationError, o: + if (not o.match(self, self.w_AttributeError) and + not o.match(self, self.w_TypeError)): + raise + items = [] + else: + try: + items = newlist(lgt_estimate) + except MemoryError: + items = [] # it might have lied else: items = [None] * expected_length idx = 0 diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -71,6 +71,23 @@ assert err.value.match(space, space.w_ValueError) err = raises(OperationError, space.unpackiterable, w_l, 5) assert err.value.match(space, space.w_ValueError) + w_a = space.appexec((), """(): + class A(object): + def __iter__(self): + return self + def next(self): + raise StopIteration + def __len__(self): + 1/0 + return A() + """) + try: + space.unpackiterable(w_a) + except OperationError, o: + if not o.match(space, space.w_ZeroDivisionError): + raise Exception("DID NOT RAISE") + else: + raise Exception("DID NOT RAISE") def test_fixedview(self): space = self.space diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -72,10 +72,6 @@ w_seqiter.index += 1 return w_item -# XXX __length_hint__() -##def len__SeqIter(space, w_seqiter): -## return w_seqiter.getlength(space) - def iter__FastTupleIter(space, w_seqiter): return w_seqiter @@ -93,10 +89,6 @@ w_seqiter.index = index + 1 return w_item -# XXX __length_hint__() -##def len__FastTupleIter(space, w_seqiter): -## return w_seqiter.getlength(space) - def iter__FastListIter(space, w_seqiter): return w_seqiter @@ -114,10 +106,6 @@ w_seqiter.index = index + 1 return w_item -# XXX __length_hint__() -##def len__FastListIter(space, w_seqiter): -## return w_seqiter.getlength(space) - def iter__ReverseSeqIter(space, w_seqiter): return w_seqiter @@ -135,20 
+123,5 @@ raise OperationError(space.w_StopIteration, space.w_None) return w_item -# XXX __length_hint__() -##def len__ReverseSeqIter(space, w_seqiter): -## if w_seqiter.w_seq is None: -## return space.wrap(0) -## index = w_seqiter.index+1 -## w_length = space.len(w_seqiter.w_seq) -## # if length of sequence is less than index :exhaust iterator -## if space.is_true(space.gt(space.wrap(w_seqiter.index), w_length)): -## w_len = space.wrap(0) -## w_seqiter.w_seq = None -## else: -## w_len =space.wrap(index) -## if space.is_true(space.lt(w_len,space.wrap(0))): -## w_len = space.wrap(0) -## return w_len register_all(vars()) diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -19,6 +19,8 @@ # def f(... # +from pypy.rpython.extregistry import ExtRegistryEntry + class _Specialize(object): def memo(self): """ Specialize functions based on argument values. All arguments has @@ -177,6 +179,34 @@ obj.__class__ = FREED_OBJECT # ____________________________________________________________ + +def newlist(sizehint=0): + """ Create a new list, but pass a hint how big the size should be + preallocated + """ + return [] + +class Entry(ExtRegistryEntry): + _about_ = newlist + + def compute_result_annotation(self, s_sizehint): + from pypy.annotation.model import SomeInteger + + assert isinstance(s_sizehint, SomeInteger) + return self.bookkeeper.newlist() + + def specialize_call(self, orig_hop, i_sizehint=None): + from pypy.rpython.rlist import rtype_newlist + # fish a bit hop + hop = orig_hop.copy() + v = hop.args_v[0] + r, s = hop.r_s_popfirstarg() + if s.is_constant(): + v = hop.inputconst(r, s.const) + hop.exception_is_here() + return rtype_newlist(hop, v_sizehint=v) + +# ____________________________________________________________ # # id-like functions. The idea is that calling hash() or id() is not # allowed in RPython. 
You have to call one of the following more @@ -301,8 +331,6 @@ # ---------- -from pypy.rpython.extregistry import ExtRegistryEntry - class Entry(ExtRegistryEntry): _about_ = compute_hash diff --git a/pypy/rlib/test/test_objectmodel.py b/pypy/rlib/test/test_objectmodel.py --- a/pypy/rlib/test/test_objectmodel.py +++ b/pypy/rlib/test/test_objectmodel.py @@ -424,3 +424,32 @@ if option.view: graph.show() return graph + + +def test_newlist(): + from pypy.annotation.model import SomeInteger + def f(z): + x = newlist(sizehint=38) + if z < 0: + x.append(1) + return len(x) + + graph = getgraph(f, [SomeInteger()]) + for llop in graph.startblock.operations: + if llop.opname == 'malloc_varsize': + break + assert llop.args[2].value == 38 + +def test_newlist_nonconst(): + from pypy.annotation.model import SomeInteger + def f(z): + x = newlist(sizehint=z) + return len(x) + + graph = getgraph(f, [SomeInteger()]) + for llop in graph.startblock.operations: + if llop.opname == 'malloc_varsize': + break + assert llop.args[2] is graph.startblock.inputargs[0] + + diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -1,21 +1,14 @@ from pypy.tool.pairtype import pairtype, pair -from pypy.annotation import model as annmodel -from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, IntegerRepr, inputconst +from pypy.rpython.rmodel import Repr, inputconst from pypy.rpython.rmodel import externalvsinternal from pypy.rpython.rlist import AbstractBaseListRepr, AbstractListRepr, \ - AbstractFixedSizeListRepr, AbstractListIteratorRepr, rtype_newlist, \ - rtype_alloc_and_set, ll_setitem_nonneg, ADTIList, ADTIFixedList -from pypy.rpython.rlist import dum_nocheck, dum_checkidx -from pypy.rpython.lltypesystem.lltype import \ - GcForwardReference, Ptr, GcArray, GcStruct, \ - Void, Signed, malloc, typeOf, Primitive, \ - Bool, nullptr, typeMethod + 
AbstractFixedSizeListRepr, AbstractListIteratorRepr, \ + ll_setitem_nonneg, ADTIList, ADTIFixedList +from pypy.rpython.rlist import dum_nocheck +from pypy.rpython.lltypesystem.lltype import GcForwardReference, Ptr, GcArray,\ + GcStruct, Void, Signed, malloc, typeOf, nullptr, typeMethod from pypy.rpython.lltypesystem import rstr -from pypy.rpython import robject from pypy.rlib.debug import ll_assert -from pypy.rpython.lltypesystem import rffi -from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc # ____________________________________________________________ @@ -67,6 +60,7 @@ ITEMARRAY = GcArray(ITEM, adtmeths = ADTIFixedList({ "ll_newlist": ll_fixed_newlist, + "ll_newlist_hint": ll_fixed_newlist, "ll_newemptylist": ll_fixed_newemptylist, "ll_length": ll_fixed_length, "ll_items": ll_fixed_items, @@ -100,6 +94,7 @@ ("items", Ptr(ITEMARRAY)), adtmeths = ADTIList({ "ll_newlist": ll_newlist, + "ll_newlist_hint": ll_newlist_hint, "ll_newemptylist": ll_newemptylist, "ll_length": ll_length, "ll_items": ll_items, @@ -267,6 +262,15 @@ ll_newlist = typeMethod(ll_newlist) ll_newlist.oopspec = 'newlist(length)' +def ll_newlist_hint(LIST, lengthhint): + ll_assert(lengthhint >= 0, "negative list length") + l = malloc(LIST) + l.length = 0 + l.items = malloc(LIST.items.TO, lengthhint) + return l +ll_newlist_hint = typeMethod(ll_newlist_hint) +ll_newlist_hint.oopspec = 'newlist(lengthhint)' + # should empty lists start with no allocated memory, or with a preallocated # minimal number of entries? XXX compare memory usage versus speed, and # check how many always-empty lists there are in a typical pypy-c run... 
@@ -337,11 +341,15 @@ l[index] = item ll_fixed_setitem_fast.oopspec = 'list.setitem(l, index, item)' -def newlist(llops, r_list, items_v): +def newlist(llops, r_list, items_v, v_sizehint=None): LIST = r_list.LIST if len(items_v) == 0: - v_result = llops.gendirectcall(LIST.ll_newemptylist) + if v_sizehint is None: + v_result = llops.gendirectcall(LIST.ll_newemptylist) + else: + v_result = llops.gendirectcall(LIST.ll_newlist_hint, v_sizehint) else: + assert v_sizehint is None cno = inputconst(Signed, len(items_v)) v_result = llops.gendirectcall(LIST.ll_newlist, cno) v_func = inputconst(Void, dum_nocheck) diff --git a/pypy/rpython/ootypesystem/rlist.py b/pypy/rpython/ootypesystem/rlist.py --- a/pypy/rpython/ootypesystem/rlist.py +++ b/pypy/rpython/ootypesystem/rlist.py @@ -124,7 +124,7 @@ else: return ootype.List() - def _generate_newlist(self, llops, items_v): + def _generate_newlist(self, llops, items_v, v_sizehint): c_list = inputconst(ootype.Void, self.lowleveltype) v_result = llops.genop("new", [c_list], resulttype=self.lowleveltype) c_resize = inputconst(ootype.Void, "_ll_resize") @@ -150,8 +150,8 @@ -def newlist(llops, r_list, items_v): - v_result = r_list._generate_newlist(llops, items_v) +def newlist(llops, r_list, items_v, v_sizehint=None): + v_result = r_list._generate_newlist(llops, items_v, v_sizehint) c_setitem = inputconst(ootype.Void, "ll_setitem_fast") for i, v_item in enumerate(items_v): @@ -224,7 +224,7 @@ def make_iterator_repr(self): return ListIteratorRepr(self) - def _generate_newlist(self, llops, items_v): + def _generate_newlist(self, llops, items_v, v_sizehint): c_array = inputconst(ootype.Void, self.lowleveltype) c_length = inputconst(ootype.Signed, len(items_v)) v_result = llops.genop("oonewarray", [c_array, c_length], resulttype=self.lowleveltype) diff --git a/pypy/rpython/rlist.py b/pypy/rpython/rlist.py --- a/pypy/rpython/rlist.py +++ b/pypy/rpython/rlist.py @@ -2,7 +2,7 @@ from pypy.objspace.flow.model import Constant from 
pypy.annotation import model as annmodel from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, IteratorRepr, IntegerRepr, inputconst +from pypy.rpython.rmodel import Repr, IteratorRepr, IntegerRepr from pypy.rpython.rstr import AbstractStringRepr, AbstractCharRepr from pypy.rpython.lltypesystem.lltype import typeOf, Ptr, Void, Signed, Bool from pypy.rpython.lltypesystem.lltype import nullptr, Char, UniChar, Number @@ -344,7 +344,7 @@ return hop.genop('bool_not', [flag], resulttype=Bool) -def rtype_newlist(hop): +def rtype_newlist(hop, v_sizehint=None): nb_args = hop.nb_args r_list = hop.r_result if r_list == robject.pyobj_repr: # special case: SomeObject lists! @@ -358,7 +358,8 @@ return v_result r_listitem = r_list.item_repr items_v = [hop.inputarg(r_listitem, arg=i) for i in range(nb_args)] - return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v) + return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v, + v_sizehint=v_sizehint) def rtype_alloc_and_set(hop): r_list = hop.r_result diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py --- a/pypy/rpython/test/test_rlist.py +++ b/pypy/rpython/test/test_rlist.py @@ -1360,6 +1360,19 @@ assert ('foldable' in func.func_name) == \ ("y[*]" in immutable_fields) + def test_hints(self): + from pypy.rlib.objectmodel import newlist + from pypy.rpython.annlowlevel import hlstr + + def f(z): + z = hlstr(z) + x = newlist(sizehint=13) + x += z + return ''.join(x) + + res = self.interpret(f, [self.string_to_ll('abc')]) + assert self.ll_to_string(res) == 'abc' + class TestLLtype(BaseTestRlist, LLRtypeMixin): type_system = 'lltype' rlist = ll_rlist From noreply at buildbot.pypy.org Mon Sep 12 16:19:58 2011 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Sep 2011 16:19:58 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: s/was/is/ Message-ID: <20110912141958.08E8E82041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: 
extradoc Changeset: r3899:a1cb845647d4 Date: 2011-09-12 16:19 +0200 http://bitbucket.org/pypy/extradoc/changeset/a1cb845647d4/ Log: s/was/is/ diff --git a/talk/pyconar2011/abstract.rst b/talk/pyconar2011/abstract.rst --- a/talk/pyconar2011/abstract.rst +++ b/talk/pyconar2011/abstract.rst @@ -10,4 +10,4 @@ During the talk I'll present some demos and talk what things are possible having a decent optimizing just-in-time compiler and briefly discuss strategies that we used for achieving this. I'll also discuss how faster-than-C, pypy's original -goal from years ago, was after all not that far off. +goal from years ago, is after all not that far off. From noreply at buildbot.pypy.org Mon Sep 12 16:58:07 2011 From: noreply at buildbot.pypy.org (hager) Date: Mon, 12 Sep 2011 16:58:07 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Added teardown() method to ppc_assembler. Message-ID: <20110912145807.DD21B82041@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r47230:3c4861e4e476 Date: 2011-09-12 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/3c4861e4e476/ Log: Added teardown() method to ppc_assembler. 
diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -1053,8 +1053,8 @@ self._walk_operations(operations, regalloc) self._make_epilogue() - looptoken.ppc_code = self.assemble() + self._teardown() def _walk_operations(self, operations, regalloc): while regalloc.position() < len(operations) - 1: @@ -1073,6 +1073,9 @@ regalloc.possibly_free_vars_for_op(op) regalloc._check_invariants() + def _teardown(self): + self.patch_list = None + # translate a trace operation to corresponding machine code def build_op(self, trace_op, cpu): opnum = trace_op.getopnum() From noreply at buildbot.pypy.org Mon Sep 12 17:42:40 2011 From: noreply at buildbot.pypy.org (hager) Date: Mon, 12 Sep 2011 17:42:40 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Removed unused code from CPU class. Message-ID: <20110912154240.DC96E82041@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r47231:457b84c743dd Date: 2011-09-12 17:16 +0200 http://bitbucket.org/pypy/pypy/changeset/457b84c743dd/ Log: Removed unused code from CPU class. 
diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py --- a/pypy/jit/backend/ppc/runner.py +++ b/pypy/jit/backend/ppc/runner.py @@ -73,49 +73,6 @@ self.total_compiled_bridges += 1 self.teardown() - def get_next_register(self): - reg = self.next_free_register - self.next_free_register += 1 - return reg - - # XXX not used by now, move to ppc_assembler - def _make_epilogue(self, codebuilder): - for op_index, fail_index, guard, reglist in self.patch_list: - curpos = codebuilder.get_relative_pos() - offset = curpos - (4 * op_index) - assert (1 << 15) > offset - codebuilder.beq(offset) - codebuilder.patch_op(op_index) - - # store return parameters in memory - used_mem_indices = [] - for index, reg in enumerate(reglist): - self.fail_box_count += 1 - # if reg is None, then there is a hole in the failargs - if reg is not None: - addr = self.fail_boxes_int.get_addr_for_num(index) - codebuilder.store_reg(reg, addr) - used_mem_indices.append(index) - - patch_op = codebuilder.get_number_of_ops() - patch_pos = codebuilder.get_relative_pos() - descr = self.saved_descr[fail_index] - descr.patch_op = patch_op - descr.patch_pos = patch_pos - descr.used_mem_indices = used_mem_indices - - codebuilder.restore_nonvolatiles(self.framesize) - - codebuilder.lwz(0, 1, self.framesize + 4) - if IS_PPC_32: - codebuilder.lwz(0, 1, framesize + WORD) # 36 - else: - codebuilder.ld(0, 1, framesize + WORD) # 36 - codebuilder.mtlr(0) - codebuilder.addi(1, 1, self.framesize) - codebuilder.li(3, fail_index) - codebuilder.blr() - # set value in fail_boxes_int def set_future_value_int(self, index, value_int): self.asm.fail_boxes_int.setitem(index, value_int) From noreply at buildbot.pypy.org Mon Sep 12 17:42:42 2011 From: noreply at buildbot.pypy.org (hager) Date: Mon, 12 Sep 2011 17:42:42 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Added test which checks independence of different loops which are compiled with the same assembler instance. 
Message-ID: <20110912154242.22AB782041@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r47232:328a1cc7f30c Date: 2011-09-12 17:42 +0200 http://bitbucket.org/pypy/pypy/changeset/328a1cc7f30c/ Log: Added test which checks independence of different loops which are compiled with the same assembler instance. diff --git a/pypy/jit/backend/ppc/ppcgen/assembler.py b/pypy/jit/backend/ppc/ppcgen/assembler.py --- a/pypy/jit/backend/ppc/ppcgen/assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/assembler.py @@ -15,6 +15,11 @@ self.labels = {} self.rlabels = {} + def reset(self): + self.insts = [] + self.labels = {} + self.rlabels = {} + def label(self, name): if name in self.labels: raise AssemblerException, "duplicate label '%s'"%(name,) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -1075,6 +1075,7 @@ def _teardown(self): self.patch_list = None + self.reset() # translate a trace operation to corresponding machine code def build_op(self, trace_op, cpu): diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -192,6 +192,49 @@ res = self.cpu.get_latest_value_int(2) assert res == 10 + def test_independent_loops(self): + # first loop + i0_1 = BoxInt() + i1_1 = BoxInt() + i2_1 = BoxInt() + looptoken1 = LoopToken() + operations1 = [ + ResOperation(rop.INT_ADD, [i0_1, ConstInt(1)], i1_1), + ResOperation(rop.INT_LE, [i1_1, ConstInt(9)], i2_1), + ResOperation(rop.GUARD_TRUE, [i2_1], None, descr=BasicFailDescr(2)), + ResOperation(rop.JUMP, [i1_1], None, descr=looptoken1), + ] + inputargs1 = [i0_1] + operations1[2].setfailargs([i1_1]) + + self.cpu.compile_loop(inputargs1, operations1, looptoken1) + self.cpu.set_future_value_int(0, 2) + fail1 = self.cpu.execute_token(looptoken1) + 
assert fail1.identifier == 2 + res1 = self.cpu.get_latest_value_int(0) + assert res1 == 10 + + # second loop + i0_2 = BoxInt() + i1_2 = BoxInt() + i2_2 = BoxInt() + looptoken2 = LoopToken() + operations2 = [ + ResOperation(rop.INT_ADD, [i0_2, ConstInt(1)], i1_2), + ResOperation(rop.INT_LE, [i1_2, ConstInt(19)], i2_2), + ResOperation(rop.GUARD_TRUE, [i2_2], None, descr=BasicFailDescr(2)), + ResOperation(rop.JUMP, [i1_2], None, descr=looptoken2), + ] + inputargs2 = [i0_2] + operations2[2].setfailargs([i1_2]) + + self.cpu.compile_loop(inputargs2, operations2, looptoken2) + self.cpu.set_future_value_int(0, 2) + fail2 = self.cpu.execute_token(looptoken2) + assert fail2.identifier == 2 + res2 = self.cpu.get_latest_value_int(0) + assert res2 == 20 + def test_backends_dont_keep_loops_alive(self): import weakref, gc self.cpu.dont_keepalive_stuff = True From noreply at buildbot.pypy.org Mon Sep 12 18:22:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Sep 2011 18:22:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Update comment. Message-ID: <20110912162250.49A8E82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47233:7c5f268c9c8d Date: 2011-09-12 18:22 +0200 http://bitbucket.org/pypy/pypy/changeset/7c5f268c9c8d/ Log: Update comment. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -44,7 +44,7 @@ if w_iterable is not None: # unfortunately this is duplicating space.unpackiterable to avoid # assigning a new RPython list to 'wrappeditems', which defeats the - # W_FastSeqIterObject optimization. + # W_FastListIterObject optimization. 
if isinstance(w_iterable, W_ListObject): items_w.extend(w_iterable.wrappeditems) elif isinstance(w_iterable, W_TupleObject): From noreply at buildbot.pypy.org Tue Sep 13 01:55:51 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Sep 2011 01:55:51 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-const: merged upstream. Message-ID: <20110912235551.2233B82041@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-const Changeset: r47234:d886ce5dd420 Date: 2011-09-12 19:52 -0400 http://bitbucket.org/pypy/pypy/changeset/d886ce5dd420/ Log: merged upstream. diff too long, truncating to 10000 out of 49037 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,1 +1,2 @@ b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5 +b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -37,22 +37,22 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni Michael Hudson Holger Krekel + Benjamin Peterson Christian Tismer - Benjamin Peterson + Hakan Ardo + Alex Gaynor Eric van Riet Paap - Anders Chrigström - Håkan Ardö + Anders Chrigstrom + David Schneider Richard Emslie Dan Villiom Podlaski Christiansen Alexander Schremmer - Alex Gaynor - David Schneider - Aurelién Campeas + Aurelien Campeas Anders Lehmann Camillo Bruni Niklaus Haldimann @@ -63,16 +63,17 @@ Bartosz Skowron Jakub Gustak Guido Wesdorp + Daniel Roberts Adrien Di Mascio Laura Creighton Ludovic Aubry Niko Matsakis - Daniel Roberts Jason Creighton - Jacob Hallén + Jacob Hallen Alex Martelli Anders Hammarquist Jan de Mooij + Wim Lavrijsen Stephan Diehl Michael Foord Stefan Schwarzer @@ -83,9 +84,13 @@ Alexandre Fayolle Marius Gedminas Simon Burton + Justin Peel Jean-Paul Calderone John Witulski + Lukas Diekmann + holger krekel Wim Lavrijsen + Dario Bertini Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum @@ -97,15 +102,16 @@ Georg Brandl Gerald Klix Wanja Saatkamp + Ronny Pfannschmidt Boris Feigin Oscar Nierstrasz - Dario Bertini David Malcolm Eugene Oden Henry Mason + Sven Hager Lukas Renggli + Ilya Osadchiy Guenter Jantzen - Ronny Pfannschmidt Bert Freudenberg Amit Regmi Ben Young @@ -122,8 +128,8 @@ Jared Grubb Karl Bartel Gabriel Lavoie + Victor Stinner Brian Dorsey - Victor Stinner Stuart Williams Toby Watson Antoine Pitrou @@ -134,19 +140,23 @@ Jonathan David Riehl Elmo Mäntynen Anders Qvist - Beatrice Düring + Beatrice During Alexander Sedov + Timo Paulssen + Corbin Simpson Vincent Legoll + Romain Guillebert Alan McIntyre - Romain Guillebert Alex Perry Jens-Uwe Mager + Simon Cross Dan Stromberg - Lukas Diekmann + Guillebert Romain Carl Meyer Pieter Zieschang Alejandro J. Cura Sylvain Thenault + Christoph Gerum Travis Francis Athougies Henrik Vendelbo Lutz Paelike @@ -157,6 +167,7 @@ Miguel de Val Borro Ignas Mikalajunas Artur Lisiecki + Philip Jenvey Joshua Gilbert Godefroid Chappelle Yusei Tahara @@ -165,27 +176,31 @@ Gustavo Niemeyer William Leslie Akira Li - Kristján Valur Jónsson + Kristjan Valur Jonsson Bobby Impollonia + Michael Hudson-Doyle Andrew Thompson Anders Sigfridsson + Floris Bruynooghe Jacek Generowicz Dan Colish - Sven Hager Zooko Wilcox-O Hearn + Dan Villiom Podlaski Christiansen Anders Hammarquist + Chris Lambacher Dinu Gherman Dan Colish + Brett Cannon Daniel Neuhäuser Michael Chermside Konrad Delong Anna Ravencroft Greg Price Armin Ronacher + Christian Muirhead Jim Baker - Philip Jenvey Rodrigo Araújo - Brett Cannon + Romain Guillebert Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/ctypes_configure/configure.py b/ctypes_configure/configure.py --- a/ctypes_configure/configure.py +++ b/ctypes_configure/configure.py @@ -559,7 +559,9 @@ C_HEADER = """ #include #include /* for offsetof() */ -#include /* FreeBSD: for uint64_t */ +#ifndef _WIN32 +# include /* FreeBSD: for 
uint64_t */ +#endif void dump(char* key, int value) { printf("%s: %d\\n", key, value); diff --git a/ctypes_configure/stdoutcapture.py b/ctypes_configure/stdoutcapture.py --- a/ctypes_configure/stdoutcapture.py +++ b/ctypes_configure/stdoutcapture.py @@ -15,6 +15,15 @@ not hasattr(os, 'fdopen')): self.dummy = 1 else: + try: + self.tmpout = os.tmpfile() + if mixed_out_err: + self.tmperr = self.tmpout + else: + self.tmperr = os.tmpfile() + except OSError: # bah? on at least one Windows box + self.dummy = 1 + return self.dummy = 0 # make new stdout/stderr files if needed self.localoutfd = os.dup(1) @@ -29,11 +38,6 @@ sys.stderr = os.fdopen(self.localerrfd, 'w', 0) else: self.saved_stderr = None - self.tmpout = os.tmpfile() - if mixed_out_err: - self.tmperr = self.tmpout - else: - self.tmperr = os.tmpfile() os.dup2(self.tmpout.fileno(), 1) os.dup2(self.tmperr.fileno(), 2) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -154,18 +154,18 @@ RegrTest('test_cmd.py'), RegrTest('test_cmd_line_script.py'), RegrTest('test_codeccallbacks.py', core=True), - RegrTest('test_codecencodings_cn.py'), - RegrTest('test_codecencodings_hk.py'), - RegrTest('test_codecencodings_jp.py'), - RegrTest('test_codecencodings_kr.py'), - RegrTest('test_codecencodings_tw.py'), + RegrTest('test_codecencodings_cn.py', usemodules='_multibytecodec'), + RegrTest('test_codecencodings_hk.py', usemodules='_multibytecodec'), + RegrTest('test_codecencodings_jp.py', usemodules='_multibytecodec'), + RegrTest('test_codecencodings_kr.py', usemodules='_multibytecodec'), + RegrTest('test_codecencodings_tw.py', usemodules='_multibytecodec'), - RegrTest('test_codecmaps_cn.py'), - RegrTest('test_codecmaps_hk.py'), - RegrTest('test_codecmaps_jp.py'), - RegrTest('test_codecmaps_kr.py'), - RegrTest('test_codecmaps_tw.py'), - RegrTest('test_codecs.py', core=True), + RegrTest('test_codecmaps_cn.py', usemodules='_multibytecodec'), + 
RegrTest('test_codecmaps_hk.py', usemodules='_multibytecodec'), + RegrTest('test_codecmaps_jp.py', usemodules='_multibytecodec'), + RegrTest('test_codecmaps_kr.py', usemodules='_multibytecodec'), + RegrTest('test_codecmaps_tw.py', usemodules='_multibytecodec'), + RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coercion.py', core=True), RegrTest('test_collections.py'), @@ -314,7 +314,7 @@ RegrTest('test_mmap.py'), RegrTest('test_module.py', core=True), RegrTest('test_modulefinder.py'), - RegrTest('test_multibytecodec.py'), + RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'), RegrTest('test_multibytecodec_support.py', skip="not a test"), RegrTest('test_multifile.py'), RegrTest('test_multiprocessing.py', skip='FIXME leaves subprocesses'), @@ -359,7 +359,7 @@ RegrTest('test_property.py', core=True), RegrTest('test_pstats.py'), RegrTest('test_pty.py', skip="unsupported extension module"), - RegrTest('test_pwd.py', skip=skip_win32), + RegrTest('test_pwd.py', usemodules="pwd", skip=skip_win32), RegrTest('test_py3kwarn.py'), RegrTest('test_pyclbr.py'), RegrTest('test_pydoc.py'), diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py --- a/lib-python/modified-2.7/ctypes/__init__.py +++ b/lib-python/modified-2.7/ctypes/__init__.py @@ -489,9 +489,12 @@ _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI return CFunctionType -_cast = PYFUNCTYPE(py_object, c_void_p, py_object, py_object)(_cast_addr) def cast(obj, typ): - return _cast(obj, obj, typ) + try: + c_void_p.from_param(obj) + except TypeError, e: + raise ArgumentError(str(e)) + return _cast_addr(obj, obj, typ) _string_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr) def string_at(ptr, size=-1): diff --git a/lib-python/modified-2.7/ctypes/util.py b/lib-python/modified-2.7/ctypes/util.py --- a/lib-python/modified-2.7/ctypes/util.py +++ b/lib-python/modified-2.7/ctypes/util.py @@ 
-72,8 +72,8 @@ return name if os.name == "posix" and sys.platform == "darwin": - from ctypes.macholib.dyld import dyld_find as _dyld_find def find_library(name): + from ctypes.macholib.dyld import dyld_find as _dyld_find possible = ['lib%s.dylib' % name, '%s.dylib' % name, '%s.framework/%s' % (name, name)] diff --git a/lib-python/modified-2.7/distutils/unixccompiler.py b/lib-python/modified-2.7/distutils/unixccompiler.py --- a/lib-python/modified-2.7/distutils/unixccompiler.py +++ b/lib-python/modified-2.7/distutils/unixccompiler.py @@ -324,7 +324,7 @@ # On OSX users can specify an alternate SDK using # '-isysroot', calculate the SDK root if it is specified # (and use it further on) - cflags = sysconfig.get_config_var('CFLAGS') + cflags = sysconfig.get_config_var('CFLAGS') or '' m = re.search(r'-isysroot\s+(\S+)', cflags) if m is None: sysroot = '/' diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/gzip.py @@ -0,0 +1,514 @@ +"""Functions that read and write gzipped files. + +The user of the file doesn't have to worry about the compression, +but random access is not allowed.""" + +# based on Andrew Kuchling's minigzip.py distributed with the zlib module + +import struct, sys, time, os +import zlib +import io +import __builtin__ + +__all__ = ["GzipFile","open"] + +FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16 + +READ, WRITE = 1, 2 + +def write32u(output, value): + # The L format writes the bit pattern correctly whether signed + # or unsigned. + output.write(struct.pack("' + + def _check_closed(self): + """Raises a ValueError if the underlying file object has been closed. 
+ + """ + if self.closed: + raise ValueError('I/O operation on closed file.') + + def _init_write(self, filename): + self.name = filename + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + self.writebuf = [] + self.bufsize = 0 + + def _write_gzip_header(self): + self.fileobj.write('\037\213') # magic header + self.fileobj.write('\010') # compression method + fname = os.path.basename(self.name) + if fname.endswith(".gz"): + fname = fname[:-3] + flags = 0 + if fname: + flags = FNAME + self.fileobj.write(chr(flags)) + mtime = self.mtime + if mtime is None: + mtime = time.time() + write32u(self.fileobj, long(mtime)) + self.fileobj.write('\002') + self.fileobj.write('\377') + if fname: + self.fileobj.write(fname + '\000') + + def _init_read(self): + self.crc = zlib.crc32("") & 0xffffffffL + self.size = 0 + + def _read_gzip_header(self): + magic = self.fileobj.read(2) + if magic != '\037\213': + raise IOError, 'Not a gzipped file' + method = ord( self.fileobj.read(1) ) + if method != 8: + raise IOError, 'Unknown compression method' + flag = ord( self.fileobj.read(1) ) + self.mtime = read32(self.fileobj) + # extraflag = self.fileobj.read(1) + # os = self.fileobj.read(1) + self.fileobj.read(2) + + if flag & FEXTRA: + # Read & discard the extra field, if present + xlen = ord(self.fileobj.read(1)) + xlen = xlen + 256*ord(self.fileobj.read(1)) + self.fileobj.read(xlen) + if flag & FNAME: + # Read and discard a null-terminated string containing the filename + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FCOMMENT: + # Read and discard a null-terminated string containing a comment + while True: + s = self.fileobj.read(1) + if not s or s=='\000': + break + if flag & FHCRC: + self.fileobj.read(2) # Read & discard the 16-bit header CRC + + def write(self,data): + self._check_closed() + if self.mode != WRITE: + import errno + raise IOError(errno.EBADF, "write() on read-only GzipFile object") + + if self.fileobj is None: + raise 
ValueError, "write() on closed GzipFile object" + + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + if len(data) > 0: + self.size = self.size + len(data) + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + self.fileobj.write( self.compress.compress(data) ) + self.offset += len(data) + + return len(data) + + def read(self, size=-1): + self._check_closed() + if self.mode != READ: + import errno + raise IOError(errno.EBADF, "read() on write-only GzipFile object") + + if self.extrasize <= 0 and self.fileobj is None: + return '' + + readsize = 1024 + if size < 0: # get the whole thing + try: + while True: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + size = self.extrasize + elif size == 0: + return "" + else: # just get some more of it + try: + while size > self.extrasize: + self._read(readsize) + readsize = min(self.max_read_chunk, readsize * 2) + except EOFError: + if size > self.extrasize: + size = self.extrasize + + offset = self.offset - self.extrastart + chunk = self.extrabuf[offset: offset + size] + self.extrasize = self.extrasize - size + + self.offset += size + return chunk + + def _unread(self, buf): + self.extrasize = len(buf) + self.extrasize + self.offset -= len(buf) + + def _read(self, size=1024): + if self.fileobj is None: + raise EOFError, "Reached EOF" + + if self._new_member: + # If the _new_member flag is set, we have to + # jump to the next member, if there is one. + # + # First, check if we're at the end of the file; + # if so, it's time to stop; no more members to read. 
+ pos = self.fileobj.tell() # Save current position + self.fileobj.seek(0, 2) # Seek to end of file + if pos == self.fileobj.tell(): + raise EOFError, "Reached EOF" + else: + self.fileobj.seek( pos ) # Return to original position + + self._init_read() + self._read_gzip_header() + self.decompress = zlib.decompressobj(-zlib.MAX_WBITS) + self._new_member = False + + # Read a chunk of data from the file + buf = self.fileobj.read(size) + + # If the EOF has been reached, flush the decompression object + # and mark this object as finished. + + if buf == "": + uncompress = self.decompress.flush() + self._read_eof() + self._add_read_data( uncompress ) + raise EOFError, 'Reached EOF' + + uncompress = self.decompress.decompress(buf) + self._add_read_data( uncompress ) + + if self.decompress.unused_data != "": + # Ending case: we've come to the end of a member in the file, + # so seek back to the start of the unused data, finish up + # this member, and read a new gzip header. + # (The number of bytes to seek back is the length of the unused + # data, minus 8 because _read_eof() will rewind a further 8 bytes) + self.fileobj.seek( -len(self.decompress.unused_data)+8, 1) + + # Check the CRC and file size, and set the flag so we read + # a new member on the next call + self._read_eof() + self._new_member = True + + def _add_read_data(self, data): + self.crc = zlib.crc32(data, self.crc) & 0xffffffffL + offset = self.offset - self.extrastart + self.extrabuf = self.extrabuf[offset:] + data + self.extrasize = self.extrasize + len(data) + self.extrastart = self.offset + self.size = self.size + len(data) + + def _read_eof(self): + # We've read to the end of the file, so we have to rewind in order + # to reread the 8 bytes containing the CRC and the file size. + # We check the that the computed CRC and size of the + # uncompressed data matches the stored values. Note that the size + # stored is the true file size mod 2**32. 
+ self.fileobj.seek(-8, 1) + crc32 = read32(self.fileobj) + isize = read32(self.fileobj) # may exceed 2GB + if crc32 != self.crc: + raise IOError("CRC check failed %s != %s" % (hex(crc32), + hex(self.crc))) + elif isize != (self.size & 0xffffffffL): + raise IOError, "Incorrect length of data produced" + + # Gzip files can be padded with zeroes and still have archives. + # Consume all zero bytes and set the file position to the first + # non-zero byte. See http://www.gzip.org/#faq8 + c = "\x00" + while c == "\x00": + c = self.fileobj.read(1) + if c: + self.fileobj.seek(-1, 1) + + @property + def closed(self): + return self.fileobj is None + + def close(self): + if self.fileobj is None: + return + if self.mode == WRITE: + self.fileobj.write(self.compress.flush()) + write32u(self.fileobj, self.crc) + # self.size may exceed 2GB, or even 4GB + write32u(self.fileobj, self.size & 0xffffffffL) + self.fileobj = None + elif self.mode == READ: + self.fileobj = None + if self.myfileobj: + self.myfileobj.close() + self.myfileobj = None + + def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH): + self._check_closed() + if self.mode == WRITE: + # Ensure the compressor's buffer is flushed + self.fileobj.write(self.compress.flush(zlib_mode)) + self.fileobj.flush() + + def fileno(self): + """Invoke the underlying file object's fileno() method. + + This will raise AttributeError if the underlying file object + doesn't support fileno(). 
+ """ + return self.fileobj.fileno() + + def rewind(self): + '''Return the uncompressed stream file position indicator to the + beginning of the file''' + if self.mode != READ: + raise IOError("Can't rewind in write mode") + self.fileobj.seek(0) + self._new_member = True + self.extrabuf = "" + self.extrasize = 0 + self.extrastart = 0 + self.offset = 0 + + def readable(self): + return self.mode == READ + + def writable(self): + return self.mode == WRITE + + def seekable(self): + return True + + def seek(self, offset, whence=0): + if whence: + if whence == 1: + offset = self.offset + offset + else: + raise ValueError('Seek from end not supported') + if self.mode == WRITE: + if offset < self.offset: + raise IOError('Negative seek in write mode') + count = offset - self.offset + for i in range(count // 1024): + self.write(1024 * '\0') + self.write((count % 1024) * '\0') + elif self.mode == READ: + if offset == self.offset: + self.read(0) # to make sure that this file is open + return self.offset + if offset < self.offset: + # for negative seek, rewind and do positive seek + self.rewind() + count = offset - self.offset + for i in range(count // 1024): + self.read(1024) + self.read(count % 1024) + + return self.offset + + def readline(self, size=-1): + if size < 0: + # Shortcut common case - newline found in buffer. + offset = self.offset - self.extrastart + i = self.extrabuf.find('\n', offset) + 1 + if i > 0: + self.extrasize -= i - offset + self.offset += i - offset + return self.extrabuf[offset: i] + + size = sys.maxint + readsize = self.min_readsize + else: + readsize = size + bufs = [] + while size != 0: + c = self.read(readsize) + i = c.find('\n') + + # We set i=size to break out of the loop under two + # conditions: 1) there's no newline, and the chunk is + # larger than size, or 2) there is a newline, but the + # resulting line would be longer than 'size'. 
+ if (size <= i) or (i == -1 and len(c) > size): + i = size - 1 + + if i >= 0 or c == '': + bufs.append(c[:i + 1]) # Add portion of last chunk + self._unread(c[i + 1:]) # Push back rest of chunk + break + + # Append chunk to list, decrease 'size', + bufs.append(c) + size = size - len(c) + readsize = min(size, readsize * 2) + if readsize > self.min_readsize: + self.min_readsize = min(readsize, self.min_readsize * 2, 512) + return ''.join(bufs) # Return resulting line + + +def _test(): + # Act like gzip; with -d, act like gunzip. + # The input file is not deleted, however, nor are any other gzip + # options or features supported. + args = sys.argv[1:] + decompress = args and args[0] == "-d" + if decompress: + args = args[1:] + if not args: + args = ["-"] + for arg in args: + if decompress: + if arg == "-": + f = GzipFile(filename="", mode="rb", fileobj=sys.stdin) + g = sys.stdout + else: + if arg[-3:] != ".gz": + print "filename doesn't end in .gz:", repr(arg) + continue + f = open(arg, "rb") + g = __builtin__.open(arg[:-3], "wb") + else: + if arg == "-": + f = sys.stdin + g = GzipFile(filename="", mode="wb", fileobj=sys.stdout) + else: + f = __builtin__.open(arg, "rb") + g = open(arg + ".gz", "wb") + while True: + chunk = f.read(1024) + if not chunk: + break + g.write(chunk) + if g is not sys.stdout: + g.close() + if f is not sys.stdin: + f.close() + +if __name__ == '__main__': + _test() diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py --- a/lib-python/modified-2.7/sqlite3/test/regression.py +++ b/lib-python/modified-2.7/sqlite3/test/regression.py @@ -274,6 +274,18 @@ cur.execute("UPDATE foo SET id = 3 WHERE id = 1") self.assertEqual(cur.description, None) + def CheckStatementCache(self): + cur = self.con.cursor() + cur.execute("CREATE TABLE foo (id INTEGER)") + values = [(i,) for i in xrange(5)] + cur.executemany("INSERT INTO foo (id) VALUES (?)", values) + + cur.execute("SELECT id FROM foo") + 
self.assertEqual(list(cur), values) + self.con.commit() + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py --- a/lib-python/modified-2.7/tarfile.py +++ b/lib-python/modified-2.7/tarfile.py @@ -252,8 +252,8 @@ the high bit set. So we calculate two checksums, unsigned and signed. """ - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512])) + signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): @@ -265,7 +265,6 @@ if length is None: shutil.copyfileobj(src, dst) return - BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in xrange(blocks): @@ -802,19 +801,19 @@ if self.closed: raise ValueError("I/O operation on closed file") - buf = "" if self.buffer: if size is None: - buf = self.buffer + buf = self.buffer + self.fileobj.read() self.buffer = "" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() + buf += self.fileobj.read(size - len(buf)) else: - buf += self.fileobj.read(size - len(buf)) + if size is None: + buf = self.fileobj.read() + else: + buf = self.fileobj.read(size) self.position += len(buf) return buf diff --git a/lib-python/modified-2.7/test/regrtest.py b/lib-python/modified-2.7/test/regrtest.py --- a/lib-python/modified-2.7/test/regrtest.py +++ b/lib-python/modified-2.7/test/regrtest.py @@ -1403,7 +1403,26 @@ test_zipimport test_zlib """, - 'openbsd3': + 'openbsd4': + """ + test_ascii_formatd + test_bsddb + test_bsddb3 + test_ctypes + test_dl + 
test_epoll + test_gdbm + test_locale + test_normalization + test_ossaudiodev + test_pep277 + test_tcl + test_tk + test_ttk_guionly + test_ttk_textonly + test_multiprocessing + """, + 'openbsd5': """ test_ascii_formatd test_bsddb diff --git a/lib-python/modified-2.7/test/test_bz2.py b/lib-python/modified-2.7/test/test_bz2.py --- a/lib-python/modified-2.7/test/test_bz2.py +++ b/lib-python/modified-2.7/test/test_bz2.py @@ -50,6 +50,7 @@ self.filename = TESTFN def tearDown(self): + test_support.gc_collect() if os.path.isfile(self.filename): os.unlink(self.filename) diff --git a/lib-python/modified-2.7/test/test_fcntl.py b/lib-python/modified-2.7/test/test_fcntl.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7/test/test_fcntl.py @@ -0,0 +1,108 @@ +"""Test program for the fcntl C module. + +OS/2+EMX doesn't support the file locking operations. + +""" +import os +import struct +import sys +import unittest +from test.test_support import (verbose, TESTFN, unlink, run_unittest, + import_module) + +# Skip test if no fnctl module. +fcntl = import_module('fcntl') + + +# TODO - Write tests for flock() and lockf(). 
+ +def get_lockdata(): + if sys.platform.startswith('atheos'): + start_len = "qq" + else: + try: + os.O_LARGEFILE + except AttributeError: + start_len = "ll" + else: + start_len = "qq" + + if sys.platform in ('netbsd1', 'netbsd2', 'netbsd3', + 'Darwin1.2', 'darwin', + 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', + 'freebsd6', 'freebsd7', 'freebsd8', + 'bsdos2', 'bsdos3', 'bsdos4', + 'openbsd', 'openbsd2', 'openbsd3', 'openbsd4', 'openbsd5'): + if struct.calcsize('l') == 8: + off_t = 'l' + pid_t = 'i' + else: + off_t = 'lxxxx' + pid_t = 'l' + lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0, + fcntl.F_WRLCK, 0) + elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']: + lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0) + elif sys.platform in ['os2emx']: + lockdata = None + else: + lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) + if lockdata: + if verbose: + print 'struct.pack: ', repr(lockdata) + return lockdata + +lockdata = get_lockdata() + + +class TestFcntl(unittest.TestCase): + + def setUp(self): + self.f = None + + def tearDown(self): + if self.f and not self.f.closed: + self.f.close() + unlink(TESTFN) + + def test_fcntl_fileno(self): + # the example from the library docs + self.f = open(TESTFN, 'w') + rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) + if verbose: + print 'Status from fcntl with O_NONBLOCK: ', rv + if sys.platform not in ['os2emx']: + rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata) + if verbose: + print 'String from fcntl with F_SETLKW: ', repr(rv) + self.f.close() + + def test_fcntl_file_descriptor(self): + # again, but pass the file rather than numeric descriptor + self.f = open(TESTFN, 'w') + rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK) + if sys.platform not in ['os2emx']: + rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata) + self.f.close() + + def test_fcntl_64_bit(self): + # Issue #1309352: fcntl shouldn't fail when the third arg fits in 
a + # C 'long' but not in a C 'int'. + try: + cmd = fcntl.F_NOTIFY + # This flag is larger than 2**31 in 64-bit builds + flags = fcntl.DN_MULTISHOT + except AttributeError: + self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable") + fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY) + try: + fcntl.fcntl(fd, cmd, flags) + finally: + os.close(fd) + + +def test_main(): + run_unittest(TestFcntl) + +if __name__ == '__main__': + test_main() diff --git a/lib-python/modified-2.7/test/test_multibytecodec.py b/lib-python/modified-2.7/test/test_multibytecodec.py --- a/lib-python/modified-2.7/test/test_multibytecodec.py +++ b/lib-python/modified-2.7/test/test_multibytecodec.py @@ -148,7 +148,8 @@ class Test_StreamReader(unittest.TestCase): def test_bug1728403(self): try: - open(TESTFN, 'w').write('\xa1') + with open(TESTFN, 'w') as f: + f.write('\xa1') f = codecs.open(TESTFN, encoding='cp949') self.assertRaises(UnicodeDecodeError, f.read, 2) finally: diff --git a/lib-python/modified-2.7/test/test_tempfile.py b/lib-python/modified-2.7/test/test_tempfile.py --- a/lib-python/modified-2.7/test/test_tempfile.py +++ b/lib-python/modified-2.7/test/test_tempfile.py @@ -23,8 +23,8 @@ # TEST_FILES may need to be tweaked for systems depending on the maximum # number of files that can be opened at one time (see ulimit -n) -if sys.platform in ('openbsd3', 'openbsd4'): - TEST_FILES = 48 +if sys.platform.startswith("openbsd"): + TEST_FILES = 64 # ulimit -n defaults to 128 for normal users else: TEST_FILES = 100 diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -54,7 +54,8 @@ def get_ffi_argtype(self): if self._ffiargtype: return self._ffiargtype - return _shape_to_ffi_type(self._ffiargshape) + self._ffiargtype = _shape_to_ffi_type(self._ffiargshape) + return self._ffiargtype def _CData_output(self, resbuffer, base=None, index=-1): #assert isinstance(resbuffer, _rawffi.ArrayInstance) @@ 
-166,7 +167,8 @@ return tp._alignmentofinstances() def byref(cdata): - from ctypes import pointer + # "pointer" is imported at the end of this module to avoid circular + # imports return pointer(cdata) def cdata_from_address(self, address): @@ -224,5 +226,9 @@ 'Z' : _ffi.types.void_p, 'X' : _ffi.types.void_p, 'v' : _ffi.types.sshort, + '?' : _ffi.types.ubyte, } + +# used by "byref" +from _ctypes.pointer import pointer diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -78,8 +78,6 @@ _com_iid = None _is_fastpath = False - __restype_set = False - def _getargtypes(self): return self._argtypes_ @@ -93,13 +91,15 @@ raise TypeError( "item %d in _argtypes_ has no from_param method" % ( i + 1,)) - # - if all([hasattr(argtype, '_ffiargshape') for argtype in argtypes]): - fastpath_cls = make_fastpath_subclass(self.__class__) - fastpath_cls.enable_fastpath_maybe(self) self._argtypes_ = list(argtypes) + self._check_argtypes_for_fastpath() argtypes = property(_getargtypes, _setargtypes) + def _check_argtypes_for_fastpath(self): + if all([hasattr(argtype, '_ffiargshape') for argtype in self._argtypes_]): + fastpath_cls = make_fastpath_subclass(self.__class__) + fastpath_cls.enable_fastpath_maybe(self) + def _getparamflags(self): return self._paramflags @@ -149,7 +149,6 @@ return self._restype_ def _setrestype(self, restype): - self.__restype_set = True self._ptr = None if restype is int: from ctypes import c_int @@ -219,6 +218,7 @@ import ctypes restype = ctypes.c_int self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) + self._check_argtypes_for_fastpath() return @@ -296,13 +296,12 @@ "This function takes %d argument%s (%s given)" % (len(self._argtypes_), plural, len(args))) - # check that arguments are convertible - ## XXX Not as long as ctypes.cast is a callback function with - ## py_object arguments... 
- ## self._convert_args(self._argtypes_, args, {}) - try: - res = self.callable(*args) + newargs = self._convert_args_for_callback(argtypes, args) + except (UnicodeError, TypeError, ValueError), e: + raise ArgumentError(str(e)) + try: + res = self.callable(*newargs) except: exc_info = sys.exc_info() traceback.print_tb(exc_info[2], file=sys.stderr) @@ -316,10 +315,6 @@ warnings.warn('C function without declared arguments called', RuntimeWarning, stacklevel=2) argtypes = [] - - if not self.__restype_set: - warnings.warn('C function without declared return type called', - RuntimeWarning, stacklevel=2) if self._com_index: from ctypes import cast, c_void_p, POINTER @@ -366,7 +361,10 @@ if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR: set_last_error(_rawffi.get_last_error()) # - return self._build_result(self._restype_, result, newargs) + try: + return self._build_result(self._restype_, result, newargs) + finally: + funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol @@ -466,6 +464,19 @@ return cobj, cobj._to_ffi_param(), type(cobj) + def _convert_args_for_callback(self, argtypes, args): + assert len(argtypes) == len(args) + newargs = [] + for argtype, arg in zip(argtypes, args): + param = argtype.from_param(arg) + _type_ = getattr(argtype, '_type_', None) + if _type_ == 'P': # special-case for c_void_p + param = param._get_buffer_value() + elif self._is_primitive(argtype): + param = param.value + newargs.append(param) + return newargs + def _convert_args(self, argtypes, args, kwargs, marker=object()): newargs = [] outargs = [] @@ -556,6 +567,9 @@ newargtypes.append(newargtype) return keepalives, newargs, newargtypes, outargs + @staticmethod + def _is_primitive(argtype): + return argtype.__bases__[0] is _SimpleCData def _wrap_result(self, restype, result): """ @@ -564,7 +578,7 @@ """ # hack for performance: if restype is a "simple" primitive type, don't # allocate the buffer because it's going to be thrown away immediately - if 
restype.__bases__[0] is _SimpleCData and not restype._is_pointer_like(): + if self._is_primitive(restype) and not restype._is_pointer_like(): return result # shape = restype._ffishape @@ -680,7 +694,7 @@ try: result = self._call_funcptr(funcptr, *args) result = self._do_errcheck(result, args) - except (TypeError, ArgumentError): # XXX, should be FFITypeError + except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) return result diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -10,6 +10,8 @@ from _ctypes.builtin import ConvMode from _ctypes.array import Array from _ctypes.pointer import _Pointer, as_ffi_pointer +#from _ctypes.function import CFuncPtr # this import is moved at the bottom + # because else it's circular class NULL(object): pass @@ -86,7 +88,7 @@ return res if isinstance(value, Array): return value - if isinstance(value, _Pointer): + if isinstance(value, (_Pointer, CFuncPtr)): return cls.from_address(value._buffer.buffer) if isinstance(value, (int, long)): return cls(value) @@ -338,3 +340,5 @@ def __nonzero__(self): return self._buffer[0] not in (0, '\x00') + +from _ctypes.function import CFuncPtr diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -14,6 +14,15 @@ raise TypeError("Expected CData subclass, got %s" % (tp,)) if isinstance(tp, StructOrUnionMeta): tp._make_final() + if len(f) == 3: + if (not hasattr(tp, '_type_') + or not isinstance(tp._type_, str) + or tp._type_ not in "iIhHbBlL"): + #XXX: are those all types? 
+ # we just dont get the type name + # in the interp levle thrown TypeError + # from rawffi if there are more + raise TypeError('bit fields not allowed for type ' + tp.__name__) all_fields = [] for cls in reversed(inspect.getmro(superclass)): @@ -34,34 +43,37 @@ for i, field in enumerate(all_fields): name = field[0] value = field[1] + is_bitfield = (len(field) == 3) fields[name] = Field(name, self._ffistruct.fieldoffset(name), self._ffistruct.fieldsize(name), - value, i) + value, i, is_bitfield) if anonymous_fields: resnames = [] for i, field in enumerate(all_fields): name = field[0] value = field[1] + is_bitfield = (len(field) == 3) startpos = self._ffistruct.fieldoffset(name) if name in anonymous_fields: for subname in value._names: resnames.append(subname) - relpos = startpos + value._fieldtypes[subname].offset - subvalue = value._fieldtypes[subname].ctype + subfield = getattr(value, subname) + relpos = startpos + subfield.offset + subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i) + subvalue, i, is_bitfield) else: resnames.append(name) names = resnames self._names = names - self._fieldtypes = fields + self.__dict__.update(fields) class Field(object): - def __init__(self, name, offset, size, ctype, num): - for k in ('name', 'offset', 'size', 'ctype', 'num'): + def __init__(self, name, offset, size, ctype, num, is_bitfield): + for k in ('name', 'offset', 'size', 'ctype', 'num', 'is_bitfield'): self.__dict__[k] = locals()[k] def __setattr__(self, name, value): @@ -71,6 +83,35 @@ return "" % (self.name, self.offset, self.size) + def __get__(self, obj, cls=None): + if obj is None: + return self + if self.is_bitfield: + # bitfield member, use direct access + return obj._buffer.__getattr__(self.name) + else: + fieldtype = self.ctype + offset = self.num + suba = obj._subarray(fieldtype, self.name) + return fieldtype._CData_output(suba, obj, offset) + + + def __set__(self, obj, value): + fieldtype = self.ctype 
+ cobj = fieldtype.from_param(value) + if ensure_objects(cobj) is not None: + key = keepalive_key(self.num) + store_reference(obj, key, cobj._objects) + arg = cobj._get_buffer_value() + if fieldtype._fficompositesize is not None: + from ctypes import memmove + dest = obj._buffer.fieldaddress(self.name) + memmove(dest, arg, fieldtype._fficompositesize) + else: + obj._buffer.__setattr__(self.name, arg) + + + # ________________________________________________________________ def _set_shape(tp, rawfields, is_union=False): @@ -79,17 +120,12 @@ tp._ffiargshape = tp._ffishape = (tp._ffistruct, 1) tp._fficompositesize = tp._ffistruct.size -def struct_getattr(self, name): - if name not in ('_fields_', '_fieldtypes'): - if hasattr(self, '_fieldtypes') and name in self._fieldtypes: - return self._fieldtypes[name] - return _CDataMeta.__getattribute__(self, name) def struct_setattr(self, name, value): if name == '_fields_': if self.__dict__.get('_fields_', None) is not None: raise AttributeError("_fields_ is final") - if self in [v for k, v in value]: + if self in [f[1] for f in value]: raise AttributeError("Structure or union cannot contain itself") names_and_fields( self, @@ -127,14 +163,14 @@ if '_fields_' not in self.__dict__: self._fields_ = [] self._names = [] - self._fieldtypes = {} _set_shape(self, [], self._is_union) - __getattr__ = struct_getattr __setattr__ = struct_setattr def from_address(self, address): instance = StructOrUnion.__new__(self) + if isinstance(address, _rawffi.StructureInstance): + address = address.buffer instance.__dict__['_buffer'] = self._ffistruct.fromaddress(address) return instance @@ -200,40 +236,6 @@ A = _rawffi.Array(fieldtype._ffishape) return A.fromaddress(address, 1) - def __setattr__(self, name, value): - try: - field = self._fieldtypes[name] - except KeyError: - return _CData.__setattr__(self, name, value) - fieldtype = field.ctype - cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = 
keepalive_key(field.num) - store_reference(self, key, cobj._objects) - arg = cobj._get_buffer_value() - if fieldtype._fficompositesize is not None: - from ctypes import memmove - dest = self._buffer.fieldaddress(name) - memmove(dest, arg, fieldtype._fficompositesize) - else: - self._buffer.__setattr__(name, arg) - - def __getattribute__(self, name): - if name == '_fieldtypes': - return _CData.__getattribute__(self, '_fieldtypes') - try: - field = self._fieldtypes[name] - except KeyError: - return _CData.__getattribute__(self, name) - if field.size >> 16: - # bitfield member, use direct access - return self._buffer.__getattr__(name) - else: - fieldtype = field.ctype - offset = field.num - suba = self._subarray(fieldtype, name) - return fieldtype._CData_output(suba, self, offset) - def _get_buffer_for_param(self): return self diff --git a/lib_pypy/_elementtree.py b/lib_pypy/_elementtree.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_elementtree.py @@ -0,0 +1,6 @@ +# Just use ElementTree. 
+ +from xml.etree import ElementTree + +globals().update(ElementTree.__dict__) +del __all__ diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -56,6 +56,10 @@ prompt = getattr(sys, 'ps1', '>>> ') try: line = raw_input(prompt) + # Can be None if sys.stdin was redefined + encoding = getattr(sys.stdin, 'encoding', None) + if encoding and not isinstance(line, unicode): + line = line.decode(encoding) except EOFError: console.write("\n") break diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -24,6 +24,7 @@ from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast from ctypes import sizeof, c_ssize_t +from collections import OrderedDict import datetime import sys import time @@ -274,6 +275,28 @@ def unicode_text_factory(x): return unicode(x, 'utf-8') + +class StatementCache(object): + def __init__(self, connection, maxcount): + self.connection = connection + self.maxcount = maxcount + self.cache = OrderedDict() + + def get(self, sql, cursor, row_factory): + try: + stat = self.cache[sql] + except KeyError: + stat = Statement(self.connection, sql) + self.cache[sql] = stat + if len(self.cache) > self.maxcount: + self.cache.popitem(0) + # + if stat.in_use: + stat = Statement(self.connection, sql) + stat.set_row_factory(row_factory) + return stat + + class Connection(object): def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", check_same_thread=True, factory=None, cached_statements=100): @@ -291,6 +314,7 @@ self.row_factory = None self._isolation_level = isolation_level self.detect_types = detect_types + self.statement_cache = StatementCache(self, cached_statements) self.cursors = [] @@ -399,7 +423,7 @@ cur = Cursor(self) if not isinstance(sql, (str, unicode)): raise Warning("SQL is of wrong type. 
Must be string or unicode.") - statement = Statement(cur, sql, self.row_factory) + statement = self.statement_cache.get(sql, cur, self.row_factory) return statement def _get_isolation_level(self): @@ -681,6 +705,8 @@ from sqlite3.dump import _iterdump return _iterdump(self) +DML, DQL, DDL = range(3) + class Cursor(object): def __init__(self, con): if not isinstance(con, Connection): @@ -708,12 +734,12 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: - if self.statement.kind == "DDL": + if self.statement.kind == DDL: self.connection.commit() - elif self.statement.kind == "DML": + elif self.statement.kind == DML: self.connection._begin() self.statement.set_params(params) @@ -724,19 +750,18 @@ self.statement.reset() raise self.connection._get_exception(ret) - if self.statement.kind == "DQL": - if ret == SQLITE_ROW: - self.statement._build_row_cast_map() - self.statement._readahead() - else: - self.statement.item = None - self.statement.exhausted = True + if self.statement.kind == DQL and ret == SQLITE_ROW: + self.statement._build_row_cast_map() + self.statement._readahead(self) + else: + self.statement.item = None + self.statement.exhausted = True - if self.statement.kind in ("DML", "DDL"): + if self.statement.kind == DML or self.statement.kind == DDL: self.statement.reset() self.rowcount = -1 - if self.statement.kind == "DML": + if self.statement.kind == DML: self.rowcount = sqlite.sqlite3_changes(self.connection.db) return self @@ -747,8 +772,9 @@ if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() - self.statement = Statement(self, sql, self.row_factory) - if self.statement.kind == "DML": + self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) + + if self.statement.kind == DML: self.connection._begin() 
else: raise ProgrammingError, "executemany is only for DML statements" @@ -800,7 +826,7 @@ return self def __iter__(self): - return self.statement + return iter(self.fetchone, None) def _check_reset(self): if self.reset: @@ -817,7 +843,7 @@ return None try: - return self.statement.next() + return self.statement.next(self) except StopIteration: return None @@ -831,7 +857,7 @@ if size is None: size = self.arraysize lst = [] - for row in self.statement: + for row in self: lst.append(row) if len(lst) == size: break @@ -842,7 +868,7 @@ self._check_reset() if self.statement is None: return [] - return list(self.statement) + return list(self) def _getdescription(self): if self._description is None: @@ -872,39 +898,47 @@ lastrowid = property(_getlastrowid) class Statement(object): - def __init__(self, cur, sql, row_factory): + def __init__(self, connection, sql): self.statement = None if not isinstance(sql, str): raise ValueError, "sql must be a string" - self.con = cur.connection - self.cur = weakref.ref(cur) + self.con = connection self.sql = sql # DEBUG ONLY - self.row_factory = row_factory first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): - self.kind = "DML" + self.kind = DML elif first_word in ("SELECT", "PRAGMA"): - self.kind = "DQL" + self.kind = DQL else: - self.kind = "DDL" + self.kind = DDL self.exhausted = False + self.in_use = False + # + # set by set_row_factory + self.row_factory = None self.statement = c_void_p() next_char = c_char_p() - ret = sqlite.sqlite3_prepare_v2(self.con.db, sql, -1, byref(self.statement), byref(next_char)) + sql_char = c_char_p(sql) + ret = sqlite.sqlite3_prepare_v2(self.con.db, sql_char, -1, byref(self.statement), byref(next_char)) if ret == SQLITE_OK and self.statement.value is None: # an empty statement, we work around that, as it's the least trouble ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char)) 
- self.kind = "DQL" + self.kind = DQL if ret != SQLITE_OK: raise self.con._get_exception(ret) self.con._remember_statement(self) if _check_remaining_sql(next_char.value): - raise Warning, "One and only one statement required" + raise Warning, "One and only one statement required: %r" % ( + next_char.value,) + # sql_char should remain alive until here self._build_row_cast_map() + def set_row_factory(self, row_factory): + self.row_factory = row_factory + def _build_row_cast_map(self): self.row_cast_map = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): @@ -974,6 +1008,7 @@ ret = sqlite.sqlite3_reset(self.statement) if ret != SQLITE_OK: raise self.con._get_exception(ret) + self.mark_dirty() if params is None: if sqlite.sqlite3_bind_parameter_count(self.statement) != 0: @@ -1004,10 +1039,7 @@ raise ProgrammingError("missing parameter '%s'" %param) self.set_param(idx, param) - def __iter__(self): - return self - - def next(self): + def next(self, cursor): self.con._check_closed() self.con._check_thread() if self.exhausted: @@ -1023,10 +1055,10 @@ sqlite.sqlite3_reset(self.statement) raise exc - self._readahead() + self._readahead(cursor) return item - def _readahead(self): + def _readahead(self, cursor): self.column_count = sqlite.sqlite3_column_count(self.statement) row = [] for i in xrange(self.column_count): @@ -1061,23 +1093,30 @@ row = tuple(row) if self.row_factory is not None: - row = self.row_factory(self.cur(), row) + row = self.row_factory(cursor, row) self.item = row def reset(self): self.row_cast_map = None - return sqlite.sqlite3_reset(self.statement) + ret = sqlite.sqlite3_reset(self.statement) + self.in_use = False + self.exhausted = False + return ret def finalize(self): sqlite.sqlite3_finalize(self.statement) self.statement = None + self.in_use = False + + def mark_dirty(self): + self.in_use = True def __del__(self): sqlite.sqlite3_finalize(self.statement) self.statement = None def _get_description(self): - if self.kind == "DML": + if 
self.kind == DML: return None desc = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): diff --git a/lib_pypy/_subprocess.py b/lib_pypy/_subprocess.py --- a/lib_pypy/_subprocess.py +++ b/lib_pypy/_subprocess.py @@ -35,7 +35,7 @@ _DuplicateHandle.restype = ctypes.c_int _WaitForSingleObject = _kernel32.WaitForSingleObject -_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_int] +_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_uint] _WaitForSingleObject.restype = ctypes.c_int _GetExitCodeProcess = _kernel32.GetExitCodeProcess diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py --- a/lib_pypy/distributed/test/test_distributed.py +++ b/lib_pypy/distributed/test/test_distributed.py @@ -9,7 +9,7 @@ class AppTestDistributed(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) def test_init(self): import distributed @@ -91,10 +91,8 @@ class AppTestDistributedTasklets(object): spaceconfig = {"objspace.std.withtproxy": True, - "objspace.usemodules._stackless": True} + "objspace.usemodules._continuation": True} def setup_class(cls): - #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - # "usemodules":("_stackless",)}) cls.w_test_env = cls.space.appexec([], """(): from distributed import test_env return test_env diff --git a/lib_pypy/distributed/test/test_greensock.py b/lib_pypy/distributed/test/test_greensock.py --- a/lib_pypy/distributed/test/test_greensock.py +++ b/lib_pypy/distributed/test/test_greensock.py @@ -10,7 +10,7 @@ if not option.runappdirect: py.test.skip("Cannot run this on top of py.py because of PopenGateway") cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) cls.w_remote_side_code = cls.space.appexec([], """(): import sys sys.path.insert(0, '%s') diff --git 
a/lib_pypy/distributed/test/test_socklayer.py b/lib_pypy/distributed/test/test_socklayer.py --- a/lib_pypy/distributed/test/test_socklayer.py +++ b/lib_pypy/distributed/test/test_socklayer.py @@ -9,7 +9,8 @@ class AppTestSocklayer: def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless","_socket", "select")}) + "usemodules":("_continuation", + "_socket", "select")}) def test_socklayer(self): class X(object): diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,1 +1,151 @@ -from _stackless import greenlet +import _continuation, sys + + +# ____________________________________________________________ +# Exceptions + +class GreenletExit(Exception): + """This special exception does not propagate to the parent greenlet; it +can be used to kill a single greenlet.""" + +error = _continuation.error + +# ____________________________________________________________ +# Helper function + +def getcurrent(): + "Returns the current greenlet (i.e. the one which called this function)." + try: + return _tls.current + except AttributeError: + # first call in this thread: current == main + _green_create_main() + return _tls.current + +# ____________________________________________________________ +# The 'greenlet' class + +_continulet = _continuation.continulet + +class greenlet(_continulet): + getcurrent = staticmethod(getcurrent) + error = error + GreenletExit = GreenletExit + __main = False + __started = False + + def __new__(cls, *args, **kwds): + self = _continulet.__new__(cls) + self.parent = getcurrent() + return self + + def __init__(self, run=None, parent=None): + if run is not None: + self.run = run + if parent is not None: + self.parent = parent + + def switch(self, *args): + "Switch execution to this greenlet, optionally passing the values " + "given as argument(s). Returns the value passed when switching back." 
+ return self.__switch(_continulet.switch, args) + + def throw(self, typ=GreenletExit, val=None, tb=None): + "raise exception in greenlet, return value passed when switching back" + return self.__switch(_continulet.throw, typ, val, tb) + + def __switch(target, unbound_method, *args): + current = getcurrent() + # + while not target: + if not target.__started: + if unbound_method != _continulet.throw: + greenlet_func = _greenlet_start + else: + greenlet_func = _greenlet_throw + _continulet.__init__(target, greenlet_func, *args) + unbound_method = _continulet.switch + args = () + target.__started = True + break + # already done, go to the parent instead + # (NB. infinite loop possible, but unlikely, unless you mess + # up the 'parent' explicitly. Good enough, because a Ctrl-C + # will show that the program is caught in this loop here.) + target = target.parent + # + try: + if current.__main: + if target.__main: + # switch from main to main + if unbound_method == _continulet.throw: + raise args[0], args[1], args[2] + (args,) = args + else: + # enter from main to target + args = unbound_method(target, *args) + else: + if target.__main: + # leave to go to target=main + args = unbound_method(current, *args) + else: + # switch from non-main to non-main + args = unbound_method(current, *args, to=target) + except GreenletExit, e: + args = (e,) + finally: + _tls.current = current + # + if len(args) == 1: + return args[0] + else: + return args + + def __nonzero__(self): + return self.__main or _continulet.is_pending(self) + + @property + def dead(self): + return self.__started and not self + + @property + def gr_frame(self): + raise NotImplementedError("attribute 'gr_frame' of greenlet objects") + +# ____________________________________________________________ +# Internal stuff + +try: + from thread import _local +except ImportError: + class _local(object): # assume no threads + pass + +_tls = _local() + +def _green_create_main(): + # create the main greenlet for this thread + 
_tls.current = None + gmain = greenlet.__new__(greenlet) + gmain._greenlet__main = True + gmain._greenlet__started = True + assert gmain.parent is None + _tls.main = gmain + _tls.current = gmain + +def _greenlet_start(greenlet, args): + _tls.current = greenlet + try: + res = greenlet.run(*args) + finally: + if greenlet.parent is not _tls.main: + _continuation.permute(greenlet, greenlet.parent) + return (res,) + +def _greenlet_throw(greenlet, exc, value, tb): + _tls.current = greenlet + try: + raise exc, value, tb + finally: + if greenlet.parent is not _tls.main: + _continuation.permute(greenlet, greenlet.parent) diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py --- a/lib_pypy/pypy_test/test_coroutine.py +++ b/lib_pypy/pypy_test/test_coroutine.py @@ -2,7 +2,7 @@ from py.test import skip, raises try: - from lib_pypy.stackless import coroutine, CoroutineExit + from stackless import coroutine, CoroutineExit except ImportError, e: skip('cannot import stackless: %s' % (e,)) @@ -20,10 +20,6 @@ assert not co.is_zombie def test_is_zombie_del_without_frame(self): - try: - import _stackless # are we on pypy with a stackless build? - except ImportError: - skip("only works on pypy-c-stackless") import gc res = [] class MyCoroutine(coroutine): @@ -45,10 +41,6 @@ assert res[0], "is_zombie was False in __del__" def test_is_zombie_del_with_frame(self): - try: - import _stackless # are we on pypy with a stackless build? 
- except ImportError: - skip("only works on pypy-c-stackless") import gc res = [] class MyCoroutine(coroutine): diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -401,13 +401,19 @@ return "(arg: %s) "%self.arg if "\n" in self.buffer: if lineno == 0: - return self._ps2 + res = self.ps2 elif lineno == self.buffer.count("\n"): - return self._ps4 + res = self.ps4 else: - return self._ps3 + res = self.ps3 else: - return self._ps1 + res = self.ps1 + # Lazily call str() on self.psN, and cache the results using as key + # the object on which str() was called. This ensures that even if the + # same object is used e.g. for ps1 and ps2, str() is called only once. + if res not in self._pscache: + self._pscache[res] = str(res) + return self._pscache[res] def push_input_trans(self, itrans): self.input_trans_stack.append(self.input_trans) @@ -473,8 +479,7 @@ self.pos = 0 self.dirty = 1 self.last_command = None - self._ps1, self._ps2, self._ps3, self._ps4 = \ - map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + self._pscache = {} except: self.restore() raise @@ -571,7 +576,7 @@ self.console.push_char(char) self.handle1(0) - def readline(self): + def readline(self, returns_unicode=False): """Read a line. 
The implementation of this method also shows how to drive Reader if you want more control over the event loop.""" @@ -580,6 +585,8 @@ self.refresh() while not self.finished: self.handle1() + if returns_unicode: + return self.get_unicode() return self.get_buffer() finally: self.restore() diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -33,7 +33,7 @@ from pyrepl.unix_console import UnixConsole, _error -ENCODING = 'latin1' # XXX hard-coded +ENCODING = sys.getfilesystemencoding() or 'latin1' # XXX review __all__ = ['add_history', 'clear_history', @@ -198,7 +198,7 @@ reader.ps1 = prompt return reader.readline() - def multiline_input(self, more_lines, ps1, ps2): + def multiline_input(self, more_lines, ps1, ps2, returns_unicode=False): """Read an input on possibly multiple lines, asking for more lines as long as 'more_lines(unicodetext)' returns an object whose boolean value is true. @@ -209,7 +209,7 @@ reader.more_lines = more_lines reader.ps1 = reader.ps2 = ps1 reader.ps3 = reader.ps4 = ps2 - return reader.readline() + return reader.readline(returns_unicode=returns_unicode) finally: reader.more_lines = saved diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -54,7 +54,8 @@ ps1 = getattr(sys, 'ps1', '>>> ') ps2 = getattr(sys, 'ps2', '... ') try: - statement = multiline_input(more_lines, ps1, ps2) + statement = multiline_input(more_lines, ps1, ps2, + returns_unicode=True) except EOFError: break more = console.push(statement) diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -4,121 +4,124 @@ Please refer to their documentation. 
""" -DEBUG = True - -def dprint(*args): - for arg in args: - print arg, - print import traceback -import sys +import _continuation +from functools import partial + +class TaskletExit(Exception): + pass + +CoroutineExit = TaskletExit + +class GWrap(_continuation.continulet): + """This is just a wrapper around continulet to allow + to stick additional attributes to a continulet. + To be more concrete, we need a backreference to + the coroutine object""" + + +class coroutine(object): + "we can't have continulet as a base, because continulets can't be rebound" + + def __init__(self): + self._frame = None + self.is_zombie = False + + def __getattr__(self, attr): + return getattr(self._frame, attr) + + def __del__(self): + self.is_zombie = True + del self._frame + self._frame = None + + def bind(self, func, *argl, **argd): + """coro.bind(f, *argl, **argd) -> None. + binds function f to coro. f will be called with + arguments *argl, **argd + """ + if self._frame is None or not self._frame.is_pending(): + + def _func(c, *args, **kwargs): + return func(*args, **kwargs) + + run = partial(_func, *argl, **argd) + self._frame = frame = GWrap(run) + else: + raise ValueError("cannot bind a bound coroutine") + + def switch(self): + """coro.switch() -> returnvalue + switches to coroutine coro. 
If the bound function + f finishes, the returnvalue is that of f, otherwise + None is returned + """ + current = _getcurrent() + current._jump_to(self) + + def _jump_to(self, coroutine): + _tls.current_coroutine = coroutine + self._frame.switch(to=coroutine._frame) + + def kill(self): + """coro.kill() : kill coroutine coro""" + _tls.current_coroutine = self + self._frame.throw(CoroutineExit) + + def _is_alive(self): + if self._frame is None: + return False + return not self._frame.is_pending() + is_alive = property(_is_alive) + del _is_alive + + def getcurrent(): + """coroutine.getcurrent() -> the currently running coroutine""" + try: + return _getcurrent() + except AttributeError: + return _maincoro + getcurrent = staticmethod(getcurrent) + + def __reduce__(self): + raise TypeError, 'pickling is not possible based upon continulets' + + +def _getcurrent(): + "Returns the current coroutine (i.e. the one which called this function)." + try: + return _tls.current_coroutine + except AttributeError: + # first call in this thread: current == main + _coroutine_create_main() + return _tls.current_coroutine + try: - # If _stackless can be imported then TaskletExit and CoroutineExit are - # automatically added to the builtins. 
- from _stackless import coroutine, greenlet -except ImportError: # we are running from CPython - from greenlet import greenlet, GreenletExit - TaskletExit = CoroutineExit = GreenletExit - del GreenletExit - try: - from functools import partial - except ImportError: # we are not running python 2.5 - class partial(object): - # just enough of 'partial' to be usefull - def __init__(self, func, *argl, **argd): - self.func = func - self.argl = argl - self.argd = argd + from thread import _local +except ImportError: + class _local(object): # assume no threads + pass - def __call__(self): - return self.func(*self.argl, **self.argd) +_tls = _local() - class GWrap(greenlet): - """This is just a wrapper around greenlets to allow - to stick additional attributes to a greenlet. - To be more concrete, we need a backreference to - the coroutine object""" +def _coroutine_create_main(): + # create the main coroutine for this thread + _tls.current_coroutine = None + main_coroutine = coroutine() + main_coroutine.bind(lambda x:x) + _tls.main_coroutine = main_coroutine + _tls.current_coroutine = main_coroutine + return main_coroutine - class MWrap(object): - def __init__(self,something): - self.something = something - def __getattr__(self, attr): - return getattr(self.something, attr) +_maincoro = _coroutine_create_main() - class coroutine(object): - "we can't have greenlet as a base, because greenlets can't be rebound" - - def __init__(self): - self._frame = None - self.is_zombie = False - - def __getattr__(self, attr): - return getattr(self._frame, attr) - - def __del__(self): - self.is_zombie = True - del self._frame - self._frame = None - - def bind(self, func, *argl, **argd): - """coro.bind(f, *argl, **argd) -> None. - binds function f to coro. 
f will be called with - arguments *argl, **argd - """ - if self._frame is None or self._frame.dead: - self._frame = frame = GWrap() - frame.coro = self - if hasattr(self._frame, 'run') and self._frame.run: - raise ValueError("cannot bind a bound coroutine") - self._frame.run = partial(func, *argl, **argd) - - def switch(self): - """coro.switch() -> returnvalue - switches to coroutine coro. If the bound function - f finishes, the returnvalue is that of f, otherwise - None is returned - """ - try: - return greenlet.switch(self._frame) - except TypeError, exp: # self._frame is the main coroutine - return greenlet.switch(self._frame.something) - - def kill(self): - """coro.kill() : kill coroutine coro""" - self._frame.throw() - - def _is_alive(self): - if self._frame is None: - return False - return not self._frame.dead - is_alive = property(_is_alive) - del _is_alive - - def getcurrent(): - """coroutine.getcurrent() -> the currently running coroutine""" - try: - return greenlet.getcurrent().coro - except AttributeError: - return _maincoro - getcurrent = staticmethod(getcurrent) - - def __reduce__(self): - raise TypeError, 'pickling is not possible based upon greenlets' - - _maincoro = coroutine() - maingreenlet = greenlet.getcurrent() - _maincoro._frame = frame = MWrap(maingreenlet) - frame.coro = _maincoro - del frame - del maingreenlet from collections import deque import operator -__all__ = 'run getcurrent getmain schedule tasklet channel coroutine \ - greenlet'.split() +__all__ = 'run getcurrent getmain schedule tasklet channel coroutine'.split() _global_task_id = 0 _squeue = None @@ -131,7 +134,8 @@ def _scheduler_remove(value): try: del _squeue[operator.indexOf(_squeue, value)] - except ValueError:pass + except ValueError: + pass def _scheduler_append(value, normal=True): if normal: diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py --- a/pypy/annotation/builtin.py +++ b/pypy/annotation/builtin.py @@ -308,9 +308,6 @@ clsdef = 
clsdef.commonbase(cdef) return SomeInstance(clsdef) -def robjmodel_we_are_translated(): - return immutablevalue(True) - def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None): if s_force_non_null is None: force_non_null = False @@ -376,8 +373,6 @@ BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.intmask] = rarith_intmask BUILTIN_ANALYZERS[pypy.rlib.objectmodel.instantiate] = robjmodel_instantiate -BUILTIN_ANALYZERS[pypy.rlib.objectmodel.we_are_translated] = ( - robjmodel_we_are_translated) BUILTIN_ANALYZERS[pypy.rlib.objectmodel.r_dict] = robjmodel_r_dict BUILTIN_ANALYZERS[pypy.rlib.objectmodel.hlinvoke] = robjmodel_hlinvoke BUILTIN_ANALYZERS[pypy.rlib.objectmodel.keepalive_until_here] = robjmodel_keepalive_until_here @@ -416,7 +411,8 @@ from pypy.annotation.model import SomePtr from pypy.rpython.lltypesystem import lltype -def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None): +def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None, + s_add_memory_pressure=None): assert (s_n is None or s_n.knowntype == int or issubclass(s_n.knowntype, pypy.rlib.rarithmetic.base_int)) assert s_T.is_constant() @@ -432,6 +428,8 @@ else: assert s_flavor.is_constant() assert s_track_allocation is None or s_track_allocation.is_constant() + assert (s_add_memory_pressure is None or + s_add_memory_pressure.is_constant()) # not sure how to call malloc() for the example 'p' in the # presence of s_extraargs r = SomePtr(lltype.Ptr(s_T.const)) diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -399,9 +399,7 @@ if b1 is object: continue if b1.__dict__.get('_mixin_', False): - assert b1.__bases__ == () or b1.__bases__ == (object,), ( - "mixin class %r should have no base" % (b1,)) - self.add_sources_for_class(b1, mixin=True) + self.add_mixin(b1) else: assert base is object, ("multiple inheritance only supported " "with _mixin_: %r" % (cls,)) @@ 
-469,6 +467,15 @@ return self.classdict[name] = Constant(value) + def add_mixin(self, base): + for subbase in base.__bases__: + if subbase is object: + continue + assert subbase.__dict__.get("_mixin_", False), ("Mixin class %r has non" + "mixin base class %r" % (base, subbase)) + self.add_mixin(subbase) + self.add_sources_for_class(base, mixin=True) + def add_sources_for_class(self, cls, mixin=False): for name, value in cls.__dict__.items(): self.add_source_attribute(name, value, mixin) diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -134,7 +134,7 @@ for child in self._children: subpath = fullpath + "." + child._name toctree.append(subpath) - content.add(Directive("toctree", *toctree, maxdepth=4)) + content.add(Directive("toctree", *toctree, **{'maxdepth': 4})) content.join( ListItem(Strong("name:"), self._name), ListItem(Strong("description:"), self.doc)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -27,13 +27,14 @@ # --allworkingmodules working_modules = default_modules.copy() working_modules.update(dict.fromkeys( - ["_socket", "unicodedata", "mmap", "fcntl", "_locale", + ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "_bisect", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi"] + "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_continuation"] )) translation_modules = default_modules.copy() @@ -57,6 +58,7 @@ # unix only modules del working_modules["crypt"] del working_modules["fcntl"] + del working_modules["pwd"] del working_modules["termios"] del working_modules["_minimal_curses"] 
@@ -99,6 +101,7 @@ "_ssl" : ["pypy.module._ssl.interp_ssl"], "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], + "_continuation": ["pypy.rlib.rstacklet"], } def get_module_validator(modname): diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py --- a/pypy/config/test/test_config.py +++ b/pypy/config/test/test_config.py @@ -1,5 +1,5 @@ from pypy.config.config import * -import py +import py, sys def make_description(): gcoption = ChoiceOption('name', 'GC name', ['ref', 'framework'], 'ref') @@ -69,13 +69,15 @@ attrs = dir(config) assert '__repr__' in attrs # from the type assert '_cfgimpl_values' in attrs # from self - assert 'gc' in attrs # custom attribute - assert 'objspace' in attrs # custom attribute + if sys.version_info >= (2, 6): + assert 'gc' in attrs # custom attribute + assert 'objspace' in attrs # custom attribute # attrs = dir(config.gc) - assert 'name' in attrs - assert 'dummy' in attrs - assert 'float' in attrs + if sys.version_info >= (2, 6): + assert 'name' in attrs + assert 'dummy' in attrs + assert 'float' in attrs def test_arbitrary_option(): descr = OptionDescription("top", "", [ @@ -279,11 +281,11 @@ def test_underscore_in_option_name(): descr = OptionDescription("opt", "", [ - BoolOption("_stackless", "", default=False), + BoolOption("_foobar", "", default=False), ]) config = Config(descr) parser = to_optparse(config) - assert parser.has_option("--_stackless") + assert parser.has_option("--_foobar") def test_none(): dummy1 = BoolOption('dummy1', 'doc dummy', default=False, cmdline=None) diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -13,6 +13,10 @@ DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0 DEFL_GC = "minimark" +if sys.platform.startswith("linux"): + DEFL_ROOTFINDER_WITHJIT = "asmgcc" +else: + DEFL_ROOTFINDER_WITHJIT = "shadowstack" 
IS_64_BITS = sys.maxint > 2147483647 @@ -24,10 +28,9 @@ translation_optiondescription = OptionDescription( "translation", "Translation Options", [ - BoolOption("stackless", "enable stackless features during compilation", - default=False, cmdline="--stackless", - requires=[("translation.type_system", "lltype"), - ("translation.gcremovetypeptr", False)]), # XXX? + BoolOption("continuation", "enable single-shot continuations", + default=False, cmdline="--continuation", + requires=[("translation.type_system", "lltype")]), ChoiceOption("type_system", "Type system to use when RTyping", ["lltype", "ootype"], cmdline=None, default="lltype", requires={ @@ -66,7 +69,8 @@ "statistics": [("translation.gctransformer", "framework")], "generation": [("translation.gctransformer", "framework")], "hybrid": [("translation.gctransformer", "framework")], - "boehm": [("translation.gctransformer", "boehm")], + "boehm": [("translation.gctransformer", "boehm"), + ("translation.continuation", False)], # breaks "markcompact": [("translation.gctransformer", "framework")], "minimark": [("translation.gctransformer", "framework")], }, @@ -109,7 +113,7 @@ BoolOption("jit", "generate a JIT", default=False, suggests=[("translation.gc", DEFL_GC), - ("translation.gcrootfinder", "asmgcc"), + ("translation.gcrootfinder", DEFL_ROOTFINDER_WITHJIT), ("translation.list_comprehension_operations", True)]), ChoiceOption("jit_backend", "choose the backend for the JIT", ["auto", "x86", "x86-without-sse2", "llvm"], @@ -385,8 +389,6 @@ config.translation.suggest(withsmallfuncsets=5) elif word == 'jit': config.translation.suggest(jit=True) - if config.translation.stackless: - raise NotImplementedError("JIT conflicts with stackless for now") elif word == 'removetypeptr': config.translation.suggest(gcremovetypeptr=True) else: diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,11 +1,10 @@ .. 
_`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ -.. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ +.. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ -.. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/annotation`: .. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ @@ -55,7 +54,6 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py .. _`pypy/objspace`: .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ .. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py @@ -117,6 +115,7 @@ .. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ .. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ .. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +.. 
_`pypy/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/src/stacklet/ .. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/ .. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/ .. _`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/ diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -153,7 +153,7 @@ * Optionally, `various transformations`_ can then be applied which, for example, perform optimizations such as inlining, add capabilities - such as stackless_-style concurrency, or insert code for the + such as stackless-style concurrency (deprecated), or insert code for the `garbage collector`_. * Then, the graphs are converted to source code for the target platform @@ -255,7 +255,6 @@ .. _Python: http://docs.python.org/reference/ .. _Psyco: http://psyco.sourceforge.net -.. _stackless: stackless.html .. _`generate Just-In-Time Compilers`: jit/index.html .. _`JIT Generation in PyPy`: jit/index.html .. _`implement your own interpreter`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.5' +version = '1.6' # The full version, including alpha/beta/rc tags. -release = '1.5' +release = '1.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._continuation.txt rename from pypy/doc/config/objspace.usemodules._stackless.txt rename to pypy/doc/config/objspace.usemodules._continuation.txt --- a/pypy/doc/config/objspace.usemodules._stackless.txt +++ b/pypy/doc/config/objspace.usemodules._continuation.txt @@ -1,6 +1,4 @@ -Use the '_stackless' module. +Use the '_continuation' module. -Exposes the `stackless` primitives, and also implies a stackless build. -See also :config:`translation.stackless`. - -.. _`stackless`: ../stackless.html +Exposes the `continulet` app-level primitives. +See also :config:`translation.continuation`. diff --git a/pypy/doc/config/objspace.usemodules.pwd.txt b/pypy/doc/config/objspace.usemodules.pwd.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules.pwd.txt @@ -0,0 +1,2 @@ +Use the 'pwd' module. +This module is expected to be fully working. diff --git a/pypy/doc/config/translation.stackless.txt b/pypy/doc/config/translation.continuation.txt rename from pypy/doc/config/translation.stackless.txt rename to pypy/doc/config/translation.continuation.txt --- a/pypy/doc/config/translation.stackless.txt +++ b/pypy/doc/config/translation.continuation.txt @@ -1,5 +1,2 @@ -Run the `stackless transform`_ on each generated graph, which enables the use -of coroutines at RPython level and the "stackless" module when translating -PyPy. - -.. _`stackless transform`: ../stackless.html +Enable the use of a stackless-like primitive called "stacklet". +In PyPy, this is exposed at app-level by the "_continuation" module. 
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -9,22 +9,22 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni Michael Hudson Holger Krekel + Benjamin Peterson Christian Tismer - Benjamin Peterson + Hakan Ardo + Alex Gaynor Eric van Riet Paap - Anders Chrigström - Håkan Ardö + Anders Chrigstrom + David Schneider Richard Emslie Dan Villiom Podlaski Christiansen Alexander Schremmer - Alex Gaynor - David Schneider - Aurelién Campeas + Aurelien Campeas Anders Lehmann Camillo Bruni Niklaus Haldimann @@ -35,16 +35,17 @@ Bartosz Skowron Jakub Gustak Guido Wesdorp + Daniel Roberts Adrien Di Mascio Laura Creighton Ludovic Aubry Niko Matsakis - Daniel Roberts Jason Creighton - Jacob Hallén + Jacob Hallen Alex Martelli Anders Hammarquist Jan de Mooij + Wim Lavrijsen Stephan Diehl Michael Foord Stefan Schwarzer @@ -55,9 +56,13 @@ Alexandre Fayolle Marius Gedminas Simon Burton + Justin Peel Jean-Paul Calderone John Witulski + Lukas Diekmann + holger krekel Wim Lavrijsen + Dario Bertini Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum @@ -69,15 +74,16 @@ Georg Brandl Gerald Klix Wanja Saatkamp + Ronny Pfannschmidt Boris Feigin Oscar Nierstrasz - Dario Bertini David Malcolm Eugene Oden Henry Mason + Sven Hager Lukas Renggli + Ilya Osadchiy Guenter Jantzen - Ronny Pfannschmidt Bert Freudenberg Amit Regmi Ben Young @@ -94,8 +100,8 @@ Jared Grubb Karl Bartel Gabriel Lavoie + Victor Stinner Brian Dorsey - Victor Stinner Stuart Williams Toby Watson Antoine Pitrou @@ -106,19 +112,23 @@ Jonathan David Riehl Elmo Mäntynen Anders Qvist - Beatrice Düring + Beatrice During Alexander Sedov + Timo Paulssen + Corbin Simpson Vincent Legoll + Romain Guillebert Alan McIntyre - Romain Guillebert Alex Perry Jens-Uwe Mager + Simon Cross Dan Stromberg - Lukas Diekmann + Guillebert Romain Carl Meyer Pieter Zieschang Alejandro J. 
Cura Sylvain Thenault + Christoph Gerum Travis Francis Athougies Henrik Vendelbo Lutz Paelike @@ -129,6 +139,7 @@ Miguel de Val Borro Ignas Mikalajunas Artur Lisiecki + Philip Jenvey Joshua Gilbert Godefroid Chappelle Yusei Tahara @@ -137,24 +148,29 @@ Gustavo Niemeyer William Leslie Akira Li - Kristján Valur Jónsson + Kristjan Valur Jonsson Bobby Impollonia + Michael Hudson-Doyle Andrew Thompson Anders Sigfridsson + Floris Bruynooghe Jacek Generowicz Dan Colish - Sven Hager Zooko Wilcox-O Hearn + Dan Villiom Podlaski Christiansen Anders Hammarquist + Chris Lambacher Dinu Gherman Dan Colish + Brett Cannon Daniel Neuhäuser Michael Chermside Konrad Delong Anna Ravencroft Greg Price Armin Ronacher + Christian Muirhead Jim Baker - Philip Jenvey Rodrigo Araújo + Romain Guillebert diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -24,6 +24,7 @@ _bisect _codecs _collections + `_continuation`_ `_ffi`_ _hashlib _io @@ -84,9 +85,12 @@ _winreg - Extra module with Stackless_ only: - - _stackless + Note that only some of these modules are built-in in a typical + CPython installation, and the rest is from non built-in extension + modules. This means that e.g. ``import parser`` will, on CPython, + find a local file ``parser.py``, while ``import sys`` will not find a + local file ``sys.py``. In PyPy the difference does not exist: all + these modules are built-in. * Supported by being rewritten in pure Python (possibly using ``ctypes``): see the `lib_pypy/`_ directory. Examples of modules that we @@ -101,11 +105,11 @@ .. the nonstandard modules are listed below... .. _`__pypy__`: __pypy__-module.html +.. _`_continuation`: stackless.html .. _`_ffi`: ctypes-implementation.html .. _`_rawffi`: ctypes-implementation.html .. _`_minimal_curses`: config/objspace.usemodules._minimal_curses.html .. 
_`cpyext`: http://morepypy.blogspot.com/2010/04/using-cpython-extension-modules-with.html -.. _Stackless: stackless.html Differences related to garbage collection strategies @@ -280,7 +284,14 @@ never a dictionary as it sometimes is in CPython. Assigning to ``__builtins__`` has no effect. -* object identity of immutable keys in dictionaries is not necessarily preserved. - Never compare immutable objects with ``is``. +* Do not compare immutable objects with ``is``. For example on CPython + it is true that ``x is 0`` works, i.e. does the same as ``type(x) is + int and x == 0``, but it is so by accident. If you do instead + ``x is 1000``, then it stops working, because 1000 is too large and + doesn't come from the internal cache. In PyPy it fails to work in + both cases, because we have no need for a cache at all. + +* Also, object identity of immutable keys in dictionaries is not necessarily + preserved. .. include:: _ref.txt diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -315,6 +315,28 @@ .. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html +--------------------------------------------------------- +Can RPython modules for PyPy be translated independently? +--------------------------------------------------------- + +No, you have to rebuild the entire interpreter. This means two things: + +* It is imperative to use test-driven development. You have to test + exhaustively your module in pure Python, before even attempting to + translate it. Once you translate it, you should have only a few typing + issues left to fix, but otherwise the result should work out of the box. + +* Second, and perhaps most important: do you have a really good reason + for writing the module in RPython in the first place? Nowadays you + should really look at alternatives, like writing it in pure Python, + using ctypes if it needs to call C code. 
Other alternatives are being + developed too (as of summer 2011), like a Cython binding. + +In this context it is not that important to be able to translate +RPython modules independently of translating the complete interpreter. +(It could be done given enough efforts, but it's a really serious +undertaking. Consider it as quite unlikely for now.) + ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? ---------------------------------------------------------- diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -147,7 +147,7 @@ You can read more about them at the start of `pypy/rpython/memory/gc/minimark.py`_. -In more details: +In more detail: - The small newly malloced objects are allocated in the nursery (case 1). All objects living in the nursery are "young". diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -32,7 +32,10 @@ .. _`windows document`: windows.html You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. +or `CLI code`_. If you intend to build using gcc, check to make sure that +the version you have is not 4.2 or you will run into `this bug`_. + +.. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 1. First `download a pre-built PyPy`_ for your architecture which you will use to translate your Python interpreter. It is, of course, possible to @@ -64,7 +67,6 @@ * ``libssl-dev`` (for the optional ``_ssl`` module) * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) - * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) 3. 
Translation is time-consuming -- 45 minutes on a very fast machine -- @@ -102,7 +104,7 @@ $ ./pypy-c Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 + [PyPy 1.6.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``this sentence is false'' >>>> 46 - 4 @@ -117,19 +119,8 @@ Installation_ below. The ``translate.py`` script takes a very large number of options controlling -what to translate and how. See ``translate.py -h``. Some of the more -interesting options (but for now incompatible with the JIT) are: - - * ``--stackless``: this produces a pypy-c that includes features - inspired by `Stackless Python `__. - - * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid|minimark``: - choose between using - the `Boehm-Demers-Weiser garbage collector`_, our reference - counting implementation or one of own collector implementations - (the default depends on the optimization level but is usually - ``minimark``). - +what to translate and how. See ``translate.py -h``. The default options +should be suitable for mostly everybody by now. Find a more detailed description of the various options in our `configuration sections`_. @@ -162,7 +153,7 @@ $ ./pypy-cli Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.5.0-alpha0] on linux2 + [PyPy 1.6.0] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``distopian and utopian chairs'' >>>> @@ -199,7 +190,7 @@ $ ./pypy-jvm Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.5.0-alpha0] on linux2 + [PyPy 1.6.0] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``# assert did not crash'' >>>> @@ -238,7 +229,7 @@ the ``bin/pypy`` executable. 
To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy1.5``) and put a symlink to the +whole hierarchy alone (e.g. in ``/opt/pypy1.6``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,11 +53,11 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-1.5-linux.tar.bz2 + $ tar xf pypy-1.6-linux.tar.bz2 - $ ./pypy-1.5-linux/bin/pypy + $ ./pypy-1.6/bin/pypy Python 2.7.1 (?, Apr 27 2011, 12:44:21) - [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 + [PyPy 1.6.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``implementing LOGO in LOGO: "turtles all the way down"'' @@ -73,16 +73,16 @@ $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://github.com/pypa/pip/raw/master/contrib/get-pip.py + $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-1.5-linux/bin/pypy distribute_setup.py + $ ./pypy-1.6/bin/pypy distribute_setup.py - $ ./pypy-1.5-linux/bin/pypy get-pip.py + $ ./pypy-1.6/bin/pypy get-pip.py - $ ./pypy-1.5-linux/bin/pip install pygments # for example + $ ./pypy-1.6/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-1.5-linux/site-packages``, and -the scripts in ``pypy-1.5-linux/bin``. +3rd party libraries will be installed in ``pypy-1.6/site-packages``, and +the scripts in ``pypy-1.6/bin``. 
Installing using virtualenv --------------------------- diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -21,8 +21,8 @@ Release Steps ---------------- -* at code freeze make a release branch under - http://codepeak.net/svn/pypy/release/x.y(.z). IMPORTANT: bump the +* at code freeze make a release branch using release-x.x.x in mercurial + IMPORTANT: bump the pypy version number in module/sys/version.py and in module/cpyext/include/patchlevel.h, notice that the branch will capture the revision number of this change for the release; @@ -42,18 +42,11 @@ JIT: windows, linux, os/x no JIT: windows, linux, os/x sandbox: linux, os/x - stackless: windows, linux, os/x * write release announcement pypy/doc/release-x.y(.z).txt the release announcement should contain a direct link to the download page * update pypy.org (under extradoc/pypy.org), rebuild and commit -* update http://codespeak.net/pypy/trunk: - code0> + chmod -R yourname:users /www/codespeak.net/htdocs/pypy/trunk - local> cd ..../pypy/doc && py.test - local> cd ..../pypy - local> rsync -az doc codespeak.net:/www/codespeak.net/htdocs/pypy/trunk/pypy/ - * post announcement on morepypy.blogspot.com * send announcements to pypy-dev, python-list, python-announce, python-dev ... diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -16,3 +16,4 @@ release-1.4.0beta.rst release-1.4.1.rst release-1.5.0.rst + release-1.6.0.rst diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,7 +15,7 @@ * `FAQ`_: some frequently asked questions. 
-* `Release 1.5`_: the latest official release +* `Release 1.6`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -35,7 +35,7 @@ * `Differences between PyPy and CPython`_ * `What PyPy can do for your objects`_ - * `Stackless and coroutines`_ + * `Continulets and greenlets`_ * `JIT Generation in PyPy`_ * `Sandboxing Python code`_ @@ -77,7 +77,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.5`: http://pypy.org/download.html +.. _`Release 1.6`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html @@ -122,9 +122,9 @@ Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current Mercurial default branch, which is always working or mostly working, -instead of the latest release, which is `1.5`__. +instead of the latest release, which is `1.6`__. -.. __: release-1.5.0.html +.. __: release-1.6.0.html PyPy is mainly developed on Linux and Mac OS X. Windows is supported, but platform-specific bugs tend to take longer before we notice and fix @@ -292,8 +292,6 @@ `pypy/translator/jvm/`_ the Java backend -`pypy/translator/stackless/`_ the `Stackless Transform`_ - `pypy/translator/tool/`_ helper tools for translation, including the Pygame `graph viewer`_ @@ -318,7 +316,7 @@ .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html -.. _`Stackless and coroutines`: stackless.html +.. _`Continulets and greenlets`: stackless.html .. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`rpython`: coding-guide.html#rpython @@ -337,7 +335,6 @@ .. _`low-level type system`: rtyper.html#low-level-type .. 
_`object-oriented type system`: rtyper.html#oo-type .. _`garbage collector`: garbage_collection.html -.. _`Stackless Transform`: translation.html#the-stackless-transform .. _`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter .. _`.NET`: http://www.microsoft.com/net/ .. _Mono: http://www.mono-project.com/ diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -103,7 +103,7 @@ The meta-interpreter starts interpreting the JIT bytecode. Each operation is executed and then recorded in a list of operations, called the trace. -Operations can have a list of boxes that operate on, arguments. Some operations +Operations can have a list of boxes they operate on, arguments. Some operations (like GETFIELD and GETARRAYITEM) also have special objects that describe how their arguments are laid out in memory. All possible operations generated by tracing are listed in metainterp/resoperation.py. When a (interpreter-level) diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -48,12 +48,6 @@ .. image:: image/jitviewer.png -We would like to add one level to this hierarchy, by showing the generated -machine code for each jit operation. The necessary information is already in -the log file produced by the JIT, so it is "only" a matter of teaching the -jitviewer to display it. Ideally, the machine code should be hidden by -default and viewable on request. 
- The jitviewer is a web application based on flask and jinja2 (and jQuery on the client): if you have great web developing skills and want to help PyPy, this is an ideal task to get started, because it does not require any deep diff --git a/pypy/doc/release-1.6.0.rst b/pypy/doc/release-1.6.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-1.6.0.rst @@ -0,0 +1,95 @@ +======================== +PyPy 1.6 - kickass panda +======================== + +We're pleased to announce the 1.6 release of PyPy. This release brings a lot +of bugfixes and performance improvements over 1.5, and improves support for +Windows 32bit and OS X 64bit. This version fully implements Python 2.7.1 and +has beta level support for loading CPython C extensions. You can download it +here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.1. It's fast (`pypy 1.5 and cpython 2.6.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64 or Mac OS X. Windows 32 +is beta (it roughly works but a lot of small issues have not been fixed so +far). Windows 64 is not yet supported. + +The main topics of this release are speed and stability: on average on +our benchmark suite, PyPy 1.6 is between **20% and 30%** faster than PyPy 1.5, +which was already much faster than CPython on our set of benchmarks. + +The speed improvements have been made possible by optimizing many of the +layers which compose PyPy. In particular, we improved: the Garbage Collector, +the JIT warmup time, the optimizations performed by the JIT, the quality of +the generated machine code and the implementation of our Python interpreter. + +.. 
_`pypy 1.5 and cpython 2.6.2`: http://speed.pypy.org + + +Highlights +========== + +* Numerous performance improvements, overall giving considerable speedups: + + - better GC behavior when dealing with very large objects and arrays + + - **fast ctypes:** now calls to ctypes functions are seen and optimized + by the JIT, and they are up to 60 times faster than PyPy 1.5 and 10 times + faster than CPython + + - improved generators(1): simple generators now are inlined into the caller + loop, making performance up to 3.5 times faster than PyPy 1.5. + + - improved generators(2): thanks to other optimizations, even generators + that are not inlined are between 10% and 20% faster than PyPy 1.5. + + - faster warmup time for the JIT + + - JIT support for single floats (e.g., for ``array('f')``) + + - optimized dictionaries: the internal representation of dictionaries is now + dynamically selected depending on the type of stored objects, resulting in + faster code and smaller memory footprint. For example, dictionaries whose + keys are all strings, or all integers. Other dictionaries are also smaller + due to bugfixes. + +* JitViewer: this is the first official release which includes the JitViewer, + a web-based tool which helps you to see which parts of your Python code have + been compiled by the JIT, down until the assembler. The `jitviewer`_ 0.1 has + already been release and works well with PyPy 1.6. + +* The CPython extension module API has been improved and now supports many + more extensions. For information on which one are supported, please refer to + our `compatibility wiki`_. + +* Multibyte encoding support: this was of of the last areas in which we were + still behind CPython, but now we fully support them. + +* Preliminary support for NumPy: this release includes a preview of a very + fast NumPy module integrated with the PyPy JIT. 
Unfortunately, this does + not mean that you can expect to take an existing NumPy program and run it on + PyPy, because the module is still unfinished and supports only some of the + numpy API. However, barring some details, what works should be + blazingly fast :-) + +* Bugfixes: since the 1.5 release we fixed 53 bugs in our `bug tracker`_, not + counting the numerous bugs that were found and reported through other + channels than the bug tracker. + +Cheers, + +Hakan Ardo, Carl Friedrich Bolz, Laura Creighton, Antonio Cuni, +Maciej Fijalkowski, Amaury Forgeot d'Arc, Alex Gaynor, +Armin Rigo and the PyPy team + +.. _`jitviewer`: http://morepypy.blogspot.com/2011/08/visualization-of-jitted-code.html +.. _`bug tracker`: https://bugs.pypy.org +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home + diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -134,69 +134,6 @@ a hierarchy of Address classes, in a typical static-OO-programming style. -``rstack`` -========== - -The `pypy/rlib/rstack.py`_ module allows an RPython program to control its own execution stack. -This is only useful if the program is translated using stackless. An old -description of the exposed functions is below. - -We introduce an RPython type ``frame_stack_top`` and a built-in function -``yield_current_frame_to_caller()`` that work as follows (see example below): - -* The built-in function ``yield_current_frame_to_caller()`` causes the current - function's state to be captured in a new ``frame_stack_top`` object that is - returned to the parent. Only one frame, the current one, is captured this - way. The current frame is suspended and the caller continues to run. Note - that the caller is only resumed once: when - ``yield_current_frame_to_caller()`` is called. See below. - -* A ``frame_stack_top`` object can be jumped to by calling its ``switch()`` - method with no argument. 
- -* ``yield_current_frame_to_caller()`` and ``switch()`` themselves return a new - ``frame_stack_top`` object: the freshly captured state of the caller of the - source ``switch()`` that was just executed, or None in the case described - below. - -* the function that called ``yield_current_frame_to_caller()`` also has a - normal return statement, like all functions. This statement must return - another ``frame_stack_top`` object. The latter is *not* returned to the - original caller; there is no way to return several times to the caller. - Instead, it designates the place to which the execution must jump, as if by - a ``switch()``. The place to which we jump this way will see a None as the - source frame stack top. - -* every frame stack top must be resumed once and only once. Not resuming - it at all causes a leak. Resuming it several times causes a crash. - -* a function that called ``yield_current_frame_to_caller()`` should not raise. - It would have no implicit parent frame to propagate the exception to. That - would be a crashingly bad idea. - -The following example would print the numbers from 1 to 7 in order:: - - def g(): - print 2 - frametop_before_5 = yield_current_frame_to_caller() - print 4 - frametop_before_7 = frametop_before_5.switch() - print 6 - return frametop_before_7 - - def f(): - print 1 - frametop_before_4 = g() - print 3 - frametop_before_6 = frametop_before_4.switch() - print 5 - frametop_after_return = frametop_before_6.switch() - print 7 - assert frametop_after_return is None - - f() - - ``streamio`` ============ diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -8,446 +8,294 @@ ================ PyPy can expose to its user language features similar to the ones -present in `Stackless Python`_: **no recursion depth limit**, and the -ability to write code in a **massively concurrent style**. 
It actually -exposes three different paradigms to choose from: +present in `Stackless Python`_: the ability to write code in a +**massively concurrent style**. (It does not (any more) offer the +ability to run with no `recursion depth limit`_, but the same effect +can be achieved indirectly.) -* `Tasklets and channels`_; +This feature is based on a custom primitive called a continulet_. +Continulets can be directly used by application code, or it is possible +to write (entirely at app-level) more user-friendly interfaces. -* Greenlets_; +Currently PyPy implements greenlets_ on top of continulets. It would be +easy to implement tasklets and channels as well, emulating the model +of `Stackless Python`_. -* Plain coroutines_. +Continulets are extremely light-weight, which means that PyPy should be +able to handle programs containing large amounts of them. However, due +to an implementation restriction, a PyPy compiled with +``--gcrootfinder=shadowstack`` consumes at least one page of physical +memory (4KB) per live continulet, and half a megabyte of virtual memory +on 32-bit or a complete megabyte on 64-bit. Moreover, the feature is +only available (so far) on x86 and x86-64 CPUs; for other CPUs you need +to add a short page of custom assembler to +`pypy/translator/c/src/stacklet/`_. -All of them are extremely light-weight, which means that PyPy should be -able to handle programs containing large amounts of coroutines, tasklets -and greenlets. +Theory +====== -Requirements -++++++++++++++++ +The fundamental idea is that, at any point in time, the program happens +to run one stack of frames (or one per thread, in case of +multi-threading). To see the stack, start at the top frame and follow +the chain of ``f_back`` until you reach the bottom frame. From the +point of view of one of these frames, it has a ``f_back`` pointing to +another frame (unless it is the bottom frame), and it is itself being +pointed to by another frame (unless it is the top frame). 
-If you are running py.py on top of CPython, then you need to enable -the _stackless module by running it as follows:: +The theory behind continulets is to literally take the previous sentence +as definition of "an O.K. situation". The trick is that there are +O.K. situations that are more complex than just one stack: you will +always have one stack, but you can also have in addition one or more +detached *cycles* of frames, such that by following the ``f_back`` chain +you run in a circle. But note that these cycles are indeed completely +detached: the top frame (the currently running one) is always the one +which is not the ``f_back`` of anybody else, and it is always the top of +a stack that ends with the bottom frame, never a part of these extra +cycles. - py.py --withmod-_stackless +How do you create such cycles? The fundamental operation to do so is to +take two frames and *permute* their ``f_back`` --- i.e. exchange them. +You can permute any two ``f_back`` without breaking the rule of "an O.K. +situation". Say for example that ``f`` is some frame halfway down the +stack, and you permute its ``f_back`` with the ``f_back`` of the top +frame. Then you have removed from the normal stack all intermediate +frames, and turned them into one stand-alone cycle. By doing the same +permutation again you restore the original situation. -This is implemented internally using greenlets, so it only works on a -platform where `greenlets`_ are supported. A few features do -not work this way, though, and really require a translated -``pypy-c``. +In practice, in PyPy, you cannot change the ``f_back`` of an abitrary +frame, but only of frames stored in ``continulets``. -To obtain a translated version of ``pypy-c`` that includes Stackless -support, run translate.py as follows:: - - cd pypy/translator/goal - python translate.py --stackless +Continulets are internally implemented using stacklets. 
Stacklets are a +bit more primitive (they are really one-shot continuations), but that +idea only works in C, not in Python. The basic idea of continulets is +to have at any point in time a complete valid stack; this is important +e.g. to correctly propagate exceptions (and it seems to give meaningful +tracebacks too). Application level interface ============================= -A stackless PyPy contains a module called ``stackless``. The interface -exposed by this module have not been refined much, so it should be -considered in-flux (as of 2007). -So far, PyPy does not provide support for ``stackless`` in a threaded -environment. This limitation is not fundamental, as previous experience -has shown, so supporting this would probably be reasonably easy. +.. _continulet: -An interesting point is that the same ``stackless`` module can provide -a number of different concurrency paradigms at the same time. From a -theoretical point of view, none of above-mentioned existing three -paradigms considered on its own is new: two of them are from previous -Python work, and the third one is a variant of the classical coroutine. -The new part is that the PyPy implementation manages to provide all of -them and let the user implement more. Moreover - and this might be an -important theoretical contribution of this work - we manage to provide -these concurrency concepts in a "composable" way. In other words, it -is possible to naturally mix in a single application multiple -concurrency paradigms, and multiple unrelated usages of the same -paradigm. This is discussed in the Composability_ section below. +Continulets ++++++++++++ +A translated PyPy contains by default a module called ``_continuation`` +exporting the type ``continulet``. A ``continulet`` object from this +module is a container that stores a "one-shot continuation". It plays +the role of an extra frame you can insert in the stack, and whose +``f_back`` can be changed. 
-Infinite recursion -++++++++++++++++++ +To make a continulet object, call ``continulet()`` with a callable and +optional extra arguments. -Any stackless PyPy executable natively supports recursion that is only -limited by the available memory. As in normal Python, though, there is -an initial recursion limit (which is 5000 in all pypy-c's, and 1000 in -CPython). It can be changed with ``sys.setrecursionlimit()``. With a -stackless PyPy, any value is acceptable - use ``sys.maxint`` for -unlimited. +Later, the first time you ``switch()`` to the continulet, the callable +is invoked with the same continulet object as the extra first argument. +At that point, the one-shot continuation stored in the continulet points +to the caller of ``switch()``. In other words you have a perfectly +normal-looking stack of frames. But when ``switch()`` is called again, +this stored one-shot continuation is exchanged with the current one; it +means that the caller of ``switch()`` is suspended with its continuation +stored in the container, and the old continuation from the continulet +object is resumed. -In some cases, you can write Python code that causes interpreter-level -infinite recursion -- i.e. infinite recursion without going via -application-level function calls. It is possible to limit that too, -with ``_stackless.set_stack_depth_limit()``, or to unlimit it completely -by setting it to ``sys.maxint``. +The most primitive API is actually 'permute()', which just permutes the +one-shot continuation stored in two (or more) continulets. +In more details: -Coroutines -++++++++++ +* ``continulet(callable, *args, **kwds)``: make a new continulet. + Like a generator, this only creates it; the ``callable`` is only + actually called the first time it is switched to. It will be + called as follows:: -A Coroutine is similar to a very small thread, with no preemptive scheduling. 
-Within a family of coroutines, the flow of execution is explicitly -transferred from one to another by the programmer. When execution is -transferred to a coroutine, it begins to execute some Python code. When -it transfers execution away from itself it is temporarily suspended, and -when execution returns to it it resumes its execution from the -point where it was suspended. Conceptually, only one coroutine is -actively running at any given time (but see Composability_ below). + callable(cont, *args, **kwds) -The ``stackless.coroutine`` class is instantiated with no argument. -It provides the following methods and attributes: + where ``cont`` is the same continulet object. -* ``stackless.coroutine.getcurrent()`` + Note that it is actually ``cont.__init__()`` that binds + the continulet. It is also possible to create a not-bound-yet + continulet by calling explicitly ``continulet.__new__()``, and + only bind it later by calling explicitly ``cont.__init__()``. - Static method returning the currently running coroutine. There is a - so-called "main" coroutine object that represents the "outer" - execution context, where your main program started and where it runs - as long as it does not switch to another coroutine. +* ``cont.switch(value=None, to=None)``: start the continulet if + it was not started yet. Otherwise, store the current continuation + in ``cont``, and activate the target continuation, which is the + one that was previously stored in ``cont``. Note that the target + continuation was itself previously suspended by another call to + ``switch()``; this older ``switch()`` will now appear to return. + The ``value`` argument is any object that is carried to the target + and returned by the target's ``switch()``. -* ``coro.bind(callable, *args, **kwds)`` + If ``to`` is given, it must be another continulet object. In + that case, performs a "double switch": it switches as described + above to ``cont``, and then immediately switches again to ``to``. 
+ This is different from switching directly to ``to``: the current + continuation gets stored in ``cont``, the old continuation from + ``cont`` gets stored in ``to``, and only then we resume the + execution from the old continuation out of ``to``. - Bind the coroutine so that it will execute ``callable(*args, - **kwds)``. The call is not performed immediately, but only the - first time we call the ``coro.switch()`` method. A coroutine must - be bound before it is switched to. When the coroutine finishes - (because the call to the callable returns), the coroutine exits and - implicitly switches back to another coroutine (its "parent"); after - this point, it is possible to bind it again and switch to it again. - (Which coroutine is the parent of which is not documented, as it is - likely to change when the interface is refined.) +* ``cont.throw(type, value=None, tb=None, to=None)``: similar to + ``switch()``, except that immediately after the switch is done, raise + the given exception in the target. -* ``coro.switch()`` +* ``cont.is_pending()``: return True if the continulet is pending. + This is False when it is not initialized (because we called + ``__new__`` and not ``__init__``) or when it is finished (because + the ``callable()`` returned). When it is False, the continulet + object is empty and cannot be ``switch()``-ed to. - Suspend the current (caller) coroutine, and resume execution in the - target coroutine ``coro``. +* ``permute(*continulets)``: a global function that permutes the + continuations stored in the given continulets arguments. Mostly + theoretical. In practice, using ``cont.switch()`` is easier and + more efficient than using ``permute()``; the latter does not on + its own change the currently running frame. -* ``coro.kill()`` - Kill ``coro`` by sending a CoroutineExit exception and switching - execution immediately to it. This exception can be caught in the - coroutine itself and can be raised from any call to ``coro.switch()``. 
- This exception isn't propagated to the parent coroutine. +Genlets ++++++++ -* ``coro.throw(type, value)`` +The ``_continuation`` module also exposes the ``generator`` decorator:: - Insert an exception in ``coro`` an resume switches execution - immediately to it. In the coroutine itself, this exception - will come from any call to ``coro.switch()`` and can be caught. If the - exception isn't caught, it will be propagated to the parent coroutine. + @generator + def f(cont, a, b): + cont.switch(a + b) + cont.switch(a + b + 1) -When a coroutine is garbage-collected, it gets the ``.kill()`` method sent to -it. This happens at the point the next ``.switch`` method is called, so the -target coroutine of this call will be executed only after the ``.kill`` has -finished. + for i in f(10, 20): + print i -Example -~~~~~~~ +This example prints 30 and 31. The only advantage over using regular +generators is that the generator itself is not limited to ``yield`` +statements that must all occur syntactically in the same function. +Instead, we can pass around ``cont``, e.g. to nested sub-functions, and +call ``cont.switch(x)`` from there. -Here is a classical producer/consumer example: an algorithm computes a -sequence of values, while another consumes them. For our purposes we -assume that the producer can generate several values at once, and the -consumer can process up to 3 values in a batch - it can also process -batches with fewer than 3 values without waiting for the producer (which -would be messy to express with a classical Python generator). :: +The ``generator`` decorator can also be applied to methods:: - def producer(lst): - while True: - ...compute some more values... - lst.extend(new_values) - coro_consumer.switch() - - def consumer(lst): - while True: - # First ask the producer for more values if needed - while len(lst) == 0: - coro_producer.switch() - # Process the available values in a batch, but at most 3 - batch = lst[:3] - del lst[:3] - ...process batch... 
- - # Initialize two coroutines with a shared list as argument - exchangelst = [] - coro_producer = coroutine() - coro_producer.bind(producer, exchangelst) - coro_consumer = coroutine() - coro_consumer.bind(consumer, exchangelst) - - # Start running the consumer coroutine - coro_consumer.switch() - - -Tasklets and channels -+++++++++++++++++++++ - -The ``stackless`` module also provides an interface that is roughly -compatible with the interface of the ``stackless`` module in `Stackless -Python`_: it contains ``stackless.tasklet`` and ``stackless.channel`` -classes. Tasklets are also similar to microthreads, but (like coroutines) -they don't actually run in parallel with other microthreads; instead, -they synchronize and exchange data with each other over Channels, and -these exchanges determine which Tasklet runs next. - -For usage reference, see the documentation on the `Stackless Python`_ -website. - -Note that Tasklets and Channels are implemented at application-level in -`lib_pypy/stackless.py`_ on top of coroutines_. You can refer to this -module for more details and API documentation. - -The stackless.py code tries to resemble the stackless C code as much -as possible. This makes the code somewhat unpythonic. - -Bird's eye view of tasklets and channels -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Tasklets are a bit like threads: they encapsulate a function in such a way that -they can be suspended/restarted any time. Unlike threads, they won't -run concurrently, but must be cooperative. When using stackless -features, it is vitally important that no action is performed that blocks -everything else. In particular, blocking input/output should be centralized -to a single tasklet. - -Communication between tasklets is done via channels. -There are three ways for a tasklet to give up control: - -1. call ``stackless.schedule()`` -2. send something over a channel -3. 
receive something from a channel - -A (live) tasklet can either be running, waiting to get scheduled, or be -blocked by a channel. - -Scheduling is done in strictly round-robin manner. A blocked tasklet -is removed from the scheduling queue and will be reinserted when it -becomes unblocked. - -Example -~~~~~~~ - -Here is a many-producers many-consumers example, where any consumer can -process the result of any producer. For this situation we set up a -single channel where all producer send, and on which all consumers -wait:: - - def producer(chan): - while True: - chan.send(...next value...) - - def consumer(chan): - while True: - x = chan.receive() - ...do something with x... - - # Set up the N producer and M consumer tasklets - common_channel = stackless.channel() - for i in range(N): - stackless.tasklet(producer, common_channel)() - for i in range(M): - stackless.tasklet(consumer, common_channel)() - - # Run it all - stackless.run() - -Each item sent over the channel is received by one of the waiting -consumers; which one is not specified. The producers block until their -item is consumed: the channel is not a queue, but rather a meeting point -which causes tasklets to block until both a consumer and a producer are -ready. In practice, the reason for having several consumers receiving -on a single channel is that some of the consumers can be busy in other -ways part of the time. For example, each consumer might receive a -database request, process it, and send the result to a further channel -before it asks for the next request. In this situation, further -requests can still be received by other consumers. + class X: + @generator + def f(self, cont, a, b): + ... Greenlets +++++++++ -A Greenlet is a kind of primitive Tasklet with a lower-level interface -and with exact control over the execution order. Greenlets are similar -to Coroutines, with a slightly different interface: greenlets put more -emphasis on a tree structure. 
The various greenlets of a program form a -precise tree, which fully determines their order of execution. +Greenlets are implemented on top of continulets in `lib_pypy/greenlet.py`_. +See the official `documentation of the greenlets`_. -For usage reference, see the `documentation of the greenlets`_. -The PyPy interface is identical. You should use ``greenlet.greenlet`` -instead of ``stackless.greenlet`` directly, because the greenlet library -can give you the latter when you ask for the former on top of PyPy. +Note that unlike the CPython greenlets, this version does not suffer +from GC issues: if the program "forgets" an unfinished greenlet, it will +always be collected at the next garbage collection. -PyPy's greenlets do not suffer from the cyclic GC limitation that the -CPython greenlets have: greenlets referencing each other via local -variables tend to leak on top of CPython (where it is mostly impossible -to do the right thing). It works correctly on top of PyPy. +Unimplemented features +++++++++++++++++++++++ -Coroutine Pickling -++++++++++++++++++ +The following features (present in some past Stackless version of PyPy) +are for the time being not supported any more: -Coroutines and tasklets can be pickled and unpickled, i.e. serialized to -a string of bytes for the purpose of storage or transmission. This -allows "live" coroutines or tasklets to be made persistent, moved to -other machines, or cloned in any way. The standard ``pickle`` module -works with coroutines and tasklets (at least in a translated ``pypy-c``; -unpickling live coroutines or tasklets cannot be easily implemented on -top of CPython). 
+* Tasklets and channels (currently ``stackless.py`` seems to import, + but you have tasklets on top of coroutines on top of greenlets on + top of continulets on top of stacklets, and it's probably not too + hard to cut two of these levels by adapting ``stackless.py`` to + use directly continulets) -To be able to achieve this result, we have to consider many objects that -are not normally pickleable in CPython. Here again, the `Stackless -Python`_ implementation has paved the way, and we follow the same -general design decisions: simple internal objects like bound method -objects and various kinds of iterators are supported; frame objects can -be fully pickled and unpickled -(by serializing a reference to the bytecode they are -running in addition to all the local variables). References to globals -and modules are pickled by name, similarly to references to functions -and classes in the traditional CPython ``pickle``. +* Coroutines (could be rewritten at app-level) -The "magic" part of this process is the implementation of the unpickling -of a chain of frames. The Python interpreter of PyPy uses -interpreter-level recursion to represent application-level calls. The -reason for this is that it tremendously simplifies the implementation of -the interpreter itself. Indeed, in Python, almost any operation can -potentially result in a non-tail-recursive call to another Python -function. This makes writing a non-recursive interpreter extremely -tedious; instead, we rely on lower-level transformations during the -translation process to control this recursion. This is the `Stackless -Transform`_, which is at the heart of PyPy's support for stackless-style -concurrency. +* Pickling and unpickling continulets (*) -At any point in time, a chain of Python-level frames corresponds to a -chain of interpreter-level frames (e.g. 
C frames in pypy-c), where each -single Python-level frame corresponds to one or a few interpreter-level -frames - depending on the length of the interpreter-level call chain -from one bytecode evaluation loop to the next (recursively invoked) one. +* Continuing execution of a continulet in a different thread (*) -This means that it is not sufficient to simply create a chain of Python -frame objects in the heap of a process before we can resume execution of -these newly built frames. We must recreate a corresponding chain of -interpreter-level frames. To this end, we have inserted a few *named -resume points* (see 3.2.4, in `D07.1 Massive Parallelism and Translation Aspects`_) in the Python interpreter of PyPy. This is the -motivation for implementing the interpreter-level primitives -``resume_state_create()`` and ``resume_state_invoke()``, the powerful -interface that allows an RPython program to artificially rebuild a chain -of calls in a reflective way, completely from scratch, and jump to it. +* Automatic unlimited stack (must be emulated__ so far) -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +* Support for other CPUs than x86 and x86-64 -Example -~~~~~~~ +.. __: `recursion depth limit`_ -(See `demo/pickle_coroutine.py`_ for the complete source of this demo.) +(*) Pickling, as well as changing threads, could be implemented by using +a "soft" stack switching mode again. We would get either "hard" or +"soft" switches, similarly to Stackless Python 3rd version: you get a +"hard" switch (like now) when the C stack contains non-trivial C frames +to save, and a "soft" switch (like previously) when it contains only +simple calls from Python to Python. Soft-switched continulets would +also consume a bit less RAM, and the switch might be a bit faster too +(unsure about that; what is the Stackless Python experience?). 
-Consider a program which contains a part performing a long-running -computation:: - def ackermann(x, y): - if x == 0: - return y + 1 - if y == 0: - return ackermann(x - 1, 1) - return ackermann(x - 1, ackermann(x, y - 1)) +Recursion depth limit ++++++++++++++++++++++ -By using pickling, we can save the state of the computation while it is -running, for the purpose of restoring it later and continuing the -computation at another time or on a different machine. However, -pickling does not produce a whole-program dump: it can only pickle -individual coroutines. This means that the computation should be -started in its own coroutine:: +You can use continulets to emulate the infinite recursion depth present +in Stackless Python and in stackless-enabled older versions of PyPy. - # Make a coroutine that will run 'ackermann(3, 8)' - coro = coroutine() - coro.bind(ackermann, 3, 8) +The trick is to start a continulet "early", i.e. when the recursion +depth is very low, and switch to it "later", i.e. when the recursion +depth is high. Example:: - # Now start running the coroutine - result = coro.switch() + from _continuation import continulet -The coroutine itself must switch back to the main program when it needs -to be interrupted (we can only pickle suspended coroutines). Due to -current limitations this requires an explicit check in the -``ackermann()`` function:: + def invoke(_, callable, arg): + return callable(arg) - def ackermann(x, y): - if interrupt_flag: # test a global flag - main.switch() # and switch back to 'main' if it is set - if x == 0: - return y + 1 - if y == 0: - return ackermann(x - 1, 1) - return ackermann(x - 1, ackermann(x, y - 1)) + def bootstrap(c): + # this loop runs forever, at a very low recursion depth + callable, arg = c.switch() + while True: + # start a new continulet from here, and switch to + # it using an "exchange", i.e. a switch with to=. 
+ to = continulet(invoke, callable, arg) + callable, arg = c.switch(to=to) -The global ``interrupt_flag`` would be set for example by a timeout, or -by a signal handler reacting to Ctrl-C, etc. It causes the coroutine to -transfer control back to the main program. The execution comes back -just after the line ``coro.switch()``, where we can pickle the coroutine -if necessary:: + c = continulet(bootstrap) + c.switch() - if not coro.is_alive: - print "finished; the result is:", result - else: - # save the state of the suspended coroutine - f = open('demo.pickle', 'w') - pickle.dump(coro, f) - f.close() -The process can then stop. At any later time, or on another machine, -we can reload the file and restart the coroutine with:: + def recursive(n): + if n == 0: + return ("ok", n) + if n % 200 == 0: + prev = c.switch((recursive, n - 1)) + else: + prev = recursive(n - 1) + return (prev[0], prev[1] + 1) - f = open('demo.pickle', 'r') - coro = pickle.load(f) - f.close() - result = coro.switch() + print recursive(999999) # prints ('ok', 999999) -Limitations -~~~~~~~~~~~ +Note that if you press Ctrl-C while running this example, the traceback +will be built with *all* recursive() calls so far, even if this is more +than the number that can possibly fit in the C stack. These frames are +"overlapping" each other in the sense of the C stack; more precisely, +they are copied out of and into the C stack as needed. -Coroutine pickling is subject to some limitations. First of all, it is -not a whole-program "memory dump". It means that only the "local" state -of a coroutine is saved. The local state is defined to include the -chain of calls and the local variables, but not for example the value of -any global variable. +(The example above also makes use of the following general "guideline" +to help newcomers write continulets: in ``bootstrap(c)``, only call +methods on ``c``, not on another continulet object. 
That's why we wrote +``c.switch(to=to)`` and not ``to.switch()``, which would mess up the +state. This is however just a guideline; in general we would recommend +to use other interfaces like genlets and greenlets.) -As in normal Python, the pickle will not include any function object's -code, any class definition, etc., but only references to functions and -classes. Unlike normal Python, the pickle contains frames. A pickled -frame stores a bytecode index, representing the current execution -position. This means that the user program cannot be modified *at all* -between pickling and unpickling! -On the other hand, the pickled data is fairly independent from the -platform and from the PyPy version. - -Pickling/unpickling fails if the coroutine is suspended in a state that -involves Python frames which were *indirectly* called. To define this -more precisely, a Python function can issue a regular function or method -call to invoke another Python function - this is a *direct* call and can -be pickled and unpickled. But there are many ways to invoke a Python -function indirectly. For example, most operators can invoke a special -method ``__xyz__()`` on a class, various built-in functions can call -back Python functions, signals can invoke signal handlers, and so on. -These cases are not supported yet. - - -Composability -+++++++++++++ +Theory of composability ++++++++++++++++++++++++ Although the concept of coroutines is far from new, they have not been generally integrated into mainstream languages, or only in limited form (like generators in Python and iterators in C#). We can argue that a possible reason for that is that they do not scale well when a program's complexity increases: they look attractive in small examples, but the -models that require explicit switching, by naming the target coroutine, -do not compose naturally. This means that a program that uses -coroutines for two unrelated purposes may run into conflicts caused by -unexpected interactions. 
+models that require explicit switching, for example by naming the target +coroutine, do not compose naturally. This means that a program that +uses coroutines for two unrelated purposes may run into conflicts caused +by unexpected interactions. To illustrate the problem, consider the following example (simplified -code; see the full source in -`pypy/module/_stackless/test/test_composable_coroutine.py`_). First, a -simple usage of coroutine:: +code using a theorical ``coroutine`` class). First, a simple usage of +coroutine:: main_coro = coroutine.getcurrent() # the main (outer) coroutine data = [] @@ -530,74 +378,35 @@ main coroutine, which confuses the ``generator_iterator.next()`` method (it gets resumed, but not as a result of a call to ``Yield()``). -As part of trying to combine multiple different paradigms into a single -application-level module, we have built a way to solve this problem. -The idea is to avoid the notion of a single, global "main" coroutine (or -a single main greenlet, or a single main tasklet). Instead, each -conceptually separated user of one of these concurrency interfaces can -create its own "view" on what the main coroutine/greenlet/tasklet is, -which other coroutine/greenlet/tasklets there are, and which of these is -the currently running one. Each "view" is orthogonal to the others. In -particular, each view has one (and exactly one) "current" -coroutine/greenlet/tasklet at any point in time. When the user switches -to a coroutine/greenlet/tasklet, it implicitly means that he wants to -switch away from the current coroutine/greenlet/tasklet *that belongs to -the same view as the target*. +Thus the notion of coroutine is *not composable*. By opposition, the +primitive notion of continulets is composable: if you build two +different interfaces on top of it, or have a program that uses twice the +same interface in two parts, then assuming that both parts independently +work, the composition of the two parts still works. 
-The precise application-level interface has not been fixed yet; so far, -"views" in the above sense are objects of the type -``stackless.usercostate``. The above two examples can be rewritten in -the following way:: +A full proof of that claim would require careful definitions, but let us +just claim that this fact is true because of the following observation: +the API of continulets is such that, when doing a ``switch()``, it +requires the program to have some continulet to explicitly operate on. +It shuffles the current continuation with the continuation stored in +that continulet, but has no effect outside. So if a part of a program +has a continulet object, and does not expose it as a global, then the +rest of the program cannot accidentally influence the continuation +stored in that continulet object. - producer_view = stackless.usercostate() # a local view - main_coro = producer_view.getcurrent() # the main (outer) coroutine - ... - producer_coro = producer_view.newcoroutine() - ... - -and:: - - generators_view = stackless.usercostate() - - def generator(f): - def wrappedfunc(*args, **kwds): - g = generators_view.newcoroutine(generator_iterator) - ... - - ...generators_view.getcurrent()... - -Then the composition ``grab_values()`` works as expected, because the -two views are independent. The coroutine captured as ``self.caller`` in -the ``generator_iterator.next()`` method is the main coroutine of the -``generators_view``. It is no longer the same object as the main -coroutine of the ``producer_view``, so when ``data_producer()`` issues -the following command:: - - main_coro.switch() - -the control flow cannot accidentally jump back to -``generator_iterator.next()``. In other words, from the point of view -of ``producer_view``, the function ``grab_next_value()`` always runs in -its main coroutine ``main_coro`` and the function ``data_producer`` in -its coroutine ``producer_coro``. 
This is the case independently of -which ``generators_view``-based coroutine is the current one when -``grab_next_value()`` is called. - -Only code that has explicit access to the ``producer_view`` or its -coroutine objects can perform switches that are relevant for the -generator code. If the view object and the coroutine objects that share -this view are all properly encapsulated inside the generator logic, no -external code can accidentally temper with the expected control flow any -longer. - -In conclusion: we will probably change the app-level interface of PyPy's -stackless module in the future to not expose coroutines and greenlets at -all, but only views. They are not much more difficult to use, and they -scale automatically to larger programs. +In other words, if we regard the continulet object as being essentially +a modifiable ``f_back``, then it is just a link between the frame of +``callable()`` and the parent frame --- and it cannot be arbitrarily +changed by unrelated code, as long as they don't explicitly manipulate +the continulet object. Typically, both the frame of ``callable()`` +(commonly a local function) and its parent frame (which is the frame +that switched to it) belong to the same class or module; so from that +point of view the continulet is a purely local link between two local +frames. It doesn't make sense to have a concept that allows this link +to be manipulated from outside. .. _`Stackless Python`: http://www.stackless.com .. _`documentation of the greenlets`: http://packages.python.org/greenlet/ -.. _`Stackless Transform`: translation.html#the-stackless-transform .. include:: _ref.txt diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -552,14 +552,15 @@ The stackless transform converts functions into a form that knows how to save the execution point and active variables into a heap structure -and resume execution at that point. 
This is used to implement +and resume execution at that point. This was used to implement coroutines as an RPython-level feature, which in turn are used to -implement `coroutines, greenlets and tasklets`_ as an application +implement coroutines, greenlets and tasklets as an application level feature for the Standard Interpreter. -Enable the stackless transformation with :config:`translation.stackless`. +The stackless transformation has been deprecated and is no longer +available in trunk. It has been replaced with continulets_. -.. _`coroutines, greenlets and tasklets`: stackless.html +.. _continulets: stackless.html .. _`preparing the graphs for source generation`: diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -8,13 +8,13 @@ from pypy.interpreter.miscutils import ThreadLocals from pypy.tool.cache import Cache from pypy.tool.uid import HUGEVAL_BYTES -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, newlist from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint from pypy.rlib import jit from pypy.tool.sourcetools import func_with_new_name -import os, sys, py +import os, sys __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root'] @@ -111,6 +111,9 @@ def setslotvalue(self, index, w_val): raise NotImplementedError + def delslotvalue(self, index): + raise NotImplementedError + def descr_call_mismatch(self, space, opname, RequiredClass, args): if RequiredClass is None: classname = '?' @@ -623,9 +626,9 @@ self.default_compiler = compiler return compiler - def createframe(self, code, w_globals, closure=None): + def createframe(self, code, w_globals, outer_func=None): "Create an empty PyFrame suitable for this code object." 
- return self.FrameClass(self, code, w_globals, closure) + return self.FrameClass(self, code, w_globals, outer_func) def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, @@ -754,7 +757,18 @@ w_iterator = self.iter(w_iterable) # If we know the expected length we can preallocate. if expected_length == -1: - items = [] + try: + lgt_estimate = self.len_w(w_iterable) + except OperationError, o: + if (not o.match(self, self.w_AttributeError) and + not o.match(self, self.w_TypeError)): + raise + items = [] + else: + try: + items = newlist(lgt_estimate) + except MemoryError: + items = [] # it might have lied else: items = [None] * expected_length idx = 0 diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -81,58 +81,6 @@ # ________________________________________________________________ - - class Subcontext(object): - # coroutine: subcontext support - - def __init__(self): - self.topframe = None - self.w_tracefunc = None - self.profilefunc = None - self.w_profilefuncarg = None - self.is_tracing = 0 - - def enter(self, ec): - ec.topframeref = jit.non_virtual_ref(self.topframe) - ec.w_tracefunc = self.w_tracefunc - ec.profilefunc = self.profilefunc - ec.w_profilefuncarg = self.w_profilefuncarg - ec.is_tracing = self.is_tracing - ec.space.frame_trace_action.fire() - - def leave(self, ec): - self.topframe = ec.gettopframe() - self.w_tracefunc = ec.w_tracefunc - self.profilefunc = ec.profilefunc - self.w_profilefuncarg = ec.w_profilefuncarg - self.is_tracing = ec.is_tracing - - def clear_framestack(self): - self.topframe = None - - # the following interface is for pickling and unpickling - def getstate(self, space): - if self.topframe is None: - return space.w_None - return self.topframe - - def setstate(self, space, w_state): - from pypy.interpreter.pyframe import PyFrame - if space.is_w(w_state, space.w_None): - 
self.topframe = None - else: - self.topframe = space.interp_w(PyFrame, w_state) - - def getframestack(self): - lst = [] - f = self.topframe - while f is not None: - lst.append(f) - f = f.f_backref() - lst.reverse() - return lst - # coroutine: I think this is all, folks! - def c_call_trace(self, frame, w_func, args=None): "Profile the call of a builtin function" self._c_call_return_trace(frame, w_func, args, 'c_call') diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -30,7 +30,7 @@ can_change_code = True _immutable_fields_ = ['code?', 'w_func_globals?', - 'closure?', + 'closure?[*]', 'defs_w?[*]', 'name?'] @@ -96,7 +96,7 @@ assert isinstance(code, PyCode) if nargs < 5: new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in funccallunrolling: if i < nargs: new_frame.locals_stack_w[i] = args_w[i] @@ -156,7 +156,7 @@ def _flat_pycall(self, code, nargs, frame): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg @@ -167,7 +167,7 @@ def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load): # code is a PyCode new_frame = self.space.createframe(code, self.w_func_globals, - self.closure) + self) for i in xrange(nargs): w_arg = frame.peekvalue(nargs-1-i) new_frame.locals_stack_w[i] = w_arg @@ -242,8 +242,10 @@ # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - assert Function._all.get(identifier, self) is self, ("duplicate " - "function ids") + previous = Function._all.get(identifier, self) + assert previous is self, ( + "duplicate function ids with identifier=%r: %r and %r" % ( + identifier, previous, self)) self.add_to_table() return False diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- 
a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -64,7 +64,7 @@ self.visit_self(el[1], *args) else: self.visit_function(el, *args) - else: + elif isinstance(el, type): for typ in self.bases_order: if issubclass(el, typ): visit = getattr(self, "visit__%s" % (typ.__name__,)) @@ -73,6 +73,8 @@ else: raise Exception("%s: no match for unwrap_spec element %s" % ( self.__class__.__name__, el)) + else: + raise Exception("unable to dispatch, %s, perhaps your parameter should have started with w_?" % el) def apply_over(self, unwrap_spec, *extra): dispatch = self.dispatch diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -167,3 +167,7 @@ def getmainthreadvalue(self): return self._value + + def getallvalues(self): + return {0: self._value} + diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -8,7 +8,7 @@ class Cell(Wrappable): "A simple container for a wrapped value." - + def __init__(self, w_value=None): self.w_value = w_value @@ -90,32 +90,33 @@ # variables coming from a parent function in which i'm nested # 'closure' is a list of Cell instances: the received free vars. 
- cells = None - @jit.unroll_safe - def initialize_frame_scopes(self, closure, code): - super_initialize_frame_scopes(self, closure, code) + def initialize_frame_scopes(self, outer_func, code): + super_initialize_frame_scopes(self, outer_func, code) ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: + self.cells = [] return # no self.cells needed - fast path - if closure is None: - closure = [] - elif closure is None: + elif outer_func is None: space = self.space raise OperationError(space.w_TypeError, space.wrap("directly executed code object " "may not contain free variables")) - if len(closure) != nfreevars: + if outer_func and outer_func.closure: + closure_size = len(outer_func.closure) + else: + closure_size = 0 + if closure_size != nfreevars: raise ValueError("code object received a closure with " "an unexpected number of free variables") self.cells = [None] * (ncellvars + nfreevars) for i in range(ncellvars): self.cells[i] = Cell() for i in range(nfreevars): - self.cells[i + ncellvars] = closure[i] - + self.cells[i + ncellvars] = outer_func.closure[i] + def _getcells(self): return self.cells diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -10,7 +10,7 @@ from pypy.interpreter.argument import Signature from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import NoneNotWrapped, unwrap_spec -from pypy.interpreter.astcompiler.consts import (CO_OPTIMIZED, +from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_CONTAINSGLOBALS) from pypy.rlib.rarithmetic import intmask @@ -198,7 +198,7 @@ def funcrun(self, func, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, @@ -211,7 +211,7 @@ def 
funcrun_obj(self, func, w_obj, args): frame = self.space.createframe(self, func.w_func_globals, - func.closure) + func) sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -51,7 +51,10 @@ is_being_profiled = False escaped = False # see mark_as_escaped() - def __init__(self, space, code, w_globals, closure): + def __init__(self, space, code, w_globals, outer_func): + if not we_are_translated(): + assert type(self) in (space.FrameClass, CPythonFrame), ( + "use space.FrameClass(), not directly PyFrame()") self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code @@ -63,11 +66,11 @@ make_sure_not_resized(self.locals_stack_w) check_nonneg(self.nlocals) # - if space.config.objspace.honor__builtins__: + if space.config.objspace.honor__builtins__ and w_globals is not None: self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. - self.initialize_frame_scopes(closure, code) + self.initialize_frame_scopes(outer_func, code) self.f_lineno = code.co_firstlineno def mark_as_escaped(self): @@ -80,7 +83,7 @@ self.escaped = True def append_block(self, block): - block.previous = self.lastblock + assert block.previous is self.lastblock self.lastblock = block def pop_block(self): @@ -106,15 +109,16 @@ while i >= 0: block = lst[i] i -= 1 - self.append_block(block) + block.previous = self.lastblock + self.lastblock = block def get_builtin(self): if self.space.config.objspace.honor__builtins__: return self.builtin else: return self.space.builtin - - def initialize_frame_scopes(self, closure, code): + + def initialize_frame_scopes(self, outer_func, code): # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. 
# class bodies only have CO_NEWLOCALS. # CO_NEWLOCALS: make a locals dict unless optimized is also set @@ -381,7 +385,11 @@ # do not use the instance's __init__ but the base's, because we set # everything like cells from here - PyFrame.__init__(self, space, pycode, w_globals, closure) + # XXX hack + from pypy.interpreter.function import Function + outer_func = Function(space, None, closure=closure, + forcename="fake") + PyFrame.__init__(self, space, pycode, w_globals, outer_func) f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True) new_frame.f_backref = jit.non_virtual_ref(f_back) @@ -606,7 +614,8 @@ return self.get_builtin().getdict(space) def fget_f_back(self, space): - return self.space.wrap(self.f_backref()) + f_backref = ExecutionContext.getnextframe_nohidden(self) + return self.space.wrap(f_backref) def fget_f_lasti(self, space): return self.space.wrap(self.last_instr) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -892,16 +892,16 @@ raise BytecodeCorruption, "old opcode, no longer in use" def SETUP_LOOP(self, offsettoend, next_instr): - block = LoopBlock(self, next_instr + offsettoend) - self.append_block(block) + block = LoopBlock(self, next_instr + offsettoend, self.lastblock) + self.lastblock = block def SETUP_EXCEPT(self, offsettoend, next_instr): - block = ExceptBlock(self, next_instr + offsettoend) - self.append_block(block) + block = ExceptBlock(self, next_instr + offsettoend, self.lastblock) + self.lastblock = block def SETUP_FINALLY(self, offsettoend, next_instr): - block = FinallyBlock(self, next_instr + offsettoend) - self.append_block(block) + block = FinallyBlock(self, next_instr + offsettoend, self.lastblock) + self.lastblock = block def SETUP_WITH(self, offsettoend, next_instr): w_manager = self.peekvalue() @@ -915,8 +915,8 @@ w_exit = self.space.get(w_descr, w_manager) self.settopvalue(w_exit) w_result = 
self.space.get_and_call_function(w_enter, w_manager) - block = WithBlock(self, next_instr + offsettoend) - self.append_block(block) + block = WithBlock(self, next_instr + offsettoend, self.lastblock) + self.lastblock = block self.pushvalue(w_result) def WITH_CLEANUP(self, oparg, next_instr): @@ -1247,10 +1247,10 @@ _immutable_ = True - def __init__(self, frame, handlerposition): + def __init__(self, frame, handlerposition, previous): self.handlerposition = handlerposition self.valuestackdepth = frame.valuestackdepth - self.previous = None # this makes a linked list of blocks + self.previous = previous # this makes a linked list of blocks def __eq__(self, other): return (self.__class__ is other.__class__ and @@ -1523,10 +1523,8 @@ if not isinstance(prog, codetype): filename = '' - if not isinstance(prog, str): - if isinstance(prog, basestring): - prog = str(prog) - elif isinstance(prog, file): + if not isinstance(prog, basestring): + if isinstance(prog, file): filename = prog.name prog = prog.read() else: diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py --- a/pypy/interpreter/pyparser/future.py +++ b/pypy/interpreter/pyparser/future.py @@ -109,25 +109,19 @@ self.getc() == self.getc(+2)): self.pos += 3 while 1: # Deal with a triple quoted docstring - if self.getc() == '\\': - self.pos += 2 + c = self.getc() + if c == '\\': + self.pos += 1 + self._skip_next_char_from_docstring() + elif c != endchar: + self._skip_next_char_from_docstring() else: - c = self.getc() - if c != endchar: - self.pos += 1 - if c == '\n': - self.atbol() - elif c == '\r': - if self.getc() == '\n': - self.pos += 1 - self.atbol() - else: - self.pos += 1 - if (self.getc() == endchar and - self.getc(+1) == endchar): - self.pos += 2 - self.consume_empty_line() - break + self.pos += 1 + if (self.getc() == endchar and + self.getc(+1) == endchar): + self.pos += 2 + self.consume_empty_line() + break else: # Deal with a single quoted docstring self.pos += 1 @@ -138,17 
+132,21 @@ self.consume_empty_line() return elif c == '\\': - # Deal with linefeeds - if self.getc() != '\r': - self.pos += 1 - else: - self.pos += 1 - if self.getc() == '\n': - self.pos += 1 + self._skip_next_char_from_docstring() elif c in '\r\n': # Syntax error return + def _skip_next_char_from_docstring(self): + c = self.getc() + self.pos += 1 + if c == '\n': + self.atbol() + elif c == '\r': + if self.getc() == '\n': + self.pos += 1 + self.atbol() + def consume_continuation(self): c = self.getc() if c in '\n\r': diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py --- a/pypy/interpreter/pyparser/test/test_futureautomaton.py +++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py @@ -221,6 +221,14 @@ assert f.lineno == 3 assert f.col_offset == 0 +def test_lots_of_continuation_lines(): + s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n" + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_WITH_STATEMENT + assert f.lineno == 8 + assert f.col_offset == 0 + # This looks like a bug in cpython parser # and would require extensive modifications # to future.py in order to emulate the same behaviour @@ -239,3 +247,19 @@ raise AssertionError('IndentationError not raised') assert f.lineno == 2 assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_single_quoted(): + s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_triple_quoted(): + s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py --- 
a/pypy/interpreter/test/test_exec.py +++ b/pypy/interpreter/test/test_exec.py @@ -219,3 +219,30 @@ raise e assert res == 1 + + def test_exec_unicode(self): + # 's' is a string + s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'" + # 'u' is a unicode + u = s.decode('utf-8') + exec u + assert len(x) == 6 + assert ord(x[0]) == 0x0439 + assert ord(x[1]) == 0x0446 + assert ord(x[2]) == 0x0443 + assert ord(x[3]) == 0x043a + assert ord(x[4]) == 0x0435 + assert ord(x[5]) == 0x043d + + def test_eval_unicode(self): + u = "u'%s'" % unichr(0x1234) + v = eval(u) + assert v == unichr(0x1234) + + def test_compile_unicode(self): + s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'" + u = s.decode('utf-8') + c = compile(u, '', 'exec') + exec c + assert len(x) == 6 + assert ord(x[0]) == 0x0439 diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -704,7 +704,7 @@ class TestPassThroughArguments_CALL_METHOD(TestPassThroughArguments): def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',), **{ + space = gettestobjspace(usemodules=('itertools',), **{ "objspace.opcodes.CALL_METHOD": True }) cls.space = space diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -71,6 +71,23 @@ assert err.value.match(space, space.w_ValueError) err = raises(OperationError, space.unpackiterable, w_l, 5) assert err.value.match(space, space.w_ValueError) + w_a = space.appexec((), """(): + class A(object): + def __iter__(self): + return self + def next(self): + raise StopIteration + def __len__(self): + 1/0 + return A() + """) + try: + space.unpackiterable(w_a) + except OperationError, o: + if not o.match(space, space.w_ZeroDivisionError): + raise Exception("DID NOT RAISE") + else: + raise Exception("DID NOT 
RAISE") def test_fixedview(self): space = self.space diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -1,4 +1,5 @@ from pypy.tool import udir +from pypy.conftest import option class AppTestPyFrame: @@ -6,6 +7,15 @@ def setup_class(cls): cls.w_udir = cls.space.wrap(str(udir.udir)) cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1'))) + if not option.runappdirect: + w_call_further = cls.space.appexec([], """(): + def call_further(f): + return f() + return call_further + """) + assert not w_call_further.code.hidden_applevel + w_call_further.code.hidden_applevel = True # hack + cls.w_call_further = w_call_further # test for the presence of the attributes, not functionality @@ -107,6 +117,22 @@ frame = f() assert frame.f_back.f_code.co_name == 'f' + def test_f_back_hidden(self): + if not hasattr(self, 'call_further'): + skip("not for runappdirect testing") + import sys + def f(): + return (sys._getframe(0), + sys._getframe(1), + sys._getframe(0).f_back) + def main(): + return self.call_further(f) + f0, f1, f1bis = main() + assert f0.f_code.co_name == 'f' + assert f1.f_code.co_name == 'main' + assert f1bis is f1 + assert f0.f_back is f1 + def test_f_exc_xxx(self): import sys diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -258,6 +258,11 @@ self.slots_w = [None] * nslots def setslotvalue(self, index, w_value): self.slots_w[index] = w_value + def delslotvalue(self, index): + if self.slots_w[index] is None: + return False + self.slots_w[index] = None + return True def getslotvalue(self, index): return self.slots_w[index] add(Proto) @@ -530,11 +535,10 @@ """member.__delete__(obj) Delete the value of the slot 'member' from the given 'obj'.""" self.typecheck(space, w_obj) - w_oldresult = w_obj.getslotvalue(self.index) - if w_oldresult is 
None: + success = w_obj.delslotvalue(self.index) + if not success: raise OperationError(space.w_AttributeError, space.wrap(self.name)) # XXX better message - w_obj.setslotvalue(self.index, None) Member.typedef = TypeDef( "member_descriptor", diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -57,6 +57,12 @@ else: return LLSupport.from_rstr(s) +FLOAT_ARRAY_TP = lltype.Ptr(lltype.Array(lltype.Float, hints={"nolength": True})) +def maybe_uncast(TP, array): + if array._TYPE.TO._hints.get("uncast_on_llgraph"): + array = rffi.cast(TP, array) + return array + # a list of argtypes of all operations - couldn't find any and it's # very useful. Note however that the table is half-broken here and # there, in ways that are sometimes a bit hard to fix; that's why @@ -1071,13 +1077,15 @@ return heaptracker.adr2int(llmemory.cast_ptr_to_adr(x)) if TP == llmemory.Address: return heaptracker.adr2int(x) + if TP is lltype.SingleFloat: + return longlong.singlefloat2int(x) return lltype.cast_primitive(lltype.Signed, x) def cast_from_int(TYPE, x): if isinstance(TYPE, lltype.Ptr): if isinstance(x, (int, long, llmemory.AddressAsInt)): x = llmemory.cast_int_to_adr(x) - if TYPE is rffi.VOIDP: + if TYPE is rffi.VOIDP or TYPE.TO._hints.get("uncast_on_llgraph"): # assume that we want a "C-style" cast, without typechecking the value return rffi.cast(TYPE, x) return llmemory.cast_adr_to_ptr(x, TYPE) @@ -1086,6 +1094,9 @@ x = llmemory.cast_int_to_adr(x) assert lltype.typeOf(x) == llmemory.Address return x + elif TYPE is lltype.SingleFloat: + assert lltype.typeOf(x) is lltype.Signed + return longlong.int2singlefloat(x) else: if lltype.typeOf(x) == llmemory.Address: x = heaptracker.adr2int(x) @@ -1140,6 +1151,7 @@ del _future_values[:] def set_future_value_int(index, value): + assert lltype.typeOf(value) is lltype.Signed set_future_value_ref(index, value) def 
set_future_value_float(index, value): @@ -1323,8 +1335,8 @@ return cast_to_floatstorage(array.getitem(index)) def do_getarrayitem_raw_float(array, index): - array = array.adr.ptr._obj - return cast_to_floatstorage(array.getitem(index)) + array = maybe_uncast(FLOAT_ARRAY_TP, array.adr.ptr) + return cast_to_floatstorage(array._obj.getitem(index)) def do_getarrayitem_gc_ptr(array, index): array = array._obj.container @@ -1386,8 +1398,9 @@ newvalue = cast_from_floatstorage(ITEMTYPE, newvalue) array.setitem(index, newvalue) + def do_setarrayitem_raw_float(array, index, newvalue): - array = array.adr.ptr + array = maybe_uncast(FLOAT_ARRAY_TP, array.adr.ptr) ITEMTYPE = lltype.typeOf(array).TO.OF newvalue = cast_from_floatstorage(ITEMTYPE, newvalue) array._obj.setitem(index, newvalue) @@ -1488,6 +1501,7 @@ 'i': lltype.Signed, 'f': lltype.Float, 'L': lltype.SignedLongLong, + 'S': lltype.SingleFloat, 'v': lltype.Void, } diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,13 +25,14 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types self.count_fields_if_immut = count_fields_if_immut + self.ffi_flags = ffi_flags def get_arg_types(self): return self.arg_types @@ -67,6 +68,9 @@ def count_fields_if_immutable(self): return self.count_fields_if_immut + def get_ffi_flags(self): + return self.ffi_flags + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -91,6 +95,7 @@ class BaseCPU(model.AbstractCPU): supports_floats = True supports_longlong = llimpl.IS_32_BIT + supports_singlefloats = True def __init__(self, rtyper, stats=None, opts=None, 
translate_support_code=False, @@ -113,14 +118,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None, count_fields_if_immut=-1): + arg_types=None, count_fields_if_immut=-1, ffi_flags=0): key = (ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) try: return self._descrs[key] except KeyError: descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, - count_fields_if_immut) + count_fields_if_immut, ffi_flags) self._descrs[key] = descr return descr @@ -311,7 +316,7 @@ token = history.getkind(getattr(S, fieldname)) return self.getdescr(ofs, token[0], name=fieldname) - def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): arg_types = [] for ARG in ARGS: token = history.getkind(ARG) @@ -325,16 +330,21 @@ return self.getdescr(0, token[0], extrainfo=extrainfo, arg_types=''.join(arg_types)) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo=None): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport.ffisupport import get_ffi_type_kind + from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind arg_types = [] - for arg in ffi_args: - kind = get_ffi_type_kind(arg) - if kind != history.VOID: - arg_types.append(kind) - reskind = get_ffi_type_kind(ffi_result) + try: + for arg in ffi_args: + kind = get_ffi_type_kind(self, arg) + if kind != history.VOID: + arg_types.append(kind) + reskind = get_ffi_type_kind(self, ffi_result) + except UnsupportedKind: + return None return self.getdescr(0, reskind, extrainfo=extrainfo, - arg_types=''.join(arg_types)) + arg_types=''.join(arg_types), + ffi_flags=ffi_flags) def grab_exc_value(self): @@ -517,7 +527,7 @@ return FieldDescr.new(T1, fieldname) @staticmethod - def calldescrof(FUNC, ARGS, RESULT, extrainfo=None): + def calldescrof(FUNC, ARGS, RESULT, extrainfo): return StaticMethDescr.new(FUNC, ARGS, RESULT, 
extrainfo) @staticmethod diff --git a/pypy/jit/backend/llgraph/test/test_llgraph.py b/pypy/jit/backend/llgraph/test/test_llgraph.py --- a/pypy/jit/backend/llgraph/test/test_llgraph.py +++ b/pypy/jit/backend/llgraph/test/test_llgraph.py @@ -19,6 +19,9 @@ def setup_method(self, _): self.cpu = self.cpu_type(None) + def test_memoryerror(self): + py.test.skip("does not make much sense on the llgraph backend") + def test_cast_adr_to_int_and_back(): X = lltype.Struct('X', ('foo', lltype.Signed)) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -260,10 +260,12 @@ _clsname = '' loop_token = None arg_classes = '' # <-- annotation hack + ffi_flags = 0 - def __init__(self, arg_classes, extrainfo=None): + def __init__(self, arg_classes, extrainfo=None, ffi_flags=0): self.arg_classes = arg_classes # string of "r" and "i" (ref/int) self.extrainfo = extrainfo + self.ffi_flags = ffi_flags def __repr__(self): res = '%s(%s)' % (self.__class__.__name__, self.arg_classes) @@ -284,6 +286,13 @@ def get_extra_info(self): return self.extrainfo + def get_ffi_flags(self): + return self.ffi_flags + + def get_call_conv(self): + from pypy.rlib.clibffi import get_call_conv + return get_call_conv(self.ffi_flags, True) + def get_arg_types(self): return self.arg_classes @@ -303,6 +312,8 @@ c = 'f' elif c == 'f' and longlong.supports_longlong: return 'longlong.getrealfloat(%s)' % (process('L'),) + elif c == 'S': + return 'longlong.int2singlefloat(%s)' % (process('i'),) arg = 'args_%s[%d]' % (c, seen[c]) seen[c] += 1 return arg @@ -318,6 +329,8 @@ return lltype.Void elif arg == 'L': return lltype.SignedLongLong + elif arg == 'S': + return lltype.SingleFloat else: raise AssertionError(arg) @@ -334,6 +347,8 @@ result = 'rffi.cast(lltype.SignedLongLong, res)' elif self.get_return_type() == history.VOID: result = 'None' + elif self.get_return_type() == 'S': + result = 
'longlong.singlefloat2int(res)' else: assert 0 source = py.code.Source(""" @@ -344,14 +359,15 @@ """ % locals()) ARGS = [TYPE(arg) for arg in self.arg_classes] FUNC = lltype.FuncType(ARGS, RESULT) - d = locals().copy() - d.update(globals()) + d = globals().copy() + d.update(locals()) exec source.compile() in d self.call_stub = d['call_stub'] def verify_types(self, args_i, args_r, args_f, return_type): assert self._return_type in return_type - assert self.arg_classes.count('i') == len(args_i or ()) + assert (self.arg_classes.count('i') + + self.arg_classes.count('S')) == len(args_i or ()) assert self.arg_classes.count('r') == len(args_r or ()) assert (self.arg_classes.count('f') + self.arg_classes.count('L')) == len(args_f or ()) @@ -384,8 +400,8 @@ """ _clsname = 'DynamicIntCallDescr' - def __init__(self, arg_classes, result_size, result_sign, extrainfo=None): - BaseIntCallDescr.__init__(self, arg_classes, extrainfo) + def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0): + BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags) assert isinstance(result_sign, bool) self._result_size = chr(result_size) self._result_sign = result_sign @@ -428,23 +444,39 @@ def get_result_size(self, translate_support_code): return 0 +_SingleFloatCallDescr = None # built lazily + def getCallDescrClass(RESULT): if RESULT is lltype.Void: return VoidCallDescr if RESULT is lltype.Float: return FloatCallDescr + if RESULT is lltype.SingleFloat: + global _SingleFloatCallDescr + if _SingleFloatCallDescr is None: + assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT) + class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)): + _clsname = 'SingleFloatCallDescr' + _return_type = 'S' + _SingleFloatCallDescr = SingleFloatCallDescr + return _SingleFloatCallDescr if is_longlong(RESULT): return LongLongCallDescr return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, NonGcPtrCallDescr, 'Call', 'get_result_size', Ellipsis, # <= floatattrname should 
not be used here '_is_result_signed') +getCallDescrClass._annspecialcase_ = 'specialize:memo' def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): arg_classes = [] for ARG in ARGS: kind = getkind(ARG) - if kind == 'int': arg_classes.append('i') + if kind == 'int': + if ARG is lltype.SingleFloat: + arg_classes.append('S') + else: + arg_classes.append('i') elif kind == 'ref': arg_classes.append('r') elif kind == 'float': if is_longlong(ARG): @@ -476,6 +508,9 @@ return GcPtrDescr else: return NonGcPtrDescr + if TYPE is lltype.SingleFloat: + assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE) + TYPE = rffi.UINT try: return _cache[nameprefix, TYPE] except KeyError: diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py --- a/pypy/jit/backend/llsupport/ffisupport.py +++ b/pypy/jit/backend/llsupport/ffisupport.py @@ -1,41 +1,58 @@ from pypy.rlib.rarithmetic import intmask from pypy.jit.metainterp import history -from pypy.jit.backend.llsupport.descr import DynamicIntCallDescr, NonGcPtrCallDescr,\ - FloatCallDescr, VoidCallDescr +from pypy.rpython.lltypesystem import rffi +from pypy.jit.backend.llsupport.descr import ( + DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr, + LongLongCallDescr, getCallDescrClass) class UnsupportedKind(Exception): pass -def get_call_descr_dynamic(ffi_args, ffi_result, extrainfo=None): +def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0): """Get a call descr: the types of result and args are represented by rlib.libffi.types.*""" try: - reskind = get_ffi_type_kind(ffi_result) - argkinds = [get_ffi_type_kind(arg) for arg in ffi_args] + reskind = get_ffi_type_kind(cpu, ffi_result) + argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args] except UnsupportedKind: - return None # ?? 
+ return None arg_classes = ''.join(argkinds) if reskind == history.INT: size = intmask(ffi_result.c_size) signed = is_ffi_type_signed(ffi_result) - return DynamicIntCallDescr(arg_classes, size, signed, extrainfo) + return DynamicIntCallDescr(arg_classes, size, signed, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.REF: - return NonGcPtrCallDescr(arg_classes, extrainfo) + return NonGcPtrCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.FLOAT: - return FloatCallDescr(arg_classes, extrainfo) + return FloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) elif reskind == history.VOID: - return VoidCallDescr(arg_classes, extrainfo) + return VoidCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) + elif reskind == 'L': + return LongLongCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) + elif reskind == 'S': + SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) + return SingleFloatCallDescr(arg_classes, extrainfo, + ffi_flags=ffi_flags) assert False -def get_ffi_type_kind(ffi_type): +def get_ffi_type_kind(cpu, ffi_type): from pypy.rlib.libffi import types kind = types.getkind(ffi_type) if kind == 'i' or kind == 'u': return history.INT - elif kind == 'f': + elif cpu.supports_floats and kind == 'f': return history.FLOAT elif kind == 'v': return history.VOID + elif cpu.supports_longlong and (kind == 'I' or kind == 'U'): # longlong + return 'L' + elif cpu.supports_singlefloats and kind == 's': # singlefloat + return 'S' raise UnsupportedKind("Unsupported kind '%s'" % kind) def is_ffi_type_signed(ffi_type): diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -366,36 +366,92 @@ def add_jit2gc_hooks(self, jit2gc): # - def collect_jit_stack_root(callback, gc, addr): - if addr.signed[0] != GcRootMap_shadowstack.MARKER: - # common case - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - return WORD - 
else: - # case of a MARKER followed by an assembler stack frame - follow_stack_frame_of_assembler(callback, gc, addr) - return 2 * WORD + # --------------- + # This is used to enumerate the shadowstack in the presence + # of the JIT. It is also used by the stacklet support in + # rlib/_stacklet_shadowstack. That's why it is written as + # an iterator that can also be used with a custom_trace. # - def follow_stack_frame_of_assembler(callback, gc, addr): - frame_addr = addr.signed[1] - addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) - force_index = addr.signed[0] - if force_index < 0: - force_index = ~force_index - callshape = self._callshapes[force_index] - n = 0 - while True: - offset = rffi.cast(lltype.Signed, callshape[n]) - if offset == 0: - break - addr = llmemory.cast_int_to_adr(frame_addr + offset) - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - n += 1 + class RootIterator: + _alloc_flavor_ = "raw" + + def next(iself, gc, next, range_highest): + # Return the "next" valid GC object' address. This usually + # means just returning "next", until we reach "range_highest", + # except that we are skipping NULLs. If "next" contains a + # MARKER instead, then we go into JIT-frame-lookup mode. + # + while True: + # + # If we are not iterating right now in a JIT frame + if iself.frame_addr == 0: + # + # Look for the next shadowstack address that + # contains a valid pointer + while next != range_highest: + if next.signed[0] == self.MARKER: + break + if gc.points_to_valid_gc_object(next): + return next + next += llmemory.sizeof(llmemory.Address) + else: + return llmemory.NULL # done + # + # It's a JIT frame. Save away 'next' for later, and + # go into JIT-frame-exploring mode. 
+ next += llmemory.sizeof(llmemory.Address) + frame_addr = next.signed[0] + iself.saved_next = next + iself.frame_addr = frame_addr + addr = llmemory.cast_int_to_adr(frame_addr + + self.force_index_ofs) + addr = iself.translateptr(iself.context, addr) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + # NB: the next line reads a still-alive _callshapes, + # because we ensure that just before we called this + # piece of assembler, we put on the (same) stack a + # pointer to a loop_token that keeps the force_index + # alive. + callshape = self._callshapes[force_index] + else: + # Continuing to explore this JIT frame + callshape = iself.callshape + # + # 'callshape' points to the next INT of the callshape. + # If it's zero we are done with the JIT frame. + while rffi.cast(lltype.Signed, callshape[0]) != 0: + # + # Non-zero: it's an offset inside the JIT frame. + # Read it and increment 'callshape'. + offset = rffi.cast(lltype.Signed, callshape[0]) + callshape = lltype.direct_ptradd(callshape, 1) + addr = llmemory.cast_int_to_adr(iself.frame_addr + + offset) + addr = iself.translateptr(iself.context, addr) + if gc.points_to_valid_gc_object(addr): + # + # The JIT frame contains a valid GC pointer at + # this address (as opposed to NULL). Save + # 'callshape' for the next call, and return the + # address. + iself.callshape = callshape + return addr + # + # Restore 'prev' and loop back to the start. 
+ iself.frame_addr = 0 + next = iself.saved_next + next += llmemory.sizeof(llmemory.Address) + + # --------------- # + root_iterator = RootIterator() + root_iterator.frame_addr = 0 + root_iterator.context = llmemory.NULL + root_iterator.translateptr = lambda context, addr: addr jit2gc.update({ - 'rootstackhook': collect_jit_stack_root, + 'root_iterator': root_iterator, }) def initialize(self): @@ -544,18 +600,19 @@ assert self.GCClass.inline_simple_malloc assert self.GCClass.inline_simple_malloc_varsize - # make a malloc function, with three arguments + # make a malloc function, with two arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) has_finalizer = bool(tid & (1<", res) return res @@ -571,14 +628,10 @@ def malloc_array(itemsize, tid, num_elem): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) check_typeid(type_id) - try: - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - type_id, num_elem, self.array_basesize, itemsize, - self.array_length_ofs, True) - except MemoryError: - fatalerror("out of memory (from JITted code)") - return lltype.nullptr(llmemory.GCREF.TO) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + type_id, num_elem, self.array_basesize, itemsize, + self.array_length_ofs) self.malloc_array = malloc_array self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType( [lltype.Signed] * 3, llmemory.GCREF)) @@ -591,23 +644,15 @@ unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE) # def malloc_str(length): - try: - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - str_type_id, length, str_basesize, str_itemsize, - str_ofs_length, True) - except MemoryError: - fatalerror("out of memory (from JITted code)") - return lltype.nullptr(llmemory.GCREF.TO) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + str_type_id, length, str_basesize, str_itemsize, + str_ofs_length) def malloc_unicode(length): - try: - return llop1.do_malloc_varsize_clear( - llmemory.GCREF, - unicode_type_id, 
length, unicode_basesize,unicode_itemsize, - unicode_ofs_length, True) - except MemoryError: - fatalerror("out of memory (from JITted code)") - return lltype.nullptr(llmemory.GCREF.TO) + return llop1.do_malloc_varsize_clear( + llmemory.GCREF, + unicode_type_id, length, unicode_basesize,unicode_itemsize, + unicode_ofs_length) self.malloc_str = malloc_str self.malloc_unicode = malloc_unicode self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType( @@ -628,16 +673,12 @@ if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery - try: - # NB. although we call do_malloc_fixedsize_clear() here, - # it's a bit of a hack because we set tid to 0 and may - # also use it to allocate varsized objects. The tid - # and possibly the length are both set afterward. - gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, - 0, size, True, False, False) - except MemoryError: - fatalerror("out of memory (from JITted code)") - return 0 + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. 
+ gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, + 0, size, False, False) return rffi.cast(lltype.Signed, gcref) self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -254,13 +254,13 @@ return ofs, size, sign unpack_arraydescr_size._always_inline_ = True - def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo) - def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo=None): + def calldescrof_dynamic(self, ffi_args, ffi_result, extrainfo, ffi_flags): from pypy.jit.backend.llsupport import ffisupport - return ffisupport.get_call_descr_dynamic(ffi_args, ffi_result, - extrainfo) + return ffisupport.get_call_descr_dynamic(self, ffi_args, ffi_result, + extrainfo, ffi_flags) def get_overflow_error(self): ovf_vtable = self.cast_adr_to_int(self._ovf_error_vtable) @@ -499,7 +499,7 @@ def bh_call_i(self, func, calldescr, args_i, args_r, args_f): assert isinstance(calldescr, BaseIntCallDescr) if not we_are_translated(): - calldescr.verify_types(args_i, args_r, args_f, history.INT) + calldescr.verify_types(args_i, args_r, args_f, history.INT + 'S') return calldescr.call_stub(func, args_i, args_r, args_f) def bh_call_r(self, func, calldescr, args_i, args_r, args_f): diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -57,11 +57,13 @@ all_regs = [] no_lower_byte_regs = [] save_around_call_regs = [] - + frame_reg = None + def __init__(self, longevity, frame_manager=None, assembler=None): self.free_regs = self.all_regs[:] self.longevity = longevity self.reg_bindings = {} + 
self.bindings_to_frame_reg = {} self.position = -1 self.frame_manager = frame_manager self.assembler = assembler @@ -218,6 +220,10 @@ self.reg_bindings[v] = loc return loc + def force_allocate_frame_reg(self, v): + """ Allocate the new variable v in the frame register.""" + self.bindings_to_frame_reg[v] = None + def force_spill_var(self, var): self._sync_var(var) try: @@ -236,6 +242,8 @@ try: return self.reg_bindings[box] except KeyError: + if box in self.bindings_to_frame_reg: + return self.frame_reg return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): @@ -264,8 +272,9 @@ self._check_type(v) if isinstance(v, Const): return self.return_constant(v, forbidden_vars, selected_reg) - prev_loc = self.loc(v) + if prev_loc is self.frame_reg and selected_reg is None: + return prev_loc loc = self.force_allocate_reg(v, forbidden_vars, selected_reg, need_lower_byte=need_lower_byte) if prev_loc is not loc: diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -52,7 +52,8 @@ S = lltype.GcStruct('S', ('x', lltype.Char), ('y', lltype.Ptr(T)), ('z', lltype.Ptr(U)), - ('f', lltype.Float)) + ('f', lltype.Float), + ('s', lltype.SingleFloat)) assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr cls = getFieldDescrClass(lltype.Char) @@ -61,6 +62,10 @@ clsf = getFieldDescrClass(lltype.Float) assert clsf != cls assert clsf == getFieldDescrClass(lltype.Float) + clss = getFieldDescrClass(lltype.SingleFloat) + assert clss not in (cls, clsf) + assert clss == getFieldDescrClass(lltype.SingleFloat) + assert clss == getFieldDescrClass(rffi.UINT) # for now # c0 = GcCache(False) c1 = GcCache(True) @@ -72,14 +77,17 @@ descr_y = get_field_descr(c2, S, 'y') descr_z = get_field_descr(c2, S, 'z') descr_f = get_field_descr(c2, 
S, 'f') + descr_s = get_field_descr(c2, S, 's') assert descr_x.__class__ is cls assert descr_y.__class__ is GcPtrFieldDescr assert descr_z.__class__ is NonGcPtrFieldDescr assert descr_f.__class__ is clsf + assert descr_s.__class__ is clss assert descr_x.name == 'S.x' assert descr_y.name == 'S.y' assert descr_z.name == 'S.z' assert descr_f.name == 'S.f' + assert descr_s.name == 'S.s' if not tsc: assert descr_x.offset < descr_y.offset < descr_z.offset assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key() @@ -87,23 +95,29 @@ assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T)) assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U)) assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float) + assert descr_s.get_field_size(False) == rffi.sizeof( + lltype.SingleFloat) else: assert isinstance(descr_x.offset, Symbolic) assert isinstance(descr_y.offset, Symbolic) assert isinstance(descr_z.offset, Symbolic) assert isinstance(descr_f.offset, Symbolic) + assert isinstance(descr_s.offset, Symbolic) assert isinstance(descr_x.get_field_size(True), Symbolic) assert isinstance(descr_y.get_field_size(True), Symbolic) assert isinstance(descr_z.get_field_size(True), Symbolic) assert isinstance(descr_f.get_field_size(True), Symbolic) + assert isinstance(descr_s.get_field_size(True), Symbolic) assert not descr_x.is_pointer_field() assert descr_y.is_pointer_field() assert not descr_z.is_pointer_field() assert not descr_f.is_pointer_field() + assert not descr_s.is_pointer_field() assert not descr_x.is_float_field() assert not descr_y.is_float_field() assert not descr_z.is_float_field() assert descr_f.is_float_field() + assert not descr_s.is_float_field() def test_get_field_descr_sign(): @@ -135,6 +149,7 @@ A2 = lltype.GcArray(lltype.Ptr(T)) A3 = lltype.GcArray(lltype.Ptr(U)) A4 = lltype.GcArray(lltype.Float) + A5 = lltype.GcArray(lltype.SingleFloat) assert getArrayDescrClass(A2) is GcPtrArrayDescr assert getArrayDescrClass(A3) is 
NonGcPtrArrayDescr cls = getArrayDescrClass(A1) @@ -143,25 +158,32 @@ clsf = getArrayDescrClass(A4) assert clsf != cls assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float)) + clss = getArrayDescrClass(A5) + assert clss not in (clsf, cls) + assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT)) # c0 = GcCache(False) descr1 = get_array_descr(c0, A1) descr2 = get_array_descr(c0, A2) descr3 = get_array_descr(c0, A3) descr4 = get_array_descr(c0, A4) + descr5 = get_array_descr(c0, A5) assert descr1.__class__ is cls assert descr2.__class__ is GcPtrArrayDescr assert descr3.__class__ is NonGcPtrArrayDescr assert descr4.__class__ is clsf + assert descr5.__class__ is clss assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char)) assert not descr1.is_array_of_pointers() assert descr2.is_array_of_pointers() assert not descr3.is_array_of_pointers() assert not descr4.is_array_of_pointers() + assert not descr5.is_array_of_pointers() assert not descr1.is_array_of_floats() assert not descr2.is_array_of_floats() assert not descr3.is_array_of_floats() assert descr4.is_array_of_floats() + assert not descr5.is_array_of_floats() # def get_alignment(code): # Retrieve default alignment for the compiler/platform @@ -170,27 +192,33 @@ assert descr2.get_base_size(False) == get_alignment('p') assert descr3.get_base_size(False) == get_alignment('p') assert descr4.get_base_size(False) == get_alignment('d') + assert descr5.get_base_size(False) == get_alignment('f') assert descr1.get_ofs_length(False) == 0 assert descr2.get_ofs_length(False) == 0 assert descr3.get_ofs_length(False) == 0 assert descr4.get_ofs_length(False) == 0 + assert descr5.get_ofs_length(False) == 0 assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char) assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T)) assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U)) assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float) + assert descr5.get_item_size(False) == 
rffi.sizeof(lltype.SingleFloat) # assert isinstance(descr1.get_base_size(True), Symbolic) assert isinstance(descr2.get_base_size(True), Symbolic) assert isinstance(descr3.get_base_size(True), Symbolic) assert isinstance(descr4.get_base_size(True), Symbolic) + assert isinstance(descr5.get_base_size(True), Symbolic) assert isinstance(descr1.get_ofs_length(True), Symbolic) assert isinstance(descr2.get_ofs_length(True), Symbolic) assert isinstance(descr3.get_ofs_length(True), Symbolic) assert isinstance(descr4.get_ofs_length(True), Symbolic) + assert isinstance(descr5.get_ofs_length(True), Symbolic) assert isinstance(descr1.get_item_size(True), Symbolic) assert isinstance(descr2.get_item_size(True), Symbolic) assert isinstance(descr3.get_item_size(True), Symbolic) assert isinstance(descr4.get_item_size(True), Symbolic) + assert isinstance(descr5.get_item_size(True), Symbolic) CA = rffi.CArray(lltype.Signed) descr = get_array_descr(c0, CA) assert not descr.is_array_of_floats() @@ -210,6 +238,11 @@ assert descr.is_array_of_floats() assert descr.get_base_size(False) == 0 assert descr.get_ofs_length(False) == -1 + CA = rffi.CArray(rffi.FLOAT) + descr = get_array_descr(c0, CA) + assert not descr.is_array_of_floats() + assert descr.get_base_size(False) == 0 + assert descr.get_ofs_length(False) == -1 def test_get_array_descr_sign(): @@ -257,6 +290,11 @@ assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float) assert descr4.get_return_type() == history.FLOAT assert descr4.arg_classes == "ff" + # + descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat) + assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat) + assert descr5.get_return_type() == "S" + assert descr5.arg_classes == "S" def test_get_call_descr_not_translated_longlong(): if sys.maxint > 2147483647: @@ -286,6 +324,11 @@ assert isinstance(descr4.get_result_size(True), Symbolic) assert descr4.get_return_type() == history.FLOAT assert descr4.arg_classes == "ff" + # + descr5 = 
get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat) + assert isinstance(descr5.get_result_size(True), Symbolic) + assert descr5.get_return_type() == "S" + assert descr5.arg_classes == "S" def test_call_descr_extra_info(): c1 = GcCache(True) @@ -345,8 +388,11 @@ # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) assert 'FloatCallDescr' in descr4f.repr_of_descr() + # + descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat) + assert 'SingleFloatCallDescr' in descr5f.repr_of_descr() -def test_call_stubs(): +def test_call_stubs_1(): c0 = GcCache(False) ARGS = [lltype.Char, lltype.Signed] RES = lltype.Char @@ -360,6 +406,8 @@ res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None) assert res == ord('c') +def test_call_stubs_2(): + c0 = GcCache(False) ARRAY = lltype.GcArray(lltype.Signed) ARGS = [lltype.Float, lltype.Ptr(ARRAY)] RES = lltype.Float @@ -375,3 +423,27 @@ res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), [], [opaquea], [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(res) == 4.5 + +def test_call_stubs_single_float(): + from pypy.rlib.longlong2float import uint2singlefloat, singlefloat2uint + from pypy.rlib.rarithmetic import r_singlefloat, intmask + # + c0 = GcCache(False) + ARGS = [lltype.SingleFloat, lltype.SingleFloat, lltype.SingleFloat] + RES = lltype.SingleFloat + + def f(a, b, c): + a = float(a) + b = float(b) + c = float(c) + x = a - (b / c) + return r_singlefloat(x) + + fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f) + descr2 = get_call_descr(c0, ARGS, RES) + a = intmask(singlefloat2uint(r_singlefloat(-10.0))) + b = intmask(singlefloat2uint(r_singlefloat(3.0))) + c = intmask(singlefloat2uint(r_singlefloat(2.0))) + res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr), + [a, b, c], [], []) + assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5 diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py 
--- a/pypy/jit/backend/llsupport/test/test_ffisupport.py +++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py @@ -1,24 +1,56 @@ from pypy.rlib.libffi import types -from pypy.jit.backend.llsupport.ffisupport import get_call_descr_dynamic, \ - VoidCallDescr, DynamicIntCallDescr - +from pypy.jit.codewriter.longlong import is_64_bit +from pypy.jit.backend.llsupport.ffisupport import * + + +class FakeCPU: + def __init__(self, supports_floats=False, supports_longlong=False, + supports_singlefloats=False): + self.supports_floats = supports_floats + self.supports_longlong = supports_longlong + self.supports_singlefloats = supports_singlefloats + + def test_call_descr_dynamic(): + args = [types.sint, types.pointer] + descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42) + assert isinstance(descr, DynamicIntCallDescr) + assert descr.arg_classes == 'ii' + assert descr.get_ffi_flags() == 42 args = [types.sint, types.double, types.pointer] - descr = get_call_descr_dynamic(args, types.void) + descr = get_call_descr_dynamic(FakeCPU(), args, types.void) + assert descr is None # missing floats + descr = get_call_descr_dynamic(FakeCPU(supports_floats=True), + args, types.void, ffi_flags=43) assert isinstance(descr, VoidCallDescr) assert descr.arg_classes == 'ifi' + assert descr.get_ffi_flags() == 43 - descr = get_call_descr_dynamic([], types.sint8) + descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8) assert isinstance(descr, DynamicIntCallDescr) assert descr.get_result_size(False) == 1 assert descr.is_result_signed() == True - descr = get_call_descr_dynamic([], types.uint8) + descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8) assert isinstance(descr, DynamicIntCallDescr) assert descr.get_result_size(False) == 1 assert descr.is_result_signed() == False - descr = get_call_descr_dynamic([], types.float) - assert descr is None # single floats are not supported so far - + if not is_64_bit: + descr = get_call_descr_dynamic(FakeCPU(), [], 
types.slonglong) + assert descr is None # missing longlongs + descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True), + [], types.slonglong, ffi_flags=43) + assert isinstance(descr, LongLongCallDescr) + assert descr.get_ffi_flags() == 43 + else: + assert types.slonglong is types.slong + + descr = get_call_descr_dynamic(FakeCPU(), [], types.float) + assert descr is None # missing singlefloats + descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True), + [], types.float, ffi_flags=44) + SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT) + assert isinstance(descr, SingleFloatCallDescr) + assert descr.get_ffi_flags() == 44 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -246,9 +246,8 @@ def __init__(self): self.record = [] - def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, can_collect, + def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, has_finalizer, contains_weakptr): - assert can_collect assert not contains_weakptr p = llmemory.raw_malloc(size) p = llmemory.cast_adr_to_ptr(p, RESTYPE) @@ -258,8 +257,7 @@ return p def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, - itemsize, offset_to_length, can_collect): - assert can_collect + itemsize, offset_to_length): p = llmemory.raw_malloc(size + itemsize * length) (p + offset_to_length).signed[0] = length p = llmemory.cast_adr_to_ptr(p, RESTYPE) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -8,12 +8,13 @@ # ^^^ This is only useful on 32-bit platforms. If True, # longlongs are supported by the JIT, but stored as doubles. # Boxes and Consts are BoxFloats and ConstFloats. 
+ supports_singlefloats = False done_with_this_frame_void_v = -1 done_with_this_frame_int_v = -1 done_with_this_frame_ref_v = -1 done_with_this_frame_float_v = -1 - exit_frame_with_exception_v = -1 + propagate_exception_v = -1 total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 diff --git a/pypy/jit/backend/test/calling_convention_test.py b/pypy/jit/backend/test/calling_convention_test.py --- a/pypy/jit/backend/test/calling_convention_test.py +++ b/pypy/jit/backend/test/calling_convention_test.py @@ -8,6 +8,7 @@ ConstObj, BoxFloat, ConstFloat) from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.typesystem import deref +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi, rclass from pypy.rpython.ootypesystem import ootype @@ -96,7 +97,8 @@ FUNC = self.FuncType(funcargs, F) FPTR = self.Ptr(FUNC) func_ptr = llhelper(FPTR, func) - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) ops = '[%s]\n' % arguments @@ -148,7 +150,8 @@ FUNC = self.FuncType(args, F) FPTR = self.Ptr(FUNC) func_ptr = llhelper(FPTR, func) - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) res = self.execute_operation(rop.CALL, @@ -190,7 +193,8 @@ FUNC = self.FuncType(args, F) FPTR = self.Ptr(FUNC) func_ptr = llhelper(FPTR, func) - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) res = self.execute_operation(rop.CALL, @@ -268,7 +272,8 @@ else: ARGS.append(lltype.Signed) FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - 
lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) ops = ''' [%s] f99 = call_assembler(%s, descr=called_looptoken) @@ -290,3 +295,59 @@ assert abs(x - expected_result) < 0.0001 finally: del self.cpu.done_with_this_frame_float_v + + def test_call_with_singlefloats(self): + cpu = self.cpu + if not cpu.supports_floats or not cpu.supports_singlefloats: + py.test.skip('requires floats and singlefloats') + + import random + from pypy.rlib.libffi import types + from pypy.rlib.rarithmetic import r_singlefloat + + def func(*args): + res = 0.0 + for i, x in enumerate(args): + res += (i + 1.1) * float(x) + return res + + F = lltype.Float + S = lltype.SingleFloat + I = lltype.Signed + floats = [random.random() - 0.5 for i in range(8)] + singlefloats = [r_singlefloat(random.random() - 0.5) for i in range(8)] + ints = [random.randrange(-99, 99) for i in range(8)] + for repeat in range(100): + args = [] + argvalues = [] + argslist = [] + local_floats = list(floats) + local_singlefloats = list(singlefloats) + local_ints = list(ints) + for i in range(8): + case = random.randrange(0, 3) + if case == 0: + args.append(F) + arg = local_floats.pop() + argslist.append(boxfloat(arg)) + elif case == 1: + args.append(S) + arg = local_singlefloats.pop() + argslist.append(BoxInt(longlong.singlefloat2int(arg))) + else: + args.append(I) + arg = local_ints.pop() + argslist.append(BoxInt(arg)) + argvalues.append(arg) + FUNC = self.FuncType(args, F) + FPTR = self.Ptr(FUNC) + func_ptr = llhelper(FPTR, func) + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(cpu, func_ptr) + + res = self.execute_operation(rop.CALL, + [funcbox] + argslist, + 'float', descr=calldescr) + expected = func(*argvalues) + assert abs(res.getfloat() - expected) < 0.0001 diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- 
a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -9,6 +9,7 @@ ConstObj, BoxFloat, ConstFloat) from pypy.jit.metainterp.resoperation import ResOperation, rop from pypy.jit.metainterp.typesystem import deref +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi, rclass from pypy.rpython.ootypesystem import ootype @@ -445,7 +446,8 @@ return chr(ord(c) + 1) FPTR = self.Ptr(self.FuncType([lltype.Char], lltype.Char)) func_ptr = llhelper(FPTR, func) - calldescr = cpu.calldescrof(deref(FPTR), (lltype.Char,), lltype.Char) + calldescr = cpu.calldescrof(deref(FPTR), (lltype.Char,), lltype.Char, + EffectInfo.MOST_GENERAL) x = cpu.bh_call_i(self.get_funcbox(cpu, func_ptr).value, calldescr, [ord('A')], None, None) assert x == ord('B') @@ -458,14 +460,15 @@ lltype.Float)) func_ptr = llhelper(FPTR, func) FTP = deref(FPTR) - calldescr = cpu.calldescrof(FTP, FTP.ARGS, FTP.RESULT) + calldescr = cpu.calldescrof(FTP, FTP.ARGS, FTP.RESULT, + EffectInfo.MOST_GENERAL) x = cpu.bh_call_f(self.get_funcbox(cpu, func_ptr).value, calldescr, [42], None, [longlong.getfloatstorage(3.5)]) assert longlong.getrealfloat(x) == 3.5 - 42 def test_call(self): - from pypy.rlib.libffi import types + from pypy.rlib.libffi import types, FUNCFLAG_CDECL def func_int(a, b): return a + b @@ -486,13 +489,16 @@ FUNC = deref(FPTR) funcbox = self.get_funcbox(cpu, func_ptr) # first, try it with the "normal" calldescr - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(num), BoxInt(num)], 'int', descr=calldescr) assert res.value == 2 * num # then, try it with the dynamic calldescr - dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type) + dyn_calldescr = cpu.calldescrof_dynamic([ffi_type, ffi_type], ffi_type, + 
EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(num), BoxInt(num)], 'int', descr=dyn_calldescr) @@ -507,7 +513,8 @@ FUNC = self.FuncType([F] * 7 + [I] * 2 + [F] * 3, F) FPTR = self.Ptr(FUNC) func_ptr = llhelper(FPTR, func) - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) args = ([boxfloat(.1) for i in range(7)] + [BoxInt(1), BoxInt(2), boxfloat(.2), boxfloat(.3), @@ -529,7 +536,8 @@ FUNC = self.FuncType([lltype.Signed]*16, lltype.Signed) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) func_ptr = llhelper(FPTR, func) args = range(16) funcbox = self.get_funcbox(self.cpu, func_ptr) @@ -552,7 +560,8 @@ FPTR = self.Ptr(self.FuncType([TP] * nb_args, TP)) func_ptr = llhelper(FPTR, func_ints) FUNC = deref(FPTR) - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) args = [280-24*i for i in range(nb_args)] res = self.execute_operation(rop.CALL, @@ -566,7 +575,8 @@ FUNC = self.FuncType([lltype.Float, lltype.Float], lltype.Float) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) func_ptr = llhelper(FPTR, func) funcbox = self.get_funcbox(self.cpu, func_ptr) res = self.execute_operation(rop.CALL, [funcbox, constfloat(1.5), @@ -1589,7 +1599,8 @@ ''' FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) fptr = llhelper(FPTR, func) - calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT) + calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + 
EffectInfo.MOST_GENERAL) xtp = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) xtp.subclassrange_min = 1 @@ -1807,7 +1818,8 @@ FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force) funcbox = self.get_funcbox(self.cpu, func_ptr).constbox() - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) cpu = self.cpu i0 = BoxInt() i1 = BoxInt() @@ -1850,7 +1862,8 @@ FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Signed) func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force) funcbox = self.get_funcbox(self.cpu, func_ptr).constbox() - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) cpu = self.cpu i0 = BoxInt() i1 = BoxInt() @@ -1895,7 +1908,8 @@ FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Float) func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force) funcbox = self.get_funcbox(self.cpu, func_ptr).constbox() - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) cpu = self.cpu i0 = BoxInt() i1 = BoxInt() @@ -1931,7 +1945,7 @@ assert values == [1, 10] def test_call_to_c_function(self): - from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.libffi import CDLL, types, ArgChain, FUNCFLAG_CDECL from pypy.rpython.lltypesystem.ll2ctypes import libc_name libc = CDLL(libc_name) c_tolower = libc.getpointer('tolower', [types.uchar], types.sint) @@ -1941,7 +1955,9 @@ cpu = self.cpu func_adr = llmemory.cast_ptr_to_adr(c_tolower.funcsym) funcbox = ConstInt(heaptracker.adr2int(func_adr)) - calldescr = cpu.calldescrof_dynamic([types.uchar], types.sint) + calldescr = cpu.calldescrof_dynamic([types.uchar], types.sint, + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_CDECL) i1 = BoxInt() i2 = 
BoxInt() tok = BoxInt() @@ -1997,7 +2013,9 @@ funcbox = ConstInt(heaptracker.adr2int(func_adr)) calldescr = cpu.calldescrof_dynamic([types.pointer, types_size_t, types_size_t, types.pointer], - types.void) + types.void, + EffectInfo.MOST_GENERAL, + ffi_flags=clibffi.FUNCFLAG_CDECL) i0 = BoxInt() i1 = BoxInt() i2 = BoxInt() @@ -2023,6 +2041,62 @@ assert len(glob.lst) > 0 lltype.free(raw, flavor='raw') + def test_call_to_winapi_function(self): + from pypy.rlib.clibffi import _WIN32, FUNCFLAG_STDCALL + if not _WIN32: + py.test.skip("Windows test only") + from pypy.rlib.libffi import CDLL, types, ArgChain + from pypy.rlib.rwin32 import DWORD + libc = CDLL('KERNEL32') + c_GetCurrentDir = libc.getpointer('GetCurrentDirectoryA', + [types.ulong, types.pointer], + types.ulong) + + cwd = os.getcwd() + buflen = len(cwd) + 10 + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + argchain = ArgChain().arg(rffi.cast(DWORD, buflen)).arg(buffer) + res = c_GetCurrentDir.call(argchain, DWORD) + assert rffi.cast(lltype.Signed, res) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + + cpu = self.cpu + func_adr = llmemory.cast_ptr_to_adr(c_GetCurrentDir.funcsym) + funcbox = ConstInt(heaptracker.adr2int(func_adr)) + calldescr = cpu.calldescrof_dynamic([types.ulong, types.pointer], + types.ulong, + EffectInfo.MOST_GENERAL, + ffi_flags=FUNCFLAG_STDCALL) + i1 = BoxInt() + i2 = BoxInt() + faildescr = BasicFailDescr(1) + # if the stdcall convention is ignored, then ESP is wrong after the + # call: 8 bytes too much. If we repeat the call often enough, crash. 
+ ops = [] + for i in range(50): + i3 = BoxInt() + ops += [ + ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ] + ops[-1].setfailargs([]) + ops += [ + ResOperation(rop.FINISH, [i3], None, descr=BasicFailDescr(0)) + ] + looptoken = LoopToken() + self.cpu.compile_loop([i1, i2], ops, looptoken) + + buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') + self.cpu.set_future_value_int(0, buflen) + self.cpu.set_future_value_int(1, rffi.cast(lltype.Signed, buffer)) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == 0 + assert self.cpu.get_latest_value_int(0) == len(cwd) + assert rffi.charp2strn(buffer, buflen) == cwd + lltype.free(buffer, flavor='raw') + def test_guard_not_invalidated(self): cpu = self.cpu i0 = BoxInt() @@ -2292,7 +2366,8 @@ ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) for i in range(10): self.cpu.set_future_value_int(i, i+1) res = self.cpu.execute_token(looptoken) @@ -2332,7 +2407,8 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) ops = ''' [f0, f1] @@ -2422,7 +2498,8 @@ ARGS = [lltype.Float, lltype.Float] RES = lltype.Float FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( - lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES) + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) ops = ''' [f0, f1] @@ -2634,7 +2711,8 @@ # FUNC = self.FuncType([lltype.Signed], RESTYPE) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, 
+ EffectInfo.MOST_GENERAL) x = self.cpu.bh_call_i(self.get_funcbox(self.cpu, f).value, calldescr, [value], None, None) assert x == expected, ( @@ -2667,7 +2745,8 @@ # FUNC = self.FuncType([lltype.Signed], RESTYPE) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(self.cpu, f) res = self.execute_operation(rop.CALL, [funcbox, BoxInt(value)], 'int', descr=calldescr) @@ -2701,7 +2780,8 @@ # FUNC = self.FuncType([lltype.SignedLongLong], lltype.SignedLongLong) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) x = self.cpu.bh_call_f(self.get_funcbox(self.cpu, f).value, calldescr, None, None, [value]) assert x == expected @@ -2728,12 +2808,74 @@ # FUNC = self.FuncType([lltype.SignedLongLong], lltype.SignedLongLong) FPTR = self.Ptr(FUNC) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(self.cpu, f) res = self.execute_operation(rop.CALL, [funcbox, BoxFloat(value)], 'float', descr=calldescr) assert res.getfloatstorage() == expected + def test_singlefloat_result_of_call_direct(self): + if not self.cpu.supports_singlefloats: + py.test.skip("singlefloat test") + from pypy.translator.tool.cbuild import ExternalCompilationInfo + from pypy.rlib.rarithmetic import r_singlefloat + eci = ExternalCompilationInfo( + separate_module_sources=[""" + float fn_test_result_of_call(float x) + { + return x / 2.0f; + } + """], + export_symbols=['fn_test_result_of_call']) + f = rffi.llexternal('fn_test_result_of_call', [lltype.SingleFloat], + lltype.SingleFloat, + compilation_info=eci, _nowrapper=True) + value = r_singlefloat(-42.5) + expected = r_singlefloat(-21.25) + assert f(value) 
== expected + # + FUNC = self.FuncType([lltype.SingleFloat], lltype.SingleFloat) + FPTR = self.Ptr(FUNC) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + ivalue = longlong.singlefloat2int(value) + iexpected = longlong.singlefloat2int(expected) + x = self.cpu.bh_call_i(self.get_funcbox(self.cpu, f).value, + calldescr, [ivalue], None, None) + assert x == iexpected + + def test_singlefloat_result_of_call_compiled(self): + if not self.cpu.supports_singlefloats: + py.test.skip("test of singlefloat result") + from pypy.translator.tool.cbuild import ExternalCompilationInfo + from pypy.rlib.rarithmetic import r_singlefloat + eci = ExternalCompilationInfo( + separate_module_sources=[""" + float fn_test_result_of_call(float x) + { + return x / 2.0f; + } + """], + export_symbols=['fn_test_result_of_call']) + f = rffi.llexternal('fn_test_result_of_call', [lltype.SingleFloat], + lltype.SingleFloat, + compilation_info=eci, _nowrapper=True) + value = r_singlefloat(-42.5) + expected = r_singlefloat(-21.25) + assert f(value) == expected + # + FUNC = self.FuncType([lltype.SingleFloat], lltype.SingleFloat) + FPTR = self.Ptr(FUNC) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + funcbox = self.get_funcbox(self.cpu, f) + ivalue = longlong.singlefloat2int(value) + iexpected = longlong.singlefloat2int(expected) + res = self.execute_operation(rop.CALL, [funcbox, BoxInt(ivalue)], + 'int', descr=calldescr) + assert res.value == iexpected + def test_free_loop_and_bridges(self): from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU if not isinstance(self.cpu, AbstractLLCPU): @@ -2748,6 +2890,26 @@ assert mem2 < mem1 assert mem2 == mem0 + def test_memoryerror(self): + excdescr = BasicFailDescr(666) + self.cpu.propagate_exception_v = self.cpu.get_fail_descr_number( + excdescr) + self.cpu.setup_once() # xxx redo it, because we added + # propagate_exception_v + i0 = BoxInt() + p0 = BoxPtr() + 
operations = [ + ResOperation(rop.NEWUNICODE, [i0], p0), + ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) + ] + inputargs = [i0] + looptoken = LoopToken() + self.cpu.compile_loop(inputargs, operations, looptoken) + # overflowing value: + self.cpu.set_future_value_int(0, sys.maxint // 4 + 1) + fail = self.cpu.execute_token(looptoken) + assert fail.identifier == excdescr.identifier + class OOtypeBackendTest(BaseBackendTest): diff --git a/pypy/jit/backend/test/test_ll_random.py b/pypy/jit/backend/test/test_ll_random.py --- a/pypy/jit/backend/test/test_ll_random.py +++ b/pypy/jit/backend/test/test_ll_random.py @@ -6,6 +6,7 @@ from pypy.jit.metainterp.history import BoxPtr, BoxInt from pypy.jit.metainterp.history import BasicFailDescr from pypy.jit.codewriter import heaptracker +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.rpython.annlowlevel import llhelper from pypy.rlib.rarithmetic import intmask from pypy.rpython.llinterp import LLException @@ -468,6 +469,10 @@ exec code in d return subset, d['f'], vtableptr + def getcalldescr(self, builder, TP): + ef = EffectInfo.MOST_GENERAL + return builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT, ef) + # 1. 
non raising call and guard_no_exception class CallOperation(BaseCallOperation): def produce_into(self, builder, r): @@ -481,7 +486,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=BasicFailDescr()) @@ -501,7 +506,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) _, vtableptr = builder.get_random_structure_type_and_vtable(r) exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu) @@ -523,7 +528,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu) op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(), @@ -540,7 +545,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(), descr=BasicFailDescr()) @@ -559,7 +564,7 @@ ptr = llhelper(lltype.Ptr(TP), f) c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) args = [c_addr] + subset - descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + descr = self.getcalldescr(builder, TP) self.put(builder, args, descr) while True: _, vtableptr = builder.get_random_structure_type_and_vtable(r) diff --git 
a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -27,3 +27,6 @@ # which are used in the malloc itself. They are: # ecx, ebx, esi, edi [32 and 64 bits] # r8, r9, r10, r12, r13, r14, r15 [64 bits only] +# +# Note that with asmgcc, the locations corresponding to callee-save registers +# are never used. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -34,6 +34,7 @@ from pypy.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) from pypy.rlib import rgc +from pypy.rlib.clibffi import FFI_DEFAULT_ABI from pypy.jit.backend.x86.jump import remap_frame_layout from pypy.jit.metainterp.history import ConstInt, BoxInt from pypy.jit.codewriter.effectinfo import EffectInfo @@ -56,7 +57,9 @@ self.exc = exc self.is_guard_not_invalidated = is_guard_not_invalidated -DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) +DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed), + ('bridge', lltype.Signed), # 0 or 1 + ('number', lltype.Signed)) class Assembler386(object): _regalloc = None @@ -89,6 +92,8 @@ self._current_depths_cache = (0, 0) self.datablockwrapper = None self.stack_check_slowpath = 0 + self.propagate_exception_path = 0 + self.gcrootmap_retaddr_forced = 0 self.teardown() def leave_jitted_hook(self): @@ -125,6 +130,7 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() + self._build_propagate_exception_path() if gc_ll_descr.get_malloc_slowpath_addr is not None: self._build_malloc_slowpath() self._build_stack_check_slowpath() @@ -138,6 +144,9 @@ assert self.memcpy_addr != 0, "setup_once() not called?" 
self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] + if WORD == 8: + self.pending_memoryerror_trampoline_from = [] + self.error_trampoline_64 = 0 self.mc = codebuf.MachineCodeBlockWrapper() #assert self.datablockwrapper is None --- but obscure case # possible, e.g. getting MemoryError and continuing @@ -147,6 +156,8 @@ def teardown(self): self.pending_guard_tokens = None + if WORD == 8: + self.pending_memoryerror_trampoline_from = None self.mc = None self.looppos = -1 self.currently_compiling_loop = None @@ -155,9 +166,12 @@ def finish_once(self): if self._debug: debug_start('jit-backend-counts') - for i in range(len(self.loop_run_counters)): - struct = self.loop_run_counters[i] - debug_print(str(i) + ':' + str(struct.i)) + for struct in self.loop_run_counters: + if struct.bridge: + prefix = 'bridge ' + else: + prefix = 'loop ' + debug_print(prefix + str(struct.number) + ':' + str(struct.i)) debug_stop('jit-backend-counts') def _build_float_constants(self): @@ -181,6 +195,7 @@ # instructions in assembler, with a mark_gc_roots in between. # With shadowstack, this is not needed, so we produce a single helper. gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() @@ -190,10 +205,19 @@ mc.SUB_rr(edx.value, eax.value) # compute the size we want addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() # - if gcrootmap is not None and gcrootmap.is_shadow_stack: + # The registers to save in the copy area: with shadowstack, most + # registers need to be saved. With asmgcc, the callee-saved registers + # don't need to. 
+ save_in_copy_area = gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items() + if not shadow_stack: + save_in_copy_area = [(reg, ofs) for (reg, ofs) in save_in_copy_area + if reg not in gpr_reg_mgr_cls.REGLOC_TO_GCROOTMAP_REG_INDEX] + # + for reg, ofs in save_in_copy_area: + mc.MOV_br(ofs, reg.value) + # + if shadow_stack: # ---- shadowstack ---- - for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): - mc.MOV_br(ofs, reg.value) mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes if IS_X86_32: mc.MOV_sr(0, edx.value) # push argument @@ -201,15 +225,13 @@ mc.MOV_rr(edi.value, edx.value) mc.CALL(imm(addr)) mc.ADD_ri(esp.value, 16 - WORD) - for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): - mc.MOV_rb(reg.value, ofs) else: # ---- asmgcc ---- if IS_X86_32: mc.MOV_sr(WORD, edx.value) # save it as the new argument elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. + # rdi can be clobbered: its content was saved in the + # copy area of the stack mc.MOV_rr(edi.value, edx.value) mc.JMP(imm(addr)) # tail call to the real malloc rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -217,18 +239,54 @@ # ---------- second helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() # + for reg, ofs in save_in_copy_area: + mc.MOV_rb(reg.value, ofs) + assert reg is not eax and reg is not edx + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) + # + # Note: we check this after the code above, just because the code + # above is more than 127 bytes on 64-bits... 
+ mc.TEST_rr(eax.value, eax.value) + mc.J_il8(rx86.Conditions['Z'], 0) # patched later + jz_location = mc.get_relative_pos() + # nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() + # + # If the slowpath malloc failed, we raise a MemoryError that + # always interrupts the current loop, as a "good enough" + # approximation. Also note that we didn't RET from this helper; + # but the code we jump to will actually restore the stack + # position based on EBP, which will get us out of here for free. + offset = mc.get_relative_pos() - jz_location + assert 0 < offset <= 127 + mc.overwrite(jz_location-1, chr(offset)) + mc.JMP(imm(self.propagate_exception_path)) + # rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.malloc_slowpath2 = rawstart + def _build_propagate_exception_path(self): + if self.cpu.propagate_exception_v < 0: + return # not supported (for tests, or non-translated) + # + self.mc = codebuf.MachineCodeBlockWrapper() + # call on_leave_jitted_save_exc() + addr = self.cpu.get_on_leave_jitted_int(save_exception=True) + self.mc.CALL(imm(addr)) + self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) + self._call_footer() + rawstart = self.mc.materialize(self.cpu.asmmemmgr, []) + self.propagate_exception_path = rawstart + self.mc = None + def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() - if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0: + if slowpathaddr == 0 or self.cpu.propagate_exception_v < 0: return # no stack check (for tests, or non-translated) # # make a "function" that is called immediately at the start of @@ -284,19 +342,11 @@ offset = mc.get_relative_pos() - jnz_location assert 0 < offset <= 127 mc.overwrite(jnz_location-1, chr(offset)) - # clear the exception from the global position - mc.MOV(eax, heap(self.cpu.pos_exc_value())) - mc.MOV(heap(self.cpu.pos_exception()), imm0) - 
mc.MOV(heap(self.cpu.pos_exc_value()), imm0) - # save the current exception instance into fail_boxes_ptr[0] - adr = self.fail_boxes_ptr.get_addr_for_num(0) - mc.MOV(heap(adr), eax) - # call the helper function to set the GC flag on the fail_boxes_ptr - # array (note that there is no exception any more here) - addr = self.cpu.get_on_leave_jitted_int(save_exception=False) + # call on_leave_jitted_save_exc() + addr = self.cpu.get_on_leave_jitted_int(save_exception=True) mc.CALL(imm(addr)) # - mc.MOV_ri(eax.value, self.cpu.exit_frame_with_exception_v) + mc.MOV_ri(eax.value, self.cpu.propagate_exception_v) # # footer -- note the ADD, which skips the return address of this # function, and will instead return to the caller's caller. Note @@ -309,6 +359,7 @@ self.stack_check_slowpath = rawstart @staticmethod + @rgc.no_collect def _release_gil_asmgcc(css): # similar to trackgcroot.py:pypy_asm_stackwalk, first part from pypy.rpython.memory.gctransform import asmgcroot @@ -324,6 +375,7 @@ before() @staticmethod + @rgc.no_collect def _reacquire_gil_asmgcc(css): # first reacquire the GIL after = rffi.aroundstate.after @@ -338,12 +390,14 @@ next.prev = prev @staticmethod + @rgc.no_collect def _release_gil_shadowstack(): before = rffi.aroundstate.before if before: before() @staticmethod + @rgc.no_collect def _reacquire_gil_shadowstack(): after = rffi.aroundstate.after if after: @@ -392,7 +446,7 @@ self.setup(looptoken) self.currently_compiling_loop = looptoken if log: - self._register_counter() + self._register_counter(False, looptoken.number) operations = self._inject_debugging_code(looptoken, operations) regalloc = RegAlloc(self, self.cpu.translate_support_code) @@ -461,7 +515,7 @@ self.setup(original_loop_token) if log: - self._register_counter() + self._register_counter(True, descr_number) operations = self._inject_debugging_code(faildescr, operations) arglocs = self.rebuild_faillocs_from_descr(failure_recovery) @@ -508,6 +562,8 @@ # at the end of self.mc. 
for tok in self.pending_guard_tokens: tok.pos_recovery_stub = self.generate_quick_failure(tok) + if WORD == 8 and len(self.pending_memoryerror_trampoline_from) > 0: + self.error_trampoline_64 = self.generate_propagate_error_64() def patch_pending_failure_recoveries(self, rawstart): # after we wrote the assembler to raw memory, set up @@ -544,6 +600,12 @@ # less, we would run into the issue that overwriting the # 5 bytes here might get a few nonsense bytes at the # return address of the following CALL. + if WORD == 8: + for pos_after_jz in self.pending_memoryerror_trampoline_from: + assert self.error_trampoline_64 != 0 # only if non-empty + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(self.error_trampoline_64 - pos_after_jz) + mc.copy_to_raw_memory(rawstart + pos_after_jz - 4) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -558,7 +620,7 @@ return self.mc.materialize(self.cpu.asmmemmgr, allblocks, self.cpu.gc_ll_descr.gcrootmap) - def _register_counter(self): + def _register_counter(self, bridge, number): if self._debug: # YYY very minor leak -- we need the counters to stay alive # forever, just because we want to report them at the end @@ -566,6 +628,8 @@ struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', track_allocation=False) struct.i = 0 + struct.bridge = int(bridge) + struct.number = number self.loop_run_counters.append(struct) def _find_failure_recovery_bytecode(self, faildescr): @@ -893,6 +957,7 @@ if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm): self.mc.MOVSD(to_loc, from_loc) else: + assert to_loc is not ebp self.mc.MOV(to_loc, from_loc) regalloc_mov = mov # legacy interface @@ -1056,9 +1121,10 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax, + argtypes=None, callconv=FFI_DEFAULT_ABI): if 
IS_X86_64: - return self._emit_call_64(force_index, x, arglocs, start) + return self._emit_call_64(force_index, x, arglocs, start, argtypes) p = 0 n = len(arglocs) @@ -1085,13 +1151,24 @@ # x is a location self.mc.CALL(x) self.mark_gc_roots(force_index) + # + if callconv != FFI_DEFAULT_ABI: + self._fix_stdcall(callconv, p) - def _emit_call_64(self, force_index, x, arglocs, start): + def _fix_stdcall(self, callconv, p): + from pypy.rlib.clibffi import FFI_STDCALL + assert callconv == FFI_STDCALL + # it's a bit stupid, but we're just going to cancel the fact that + # the called function just added 'p' to ESP, by subtracting it again. + self.mc.SUB_ri(esp.value, p) + + def _emit_call_64(self, force_index, x, arglocs, start, argtypes): src_locs = [] dst_locs = [] xmm_src_locs = [] xmm_dst_locs = [] pass_on_stack = [] + singlefloats = None # In reverse order for use with pop() unused_gpr = [r9, r8, ecx, edx, esi, edi] @@ -1111,6 +1188,11 @@ xmm_dst_locs.append(unused_xmm.pop()) else: pass_on_stack.append(loc) + elif (argtypes is not None and argtypes[i-start] == 'S' and + len(unused_xmm) > 0): + # Singlefloat argument + if singlefloats is None: singlefloats = [] + singlefloats.append((loc, unused_xmm.pop())) else: if len(unused_gpr) > 0: src_locs.append(loc) @@ -1138,9 +1220,15 @@ else: self.mc.MOV_sr(i*WORD, loc.value) - # Handle register arguments + # Handle register arguments: first remap the xmm arguments + remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, + X86_64_XMM_SCRATCH_REG) + # Load the singlefloat arguments from main regs or stack to xmm regs + if singlefloats is not None: + for src, dst in singlefloats: + self.mc.MOVD(dst, src) + # Finally remap the arguments in the main regs remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG) - remap_frame_layout(self, xmm_src_locs, xmm_dst_locs, X86_64_XMM_SCRATCH_REG) self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) @@ -1255,6 +1343,20 @@ def genop_cast_int_to_float(self, op, arglocs, 
resloc): self.mc.CVTSI2SD(resloc, arglocs[0]) + def genop_cast_float_to_singlefloat(self, op, arglocs, resloc): + loc0, loctmp = arglocs + self.mc.CVTSD2SS(loctmp, loc0) + assert isinstance(resloc, RegLoc) + assert isinstance(loctmp, RegLoc) + self.mc.MOVD_rx(resloc.value, loctmp.value) + + def genop_cast_singlefloat_to_float(self, op, arglocs, resloc): + loc0, = arglocs + assert isinstance(resloc, RegLoc) + assert isinstance(loc0, RegLoc) + self.mc.MOVD_xr(resloc.value, loc0.value) + self.mc.CVTSS2SD_xx(resloc.value, resloc.value) + def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc): guard_opnum = guard_op.getopnum() self.mc.CMP(arglocs[0], imm0) @@ -1376,7 +1478,7 @@ assert isinstance(loc_vtable, ImmedLoc) arglocs = arglocs[:-1] self.call(self.malloc_func_addr, arglocs, eax) - # xxx ignore NULL returns for now + self.propagate_memoryerror_if_eax_is_null() self.set_vtable(eax, loc_vtable) def set_vtable(self, loc, loc_vtable): @@ -1395,18 +1497,35 @@ def genop_new(self, op, arglocs, result_loc): assert result_loc is eax self.call(self.malloc_func_addr, arglocs, eax) + self.propagate_memoryerror_if_eax_is_null() def genop_new_array(self, op, arglocs, result_loc): assert result_loc is eax self.call(self.malloc_array_func_addr, arglocs, eax) + self.propagate_memoryerror_if_eax_is_null() def genop_newstr(self, op, arglocs, result_loc): assert result_loc is eax self.call(self.malloc_str_func_addr, arglocs, eax) + self.propagate_memoryerror_if_eax_is_null() def genop_newunicode(self, op, arglocs, result_loc): assert result_loc is eax self.call(self.malloc_unicode_func_addr, arglocs, eax) + self.propagate_memoryerror_if_eax_is_null() + + def propagate_memoryerror_if_eax_is_null(self): + # if self.propagate_exception_path == 0 (tests), this may jump to 0 + # and segfaults. too bad. the alternative is to continue anyway + # with eax==0, but that will segfault too. 
+ self.mc.TEST_rr(eax.value, eax.value) + if WORD == 4: + self.mc.J_il(rx86.Conditions['Z'], self.propagate_exception_path) + self.mc.add_pending_relocation() + elif WORD == 8: + self.mc.J_il(rx86.Conditions['Z'], 0) + pos = self.mc.get_relative_pos() + self.pending_memoryerror_trampoline_from.append(pos) # ---------- @@ -1678,6 +1797,12 @@ return GuardToken(faildescr, failargs, fail_locs, exc, is_guard_not_invalidated) + def generate_propagate_error_64(self): + assert WORD == 8 + startpos = self.mc.get_relative_pos() + self.mc.JMP(imm(self.propagate_exception_path)) + return startpos + def generate_quick_failure(self, guardtok): """Generate the initial code for handling a failure. We try to keep it as compact as possible. @@ -2013,7 +2138,9 @@ else: tmp = eax - self._emit_call(force_index, x, arglocs, 3, tmp=tmp) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp, + argtypes=op.getdescr().get_arg_types(), + callconv=op.getdescr().get_call_conv()) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -2025,7 +2152,19 @@ # and this way is simpler also because the result loc # can just be always a stack location else: - self.mc.FSTP_b(resloc.value) # float return + self.mc.FSTPL_b(resloc.value) # float return + elif op.getdescr().get_return_type() == 'S': + # singlefloat return + assert resloc is eax + if IS_X86_32: + # must convert ST(0) to a 32-bit singlefloat and load it into EAX + # mess mess mess + self.mc.SUB_ri(esp.value, 4) + self.mc.FSTPS_s(0) + self.mc.POP_r(eax.value) + elif IS_X86_64: + # must copy from the lower 32 bits of XMM0 into eax + self.mc.MOVD_rx(eax.value, xmm0.value) elif size == WORD: assert resloc is eax or resloc is xmm0 # a full word elif size == 0: @@ -2097,13 +2236,27 @@ css = get_ebp_ofs(pos + use_words - 1) self._regalloc.close_stack_struct = css # The location where the future CALL will put its return address - # will be [ESP-WORD], so save that as the next frame's top address - 
self.mc.LEA_rs(eax.value, -WORD) # LEA EAX, [ESP-4] + # will be [ESP-WORD]. But we can't use that as the next frame's + # top address! As the code after releasegil() runs without the + # GIL, it might not be set yet by the time we need it (very + # unlikely), or it might be overwritten by the following call + # to reaquiregil() (much more likely). So we hack even more + # and use a dummy location containing a dummy value (a pointer + # to itself) which we pretend is the return address :-/ :-/ :-/ + # It prevents us to store any %esp-based stack locations but we + # don't so far. + adr = self.datablockwrapper.malloc_aligned(WORD, WORD) + rffi.cast(rffi.CArrayPtr(lltype.Signed), adr)[0] = adr + self.gcrootmap_retaddr_forced = adr frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR) - self.mc.MOV_br(frame_ptr, eax.value) # MOV [css.frame], EAX + if rx86.fits_in_32bits(adr): + self.mc.MOV_bi(frame_ptr, adr) # MOV [css.frame], adr + else: + self.mc.MOV_ri(eax.value, adr) # MOV EAX, adr + self.mc.MOV_br(frame_ptr, eax.value) # MOV [css.frame], EAX # Save ebp index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - self.mc.MOV_br(index_of_ebp, ebp.value) # MOV [css.ebp], EBP + self.mc.MOV_br(index_of_ebp, ebp.value) # MOV [css.ebp], EBP # Call the closestack() function (also releasing the GIL) if IS_X86_32: reg = eax @@ -2131,6 +2284,9 @@ if gcrootmap.is_shadow_stack: args = [] else: + assert self.gcrootmap_retaddr_forced == -1, ( + "missing mark_gc_roots() in CALL_RELEASE_GIL") + self.gcrootmap_retaddr_forced = 0 css = self._regalloc.close_stack_struct assert css != 0 if IS_X86_32: @@ -2183,7 +2339,7 @@ self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: - self.mc.FSTP_b(result_loc.value) + self.mc.FSTPL_b(result_loc.value) #else: result_loc is already either eax or None, checked below self.mc.JMP_l8(0) # jump to done, patched later jmp_location = 
self.mc.get_relative_pos() @@ -2355,11 +2511,6 @@ genop_discard_cond_call_gc_wb_array = genop_discard_cond_call_gc_wb - def genop_force_token(self, op, arglocs, resloc): - # RegAlloc.consider_force_token ensures this: - assert isinstance(resloc, RegLoc) - self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS) - def not_implemented_op_discard(self, op, arglocs): not_implemented("not implemented operation: %s" % op.getopname()) @@ -2381,7 +2532,13 @@ if gcrootmap.is_shadow_stack: gcrootmap.write_callshape(mark, force_index) else: - self.mc.insert_gcroot_marker(mark) + if self.gcrootmap_retaddr_forced == 0: + self.mc.insert_gcroot_marker(mark) # common case + else: + assert self.gcrootmap_retaddr_forced != -1, ( + "two mark_gc_roots() in a CALL_RELEASE_GIL") + gcrootmap.put(self.gcrootmap_retaddr_forced, mark) + self.gcrootmap_retaddr_forced = -1 def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ -2424,8 +2581,7 @@ # there are two helpers to call only with asmgcc slowpath_addr1 = self.malloc_slowpath1 self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots(self.write_new_force_index(), - use_copy_area=shadow_stack) + self.mark_gc_roots(self.write_new_force_index(), use_copy_area=True) slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) diff --git a/pypy/jit/backend/x86/codebuf.py b/pypy/jit/backend/x86/codebuf.py --- a/pypy/jit/backend/x86/codebuf.py +++ b/pypy/jit/backend/x86/codebuf.py @@ -25,8 +25,11 @@ self.init_block_builder() # a list of relative positions; for each position p, the bytes # at [p-4:p] encode an absolute address that will need to be - # made relative. - self.relocations = [] + # made relative. Only works on 32-bit! + if WORD == 4: + self.relocations = [] + else: + self.relocations = None # # ResOperation --> offset in the assembly. 
# ops_offset[None] represents the beginning of the code after the last op @@ -42,9 +45,10 @@ def copy_to_raw_memory(self, addr): self._copy_to_raw_memory(addr) - for reloc in self.relocations: - p = addr + reloc - adr = rffi.cast(rffi.LONGP, p - WORD) - adr[0] = intmask(adr[0] - p) + if self.relocations is not None: + for reloc in self.relocations: + p = addr + reloc + adr = rffi.cast(rffi.LONGP, p - WORD) + adr[0] = intmask(adr[0] - p) valgrind.discard_translations(addr, self.get_relative_pos()) self._dump(addr, "jit-backend-dump", backend_name) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -29,6 +29,7 @@ all_regs = [eax, ecx, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] + frame_reg = ebp REGLOC_TO_GCROOTMAP_REG_INDEX = { ebx: 1, @@ -312,8 +313,11 @@ self.fm.frame_bindings[arg] = loc else: if isinstance(loc, RegLoc): - self.rm.reg_bindings[arg] = loc - used[loc] = None + if loc is ebp: + self.rm.bindings_to_frame_reg[arg] = None + else: + self.rm.reg_bindings[arg] = loc + used[loc] = None else: self.fm.frame_bindings[arg] = loc self.rm.free_regs = [] @@ -705,6 +709,17 @@ self.Perform(op, [loc0], loc1) self.rm.possibly_free_var(op.getarg(0)) + def consider_cast_float_to_singlefloat(self, op): + loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0)) + loc1 = self.rm.force_allocate_reg(op.result) + self.xrm.possibly_free_var(op.getarg(0)) + tmpxvar = TempBox() + loctmp = self.xrm.force_allocate_reg(tmpxvar) # may be equal to loc0 + self.xrm.possibly_free_var(tmpxvar) + self.Perform(op, [loc0, loctmp], loc1) + + consider_cast_singlefloat_to_float = consider_cast_int_to_float + def _consider_llong_binop_xx(self, op): # must force both arguments into xmm registers, because we don't # know if they will be suitably aligned. 
Exception: if the second @@ -832,8 +847,8 @@ def consider_call(self, op): effectinfo = op.getdescr().get_extra_info() - if effectinfo is not None: - oopspecindex = effectinfo.oopspecindex + oopspecindex = effectinfo.oopspecindex + if oopspecindex != EffectInfo.OS_NONE: if IS_X86_32: # support for some of the llong operations, # which only exist on x86-32 @@ -921,27 +936,13 @@ def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - - if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: - # ---- shadowstack ---- - # We need edx as a temporary, but otherwise don't save any more - # register. See comments in _build_malloc_slowpath(). - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=edx) - self.rm.possibly_free_var(tmp_box) - else: - # ---- asmgcc ---- - # We need to force-allocate each of save_around_call_regs now. - # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. - for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) - + # + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_slowpath(). 
+ tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + # self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), @@ -1337,20 +1338,32 @@ if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - if use_copy_area: - assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS - area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] - gcrootmap.add_frame_offset(shape, area_offset) - else: - assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg( - shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + # + # The register 'reg' is alive across this call. + gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap is None or not gcrootmap.is_shadow_stack: + # + # Asmgcc: if reg is a callee-save register, we can + # explicitly mark it as containing a BoxPtr. + if reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX: + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + continue + # + # Else, 'use_copy_area' must be True (otherwise this BoxPtr + # should not be in a register). The copy area contains the + # real value of the register. 
+ assert use_copy_area + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + # return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) def consider_force_token(self, op): - loc = self.rm.force_allocate_reg(op.result) - self.Perform(op, [], loc) + # the FORCE_TOKEN operation returns directly 'ebp' + self.rm.force_allocate_frame_reg(op.result) def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -521,6 +521,8 @@ UCOMISD = _binaryop('UCOMISD') CVTSI2SD = _binaryop('CVTSI2SD') CVTTSD2SI = _binaryop('CVTTSD2SI') + CVTSD2SS = _binaryop('CVTSD2SS') + CVTSS2SD = _binaryop('CVTSS2SD') SQRTSD = _binaryop('SQRTSD') @@ -534,6 +536,8 @@ PXOR = _binaryop('PXOR') PCMPEQD = _binaryop('PCMPEQD') + MOVD = _binaryop('MOVD') + CALL = _relative_unaryop('CALL') JMP = _relative_unaryop('JMP') diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,7 @@ class AbstractX86CPU(AbstractLLCPU): debug = True supports_floats = True + supports_singlefloats = True BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) dont_keepalive_stuff = False # for tests @@ -118,7 +119,8 @@ setitem(index, null) def get_latest_force_token(self): - return self.assembler.fail_ebp + FORCE_INDEX_OFS + # the FORCE_TOKEN operation and this helper both return 'ebp'. 
+ return self.assembler.fail_ebp def execute_token(self, executable_token): addr = executable_token._x86_bootstrap_code @@ -152,8 +154,9 @@ flavor='raw', zero=True, immortal=True) - def force(self, addr_of_force_index): + def force(self, addr_of_force_token): TP = rffi.CArrayPtr(lltype.Signed) + addr_of_force_index = addr_of_force_token + FORCE_INDEX_OFS fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" faildescr = self.get_fail_descr_from_number(fail_index) @@ -163,7 +166,7 @@ # start of "no gc operation!" block fail_index_2 = self.assembler.grab_frame_values( bytecode, - addr_of_force_index - FORCE_INDEX_OFS, + addr_of_force_token, self.all_null_registers) self.assembler.leave_jitted_hook() # end of "no gc operation!" block diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -527,6 +527,7 @@ NOP = insn('\x90') RET = insn('\xC3') + RET16_i = insn('\xC2', immediate(1, 'h')) PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) @@ -573,7 +574,8 @@ BTS_jr = insn(rex_w, '\x0F\xAB', register(2,8), abs_, immediate(1)) # x87 instructions - FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + FSTPL_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) # rffi.DOUBLE ('as' wants L??) 
+ FSTPS_s = insn('\xD9', orbyte(3<<3), stack_sp(1)) # lltype.SingleFloat # ------------------------------ Random mess ----------------------- RDTSC = insn('\x0F\x31') @@ -590,8 +592,18 @@ CVTTSD2SI_rx = xmminsn('\xF2', rex_w, '\x0F\x2C', register(1, 8), register(2), '\xC0') CVTTSD2SI_rb = xmminsn('\xF2', rex_w, '\x0F\x2C', register(1, 8), stack_bp(2)) - MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + CVTSD2SS_xx = xmminsn('\xF2', rex_nw, '\x0F\x5A', + register(1, 8), register(2), '\xC0') + CVTSD2SS_xb = xmminsn('\xF2', rex_nw, '\x0F\x5A', + register(1, 8), stack_bp(2)) + CVTSS2SD_xx = xmminsn('\xF3', rex_nw, '\x0F\x5A', + register(1, 8), register(2), '\xC0') + CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', + register(1, 8), stack_bp(2)) + + MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -7,6 +7,7 @@ BoxPtr, ConstPtr, TreeLoop from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.codewriter import heaptracker +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.llsupport.gc import GcLLDescription from pypy.jit.backend.detect_cpu import getcpuclass @@ -76,7 +77,8 @@ for box in boxes: regalloc.rm.try_allocate_reg(box) TP = lltype.FuncType([], lltype.Signed) - calldescr = cpu.calldescrof(TP, TP.ARGS, TP.RESULT) + calldescr = cpu.calldescrof(TP, 
TP.ARGS, TP.RESULT, + EffectInfo.MOST_GENERAL) regalloc.rm._check_invariants() box = boxes[0] regalloc.position = 0 diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py --- a/pypy/jit/backend/x86/test/test_regalloc.py +++ b/pypy/jit/backend/x86/test/test_regalloc.py @@ -16,6 +16,7 @@ from pypy.rpython.annlowlevel import llhelper from pypy.rpython.lltypesystem import rclass, rstr from pypy.jit.codewriter import longlong +from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.backend.x86.rx86 import * def test_is_comparison_or_ovf_op(): @@ -92,7 +93,8 @@ zd_addr = cpu.cast_int_to_adr(zero_division_tp) zero_division_error = llmemory.cast_adr_to_ptr(zd_addr, lltype.Ptr(rclass.OBJECT_VTABLE)) - raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT) + raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) fdescr1 = BasicFailDescr(1) fdescr2 = BasicFailDescr(2) @@ -115,9 +117,12 @@ f2ptr = llhelper(F2PTR, f2) f10ptr = llhelper(F10PTR, f10) - f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT) - f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT) - f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT) + f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + f10_calldescr= cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT, + EffectInfo.MOST_GENERAL) namespace = locals().copy() type_system = 'lltype' diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py --- a/pypy/jit/backend/x86/test/test_regloc.py +++ b/pypy/jit/backend/x86/test/test_regloc.py @@ -62,7 +62,7 @@ assert mc.relocations == [5] expected = "\xE8" + struct.pack(' movl $xxx, %eax suffix = 'l' - if ops[1][2:].isdigit(): - 
ops[1] += 'd' - else: - ops[1] = '%e' + ops[1][2:] + ops[1] = reduce_to_32bit(ops[1]) + if instrname.lower() == 'movd': + ops[0] = reduce_to_32bit(ops[0]) + ops[1] = reduce_to_32bit(ops[1]) # op = '\t%s%s %s%s' % (instrname.lower(), suffix, ', '.join(ops), following) diff --git a/pypy/jit/codewriter/assembler.py b/pypy/jit/codewriter/assembler.py --- a/pypy/jit/codewriter/assembler.py +++ b/pypy/jit/codewriter/assembler.py @@ -76,6 +76,8 @@ TYPE = llmemory.Address if TYPE == llmemory.Address: value = heaptracker.adr2int(value) + if TYPE is lltype.SingleFloat: + value = longlong.singlefloat2int(value) if not isinstance(value, (llmemory.AddressAsInt, ComputedIntSymbolic)): value = lltype.cast_primitive(lltype.Signed, value) diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py --- a/pypy/jit/codewriter/call.py +++ b/pypy/jit/codewriter/call.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import support from pypy.jit.codewriter.jitcode import JitCode from pypy.jit.codewriter.effectinfo import (VirtualizableAnalyzer, - QuasiImmutAnalyzer, CanReleaseGILAnalyzer, effectinfo_from_writeanalyze, + QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, CallInfoCollection) from pypy.translator.simplify import get_funcobj, get_functype from pypy.rpython.lltypesystem import lltype, llmemory @@ -31,7 +31,7 @@ self.readwrite_analyzer = ReadWriteAnalyzer(translator) self.virtualizable_analyzer = VirtualizableAnalyzer(translator) self.quasiimmut_analyzer = QuasiImmutAnalyzer(translator) - self.canreleasegil_analyzer = CanReleaseGILAnalyzer(translator) + self.randomeffects_analyzer = RandomEffectsAnalyzer(translator) # for index, jd in enumerate(jitdrivers_sd): jd.index = index @@ -190,7 +190,7 @@ fnaddr = llmemory.cast_ptr_to_adr(fnptr) NON_VOID_ARGS = [ARG for ARG in FUNC.ARGS if ARG is not lltype.Void] calldescr = self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS), - FUNC.RESULT) + FUNC.RESULT, EffectInfo.MOST_GENERAL) return (fnaddr, 
calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, @@ -222,17 +222,21 @@ assert not NON_VOID_ARGS, ("arguments not supported for " "loop-invariant function!") # build the extraeffect - can_release_gil = self.canreleasegil_analyzer.analyze(op) - # can_release_gil implies can_invalidate - can_invalidate = can_release_gil or self.quasiimmut_analyzer.analyze(op) + random_effects = self.randomeffects_analyzer.analyze(op) + if random_effects: + extraeffect = EffectInfo.EF_RANDOM_EFFECTS + # random_effects implies can_invalidate + can_invalidate = random_effects or self.quasiimmut_analyzer.analyze(op) if extraeffect is None: if self.virtualizable_analyzer.analyze(op): extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE elif loopinvariant: extraeffect = EffectInfo.EF_LOOPINVARIANT elif elidable: - # XXX check what to do about exceptions (also MemoryError?) - extraeffect = EffectInfo.EF_ELIDABLE + if self._canraise(op): + extraeffect = EffectInfo.EF_ELIDABLE_CAN_RAISE + else: + extraeffect = EffectInfo.EF_ELIDABLE_CANNOT_RAISE elif self._canraise(op): extraeffect = EffectInfo.EF_CAN_RAISE else: @@ -240,12 +244,10 @@ # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, - oopspecindex, can_invalidate, can_release_gil) + oopspecindex, can_invalidate) # - if oopspecindex != EffectInfo.OS_NONE: - assert effectinfo is not None + assert effectinfo is not None if elidable or loopinvariant: - assert effectinfo is not None assert extraeffect != EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE # XXX this should also say assert not can_invalidate, but # it can't because our analyzer is not good enough for now @@ -265,8 +267,7 @@ def calldescr_canraise(self, calldescr): effectinfo = calldescr.get_extra_info() - return (effectinfo is None or - effectinfo.extraeffect >= EffectInfo.EF_CAN_RAISE) + return effectinfo.check_can_raise() def jitdriver_sd_from_portal_graph(self, graph): for jd in self.jitdrivers_sd: 
diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -9,11 +9,13 @@ _cache = {} # the 'extraeffect' field is one of the following values: - EF_ELIDABLE = 0 #elidable function (and cannot raise) + EF_ELIDABLE_CANNOT_RAISE = 0 #elidable function (and cannot raise) EF_LOOPINVARIANT = 1 #special: call it only once per loop EF_CANNOT_RAISE = 2 #a function which cannot raise - EF_CAN_RAISE = 3 #normal function (can raise) + EF_ELIDABLE_CAN_RAISE = 3 #elidable function (but can raise) + EF_CAN_RAISE = 4 #normal function (can raise) EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables + EF_RANDOM_EFFECTS = 6 #can do whatever # the 'oopspecindex' field is one of the following values: OS_NONE = 0 # normal case, no oopspec @@ -79,22 +81,32 @@ write_descrs_fields, write_descrs_arrays, extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, - can_invalidate=False, can_release_gil=False): - key = (frozenset(readonly_descrs_fields), - frozenset(readonly_descrs_arrays), - frozenset(write_descrs_fields), - frozenset(write_descrs_arrays), + can_invalidate=False): + key = (frozenset_or_none(readonly_descrs_fields), + frozenset_or_none(readonly_descrs_arrays), + frozenset_or_none(write_descrs_fields), + frozenset_or_none(write_descrs_arrays), extraeffect, oopspecindex, - can_invalidate, - can_release_gil) + can_invalidate) if key in cls._cache: return cls._cache[key] + if extraeffect == EffectInfo.EF_RANDOM_EFFECTS: + assert readonly_descrs_fields is None + assert readonly_descrs_arrays is None + assert write_descrs_fields is None + assert write_descrs_arrays is None + else: + assert readonly_descrs_fields is not None + assert readonly_descrs_arrays is not None + assert write_descrs_fields is not None + assert write_descrs_arrays is not None result = object.__new__(cls) result.readonly_descrs_fields = readonly_descrs_fields result.readonly_descrs_arrays = 
readonly_descrs_arrays if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ - extraeffect == EffectInfo.EF_ELIDABLE: + extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ + extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: result.write_descrs_fields = [] result.write_descrs_arrays = [] else: @@ -102,11 +114,13 @@ result.write_descrs_arrays = write_descrs_arrays result.extraeffect = extraeffect result.can_invalidate = can_invalidate - result.can_release_gil = can_release_gil result.oopspecindex = oopspecindex cls._cache[key] = result return result + def check_can_raise(self): + return self.extraeffect > self.EF_CANNOT_RAISE + def check_can_invalidate(self): return self.can_invalidate @@ -114,56 +128,71 @@ return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE def has_random_effects(self): - return self.oopspecindex == self.OS_LIBFFI_CALL or self.can_release_gil + return self.extraeffect >= self.EF_RANDOM_EFFECTS + + +def frozenset_or_none(x): + if x is None: + return None + return frozenset(x) + +EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, + EffectInfo.EF_RANDOM_EFFECTS, + can_invalidate=True) + def effectinfo_from_writeanalyze(effects, cpu, extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, - can_invalidate=False, - can_release_gil=False): + can_invalidate=False): from pypy.translator.backendopt.writeanalyze import top_set - if effects is top_set: - return None - readonly_descrs_fields = [] - readonly_descrs_arrays = [] - write_descrs_fields = [] - write_descrs_arrays = [] + if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: + readonly_descrs_fields = None + readonly_descrs_arrays = None + write_descrs_fields = None + write_descrs_arrays = None + extraeffect = EffectInfo.EF_RANDOM_EFFECTS + else: + readonly_descrs_fields = [] + readonly_descrs_arrays = [] + write_descrs_fields = [] + write_descrs_arrays = [] - def add_struct(descrs_fields, (_, T, fieldname)): - T = deref(T) - if consider_struct(T, 
fieldname): - descr = cpu.fielddescrof(T, fieldname) - descrs_fields.append(descr) + def add_struct(descrs_fields, (_, T, fieldname)): + T = deref(T) + if consider_struct(T, fieldname): + descr = cpu.fielddescrof(T, fieldname) + descrs_fields.append(descr) - def add_array(descrs_arrays, (_, T)): - ARRAY = deref(T) - if consider_array(ARRAY): - descr = cpu.arraydescrof(ARRAY) - descrs_arrays.append(descr) + def add_array(descrs_arrays, (_, T)): + ARRAY = deref(T) + if consider_array(ARRAY): + descr = cpu.arraydescrof(ARRAY) + descrs_arrays.append(descr) - for tup in effects: - if tup[0] == "struct": - add_struct(write_descrs_fields, tup) - elif tup[0] == "readstruct": - tupw = ("struct",) + tup[1:] - if tupw not in effects: - add_struct(readonly_descrs_fields, tup) - elif tup[0] == "array": - add_array(write_descrs_arrays, tup) - elif tup[0] == "readarray": - tupw = ("array",) + tup[1:] - if tupw not in effects: - add_array(readonly_descrs_arrays, tup) - else: - assert 0 + for tup in effects: + if tup[0] == "struct": + add_struct(write_descrs_fields, tup) + elif tup[0] == "readstruct": + tupw = ("struct",) + tup[1:] + if tupw not in effects: + add_struct(readonly_descrs_fields, tup) + elif tup[0] == "array": + add_array(write_descrs_arrays, tup) + elif tup[0] == "readarray": + tupw = ("array",) + tup[1:] + if tupw not in effects: + add_array(readonly_descrs_arrays, tup) + else: + assert 0 + # return EffectInfo(readonly_descrs_fields, readonly_descrs_arrays, write_descrs_fields, write_descrs_arrays, extraeffect, oopspecindex, - can_invalidate, - can_release_gil) + can_invalidate) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: @@ -199,12 +228,13 @@ def analyze_simple_operation(self, op, graphinfo): return op.opname == 'jit_force_quasi_immutable' -class CanReleaseGILAnalyzer(BoolGraphAnalyzer): +class RandomEffectsAnalyzer(BoolGraphAnalyzer): def analyze_direct_call(self, graph, seen=None): - releases_gil = False if hasattr(graph, 
"func") and hasattr(graph.func, "_ptr"): - releases_gil = graph.func._ptr._obj.releases_gil - return releases_gil or super(CanReleaseGILAnalyzer, self).analyze_direct_call(graph, seen) + if graph.func._ptr._obj.random_effects_on_gcobjs: + return True + return super(RandomEffectsAnalyzer, self).analyze_direct_call(graph, + seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/pypy/jit/codewriter/jitcode.py b/pypy/jit/codewriter/jitcode.py --- a/pypy/jit/codewriter/jitcode.py +++ b/pypy/jit/codewriter/jitcode.py @@ -1,7 +1,6 @@ from pypy.jit.metainterp.history import AbstractDescr from pypy.jit.codewriter import heaptracker from pypy.rlib.objectmodel import we_are_translated -from pypy.rpython.lltypesystem import llmemory class JitCode(AbstractDescr): @@ -102,7 +101,7 @@ def _clone_if_mutable(self): raise NotImplementedError - + class MissingLiveness(Exception): pass diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1,18 +1,17 @@ -import py, sys -from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass -from pypy.rpython import rlist -from pypy.jit.metainterp.history import getkind -from pypy.objspace.flow.model import SpaceOperation, Variable, Constant -from pypy.objspace.flow.model import Block, Link, c_last_exception -from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets +import py + from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter.policy import log +from pypy.jit.metainterp import quasiimmut +from pypy.jit.metainterp.history import getkind from pypy.jit.metainterp.typesystem import deref, arrayItem -from pypy.jit.metainterp import quasiimmut -from pypy.rpython.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY +from 
pypy.objspace.flow.model import SpaceOperation, Variable, Constant, c_last_exception from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted +from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass, rffi +from pypy.rpython.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY from pypy.translator.simplify import get_funcobj from pypy.translator.unsimplify import varoftype @@ -24,6 +23,11 @@ t = Transformer(cpu, callcontrol, portal_jd) t.transform(graph) +def integer_bounds(size, unsigned): + if unsigned: + return 0, 1 << (8 * size) + else: + return -(1 << (8 * size - 1)), 1 << (8 * size - 1) class Transformer(object): vable_array_vars = None @@ -200,7 +204,6 @@ self.vable_array_vars[op.result]= self.vable_array_vars[op.args[0]] rewrite_op_cast_pointer = rewrite_op_same_as - rewrite_op_cast_opaque_ptr = rewrite_op_same_as # rlib.rerased def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass @@ -588,6 +591,7 @@ pure = '_pure' else: pure = '' + self.check_field_access(v_inst.concretetype.TO) argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc') descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) @@ -621,6 +625,7 @@ return [SpaceOperation('-live-', [], None), SpaceOperation('setfield_vable_%s' % kind, [v_inst, descr, v_value], None)] + self.check_field_access(v_inst.concretetype.TO) argname = getattr(v_inst.concretetype.TO, '_gckind', 'gc') descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) @@ -633,6 +638,22 @@ return (op.args[1].value == 'typeptr' and op.args[0].concretetype.TO._hints.get('typeptr')) + def check_field_access(self, STRUCT): + # check against a GcStruct with a nested GcStruct as a first argument + # but which is not an object at all; see metainterp/test/test_loop, + # test_regular_pointers_in_short_preamble. 
+ if not isinstance(STRUCT, lltype.GcStruct): + return + if STRUCT._first_struct() == (None, None): + return + PARENT = STRUCT + while not PARENT._hints.get('typeptr'): + _, PARENT = PARENT._first_struct() + if PARENT is None: + raise NotImplementedError("%r is a GcStruct using nesting but " + "not inheriting from object" % + (STRUCT,)) + def get_vinfo(self, v_virtualizable): if self.callcontrol is None: # for tests return None @@ -779,76 +800,127 @@ raise NotImplementedError("cast_ptr_to_int") def rewrite_op_force_cast(self, op): - assert not self._is_gc(op.args[0]) - fromll = longlong.is_longlong(op.args[0].concretetype) - toll = longlong.is_longlong(op.result.concretetype) - if fromll and toll: + v_arg = op.args[0] + v_result = op.result + assert not self._is_gc(v_arg) + + if v_arg.concretetype == v_result.concretetype: return - if fromll: - args = op.args - opname = 'truncate_longlong_to_int' - RESULT = lltype.Signed - v = varoftype(RESULT) - op1 = SpaceOperation(opname, args, v) - op2 = self.rewrite_operation(op1) - oplist = self.force_cast_without_longlong(op2.result, op.result) + + float_arg = v_arg.concretetype in [lltype.Float, lltype.SingleFloat] + float_res = v_result.concretetype in [lltype.Float, lltype.SingleFloat] + if not float_arg and not float_res: + # some int -> some int cast + return self._int_to_int_cast(v_arg, v_result) + elif float_arg and float_res: + # some float -> some float cast + return self._float_to_float_cast(v_arg, v_result) + elif not float_arg and float_res: + # some int -> some float + ops = [] + v1 = varoftype(lltype.Signed) + oplist = self.rewrite_operation( + SpaceOperation('force_cast', [v_arg], v1) + ) if oplist: - return [op2] + oplist - # - # force a renaming to put the correct result in place, even though - # it might be slightly mistyped (e.g. 
Signed versus Unsigned) - assert op2.result is v - op2.result = op.result - return op2 - elif toll: - from pypy.rpython.lltypesystem import rffi - size, unsigned = rffi.size_and_sign(op.args[0].concretetype) - if unsigned: + ops.extend(oplist) + else: + v1 = v_arg + v2 = varoftype(lltype.Float) + op = self.rewrite_operation( + SpaceOperation('cast_int_to_float', [v1], v2) + ) + ops.append(op) + op2 = self.rewrite_operation( + SpaceOperation('force_cast', [v2], v_result) + ) + if op2: + ops.append(op2) + else: + op.result = v_result + return ops + elif float_arg and not float_res: + # some float -> some int + ops = [] + v1 = varoftype(lltype.Float) + op1 = self.rewrite_operation( + SpaceOperation('force_cast', [v_arg], v1) + ) + if op1: + ops.append(op1) + else: + v1 = v_arg + v2 = varoftype(lltype.Signed) + op = self.rewrite_operation( + SpaceOperation('cast_float_to_int', [v1], v2) + ) + ops.append(op) + oplist = self.rewrite_operation( + SpaceOperation('force_cast', [v2], v_result) + ) + if oplist: + ops.extend(oplist) + else: + op.result = v_result + return ops + else: + assert False + + def _int_to_int_cast(self, v_arg, v_result): + longlong_arg = longlong.is_longlong(v_arg.concretetype) + longlong_res = longlong.is_longlong(v_result.concretetype) + size1, unsigned1 = rffi.size_and_sign(v_arg.concretetype) + size2, unsigned2 = rffi.size_and_sign(v_result.concretetype) + + if longlong_arg and longlong_res: + return + elif longlong_arg: + v = varoftype(lltype.Signed) + op1 = self.rewrite_operation( + SpaceOperation('truncate_longlong_to_int', [v_arg], v) + ) + op2 = SpaceOperation('force_cast', [v], v_result) + oplist = self.rewrite_operation(op2) + if not oplist: + op1.result = v_result + oplist = [] + return [op1] + oplist + elif longlong_res: + if unsigned1: INTERMEDIATE = lltype.Unsigned else: INTERMEDIATE = lltype.Signed v = varoftype(INTERMEDIATE) - oplist = self.force_cast_without_longlong(op.args[0], v) + op1 = SpaceOperation('force_cast', [v_arg], v) + 
oplist = self.rewrite_operation(op1) if not oplist: - v = op.args[0] + v = v_arg oplist = [] - if unsigned: + if unsigned1: opname = 'cast_uint_to_longlong' else: opname = 'cast_int_to_longlong' - op1 = SpaceOperation(opname, [v], op.result) - op2 = self.rewrite_operation(op1) + op2 = self.rewrite_operation( + SpaceOperation(opname, [v], v_result) + ) return oplist + [op2] - else: - return self.force_cast_without_longlong(op.args[0], op.result) - def force_cast_without_longlong(self, v_arg, v_result): - from pypy.rpython.lltypesystem.rffi import size_and_sign, sizeof, FLOAT - from pypy.rlib.rarithmetic import intmask - # - if (v_result.concretetype in (FLOAT, lltype.Float) or - v_arg.concretetype in (FLOAT, lltype.Float)): - assert (v_result.concretetype == lltype.Float and - v_arg.concretetype == lltype.Float), "xxx unsupported cast" + # We've now, ostensibly, dealt with the longlongs, everything should be + # a Signed or smaller + assert size1 <= rffi.sizeof(lltype.Signed) + assert size2 <= rffi.sizeof(lltype.Signed) + + # the target type is LONG or ULONG + if size2 == rffi.sizeof(lltype.Signed): return - # - size2, unsigned2 = size_and_sign(v_result.concretetype) - assert size2 <= sizeof(lltype.Signed) - if size2 == sizeof(lltype.Signed): - return # the target type is LONG or ULONG - size1, unsigned1 = size_and_sign(v_arg.concretetype) - assert size1 <= sizeof(lltype.Signed) - # - def bounds(size, unsigned): - if unsigned: - return 0, 1<<(8*size) - else: - return -(1<<(8*size-1)), 1<<(8*size-1) - min1, max1 = bounds(size1, unsigned1) - min2, max2 = bounds(size2, unsigned2) + + min1, max1 = integer_bounds(size1, unsigned1) + min2, max2 = integer_bounds(size2, unsigned2) + + # the target type includes the source range if min2 <= min1 <= max1 <= max2: - return # the target type includes the source range - # + return + result = [] if min2: c_min2 = Constant(min2, lltype.Signed) @@ -856,17 +928,29 @@ result.append(SpaceOperation('int_sub', [v_arg, c_min2], v2)) else: 
v2 = v_arg - c_mask = Constant(int((1<<(8*size2))-1), lltype.Signed) - v3 = varoftype(lltype.Signed) + c_mask = Constant(int((1 << (8 * size2)) - 1), lltype.Signed) + if min2: + v3 = varoftype(lltype.Signed) + else: + v3 = v_result result.append(SpaceOperation('int_and', [v2, c_mask], v3)) if min2: result.append(SpaceOperation('int_add', [v3, c_min2], v_result)) - else: - result[-1].result = v_result return result + def _float_to_float_cast(self, v_arg, v_result): + if v_arg.concretetype == lltype.SingleFloat: + assert v_result.concretetype == lltype.Float, "cast %s -> %s" % ( + v_arg.concretetype, v_result.concretetype) + return SpaceOperation('cast_singlefloat_to_float', [v_arg], + v_result) + if v_result.concretetype == lltype.SingleFloat: + assert v_arg.concretetype == lltype.Float, "cast %s -> %s" % ( + v_arg.concretetype, v_result.concretetype) + return SpaceOperation('cast_float_to_singlefloat', [v_arg], + v_result) + def rewrite_op_direct_ptradd(self, op): - from pypy.rpython.lltypesystem import rffi # xxx otherwise, not implemented: assert op.args[0].concretetype == rffi.CCHARP # @@ -919,7 +1003,7 @@ op1 = self.prepare_builtin_call(op, "llong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) if %r == "TO_INT": assert op2.result.concretetype == lltype.Signed return op2 @@ -1380,15 +1464,15 @@ otherindex += EffectInfo._OS_offset_uni self._register_extra_helper(otherindex, othername, argtypes, resulttype, - EffectInfo.EF_ELIDABLE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) # return self._handle_oopspec_call(op, args, dict[oopspec_name], - EffectInfo.EF_ELIDABLE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) def _handle_str2unicode_call(self, op, oopspec_name, args): - # ll_str2unicode is not EF_ELIDABLE, because it can raise - # UnicodeDecodeError... 
- return self._handle_oopspec_call(op, args, EffectInfo.OS_STR2UNICODE) + # ll_str2unicode can raise UnicodeDecodeError + return self._handle_oopspec_call(op, args, EffectInfo.OS_STR2UNICODE, + EffectInfo.EF_ELIDABLE_CAN_RAISE) # ---------- # VirtualRefs. @@ -1412,7 +1496,7 @@ extraeffect = EffectInfo.EF_CANNOT_RAISE elif oopspec_name.startswith('libffi_call_'): oopspecindex = EffectInfo.OS_LIBFFI_CALL - extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE + extraeffect = EffectInfo.EF_RANDOM_EFFECTS else: assert False, 'unsupported oopspec: %s' % oopspec_name return self._handle_oopspec_call(op, args, oopspecindex, extraeffect) @@ -1426,13 +1510,13 @@ assert vinfo is not None self.vable_flags[op.args[0]] = op.args[2].value return [] - + # --------- # ll_math.sqrt_nonneg() - + def _handle_math_sqrt_call(self, op, oopspec_name, args): return self._handle_oopspec_call(op, args, EffectInfo.OS_MATH_SQRT, - EffectInfo.EF_ELIDABLE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) def rewrite_op_jit_force_quasi_immutable(self, op): v_inst, c_fieldname = op.args diff --git a/pypy/jit/codewriter/longlong.py b/pypy/jit/codewriter/longlong.py --- a/pypy/jit/codewriter/longlong.py +++ b/pypy/jit/codewriter/longlong.py @@ -7,7 +7,8 @@ """ import sys -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi +from pypy.rlib import rarithmetic, longlong2float if sys.maxint > 2147483647: @@ -31,8 +32,6 @@ # ---------- 32-bit platform ---------- # the type FloatStorage is r_longlong, and conversion is needed - from pypy.rlib import rarithmetic, longlong2float - is_64_bit = False supports_longlong = True r_float_storage = rarithmetic.r_longlong @@ -41,9 +40,19 @@ getfloatstorage = longlong2float.float2longlong getrealfloat = longlong2float.longlong2float gethash = lambda xll: rarithmetic.intmask(xll - (xll >> 32)) - is_longlong = lambda TYPE: (TYPE == lltype.SignedLongLong or - TYPE == lltype.UnsignedLongLong) + is_longlong = lambda TYPE: (TYPE is 
lltype.SignedLongLong or + TYPE is lltype.UnsignedLongLong) # ------------------------------------- ZEROF = getfloatstorage(0.0) + +# ____________________________________________________________ + +def int2singlefloat(x): + x = rffi.r_uint(x) + return longlong2float.uint2singlefloat(x) + +def singlefloat2int(x): + x = longlong2float.singlefloat2uint(x) + return rffi.cast(lltype.Signed, x) diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -1,9 +1,7 @@ -from pypy.translator.simplify import get_funcobj from pypy.jit.metainterp import history -from pypy.rpython.lltypesystem import lltype, rclass from pypy.tool.udir import udir -import py, sys +import py from pypy.tool.ansi_print import ansi_log log = py.log.Producer('jitcodewriter') py.log.setconsumer('jitcodewriter', ansi_log) @@ -14,6 +12,7 @@ self.unsafe_loopy_graphs = set() self.supports_floats = False self.supports_longlong = False + self.supports_singlefloats = False def set_supports_floats(self, flag): self.supports_floats = flag @@ -21,6 +20,9 @@ def set_supports_longlong(self, flag): self.supports_longlong = flag + def set_supports_singlefloats(self, flag): + self.supports_singlefloats = flag + def dump_unsafe_loops(self): f = udir.join("unsafe-loops.txt").open('w') strs = [str(graph) for graph in self.unsafe_loopy_graphs] @@ -62,8 +64,9 @@ func, '_jit_unroll_if_const_', False) unsupported = contains_unsupported_variable_type(graph, - self.supports_floats, - self.supports_longlong) + self.supports_floats, + self.supports_longlong, + self.supports_singlefloats) res = see_function and not unsupported if res and contains_loop: self.unsafe_loopy_graphs.add(graph) @@ -84,17 +87,24 @@ return res def contains_unsupported_variable_type(graph, supports_floats, - supports_longlong): + supports_longlong, + supports_singlefloats): getkind = history.getkind try: for block in graph.iterblocks(): for v in block.inputargs: - 
getkind(v.concretetype, supports_floats, supports_longlong) + getkind(v.concretetype, supports_floats, + supports_longlong, + supports_singlefloats) for op in block.operations: for v in op.args: - getkind(v.concretetype, supports_floats, supports_longlong) + getkind(v.concretetype, supports_floats, + supports_longlong, + supports_singlefloats) v = op.result - getkind(v.concretetype, supports_floats, supports_longlong) + getkind(v.concretetype, supports_floats, + supports_longlong, + supports_singlefloats) except NotImplementedError, e: log.WARNING('%s, ignoring graph' % (e,)) log.WARNING(' %s' % (graph,)) diff --git a/pypy/jit/codewriter/regalloc.py b/pypy/jit/codewriter/regalloc.py --- a/pypy/jit/codewriter/regalloc.py +++ b/pypy/jit/codewriter/regalloc.py @@ -1,129 +1,8 @@ -import sys -from pypy.objspace.flow.model import Variable -from pypy.tool.algo.color import DependencyGraph -from pypy.tool.algo.unionfind import UnionFind +from pypy.tool.algo import regalloc from pypy.jit.metainterp.history import getkind from pypy.jit.codewriter.flatten import ListOfKind + def perform_register_allocation(graph, kind): - """Perform register allocation for the Variables of the given 'kind' - in the 'graph'.""" - regalloc = RegAllocator(graph, kind) - regalloc.make_dependencies() - regalloc.coalesce_variables() - regalloc.find_node_coloring() - return regalloc - - -class RegAllocator(object): - DEBUG_REGALLOC = False - - def __init__(self, graph, kind): - self.graph = graph - self.kind = kind - - def make_dependencies(self): - dg = DependencyGraph() - for block in self.graph.iterblocks(): - # Compute die_at = {Variable: index_of_operation_with_last_usage} - die_at = dict.fromkeys(block.inputargs, 0) - for i, op in enumerate(block.operations): - for v in op.args: - if isinstance(v, Variable): - die_at[v] = i - elif isinstance(v, ListOfKind): - for v1 in v: - if isinstance(v1, Variable): - die_at[v1] = i - if op.result is not None: - die_at[op.result] = i + 1 - if 
isinstance(block.exitswitch, tuple): - for x in block.exitswitch: - die_at.pop(x, None) - else: - die_at.pop(block.exitswitch, None) - for link in block.exits: - for v in link.args: - die_at.pop(v, None) - die_at = [(value, key) for (key, value) in die_at.items()] - die_at.sort() - die_at.append((sys.maxint,)) - # Done. XXX the code above this line runs 3 times - # (for kind in KINDS) to produce the same result... - livevars = [v for v in block.inputargs - if getkind(v.concretetype) == self.kind] - # Add the variables of this block to the dependency graph - for i, v in enumerate(livevars): - dg.add_node(v) - for j in range(i): - dg.add_edge(livevars[j], v) - livevars = set(livevars) - die_index = 0 - for i, op in enumerate(block.operations): - while die_at[die_index][0] == i: - try: - livevars.remove(die_at[die_index][1]) - except KeyError: - pass - die_index += 1 - if (op.result is not None and - getkind(op.result.concretetype) == self.kind): - dg.add_node(op.result) - for v in livevars: - if getkind(v.concretetype) == self.kind: - dg.add_edge(v, op.result) - livevars.add(op.result) - self._depgraph = dg - - def coalesce_variables(self): - self._unionfind = UnionFind() - pendingblocks = list(self.graph.iterblocks()) - while pendingblocks: - block = pendingblocks.pop() - # Aggressively try to coalesce each source variable with its - # target. We start from the end of the graph instead of - # from the beginning. This is a bit arbitrary, but the idea - # is that the end of the graph runs typically more often - # than the start, given that we resume execution from the - # middle during blackholing. 
- for link in block.exits: - if link.last_exception is not None: - self._depgraph.add_node(link.last_exception) - if link.last_exc_value is not None: - self._depgraph.add_node(link.last_exc_value) - for i, v in enumerate(link.args): - self._try_coalesce(v, link.target.inputargs[i]) - - def _try_coalesce(self, v, w): - if isinstance(v, Variable) and getkind(v.concretetype) == self.kind: - assert getkind(w.concretetype) == self.kind - dg = self._depgraph - uf = self._unionfind - v0 = uf.find_rep(v) - w0 = uf.find_rep(w) - if v0 is not w0 and v0 not in dg.neighbours[w0]: - _, rep, _ = uf.union(v0, w0) - assert uf.find_rep(v0) is uf.find_rep(w0) is rep - if rep is v0: - dg.coalesce(w0, v0) - else: - assert rep is w0 - dg.coalesce(v0, w0) - - def find_node_coloring(self): - self._coloring = self._depgraph.find_node_coloring() - if self.DEBUG_REGALLOC: - for block in self.graph.iterblocks(): - print block - for v in block.getvariables(): - print '\t', v, '\t', self.getcolor(v) - - def getcolor(self, v): - return self._coloring[self._unionfind.find_rep(v)] - - def swapcolors(self, col1, col2): - for key, value in self._coloring.items(): - if value == col1: - self._coloring[key] = col2 - elif value == col2: - self._coloring[key] = col1 + checkkind = lambda v: getkind(v.concretetype) == kind + return regalloc.perform_register_allocation(graph, checkkind, ListOfKind) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -20,6 +20,7 @@ from pypy.rpython.annlowlevel import MixLevelHelperAnnotator from pypy.jit.metainterp.typesystem import deref from pypy.rlib import rgc +from pypy.rlib.jit import elidable from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask def getargtypes(annotator, values): @@ -90,9 +91,12 @@ reds_v = op.args[2+numgreens:] assert len(reds_v) == numreds # - def _sort(args_v): + def _sort(args_v, is_green): from pypy.jit.metainterp.history 
import getkind lst = [v for v in args_v if v.concretetype is not lltype.Void] + if is_green: + assert len(lst) == len(args_v), ( + "not supported so far: 'greens' variables contain Void") _kind2count = {'int': 1, 'ref': 2, 'float': 3} lst2 = sorted(lst, key=lambda v: _kind2count[getkind(v.concretetype)]) # a crash here means that you have to reorder the variable named in @@ -101,7 +105,7 @@ assert lst == lst2 return lst # - return (_sort(greens_v), _sort(reds_v)) + return (_sort(greens_v, True), _sort(reds_v, False)) def maybe_on_top_of_llinterp(rtyper, fnptr): # Run a generated graph on top of the llinterp for testing. @@ -167,9 +171,14 @@ _ll_5_list_ll_arraycopy = rgc.ll_arraycopy + at elidable def _ll_1_gc_identityhash(x): return lltype.identityhash(x) +# the following function should not be "@elidable": I can think of +# a corner case in which id(const) is constant-folded, and then 'const' +# disappears and is collected too early (possibly causing another object +# with the same id() to appear). 
def _ll_1_gc_id(ptr): return llop.gc_id(lltype.Signed, ptr) @@ -420,10 +429,6 @@ _ll_1_dict_values.need_result_type = True _ll_1_dict_items .need_result_type = True - def _ll_1_newdictiter(ITER, d): - return ll_rdict.ll_dictiter(lltype.Ptr(ITER), d) - _ll_1_newdictiter.need_result_type = True - _dictnext_keys = staticmethod(ll_rdict.ll_dictnext_group['keys']) _dictnext_values = staticmethod(ll_rdict.ll_dictnext_group['values']) _dictnext_items = staticmethod(ll_rdict.ll_dictnext_group['items']) @@ -574,10 +579,6 @@ _ll_1_dict_values.need_result_type = True _ll_1_dict_items .need_result_type = True - def _ll_1_newdictiter(ITER, d): - return oo_rdict.ll_dictiter(ITER, d) - _ll_1_newdictiter.need_result_type = True - _dictnext_keys = staticmethod(oo_rdict.ll_dictnext_group['keys']) _dictnext_values = staticmethod(oo_rdict.ll_dictnext_group['values']) _dictnext_items = staticmethod(oo_rdict.ll_dictnext_group['items']) diff --git a/pypy/jit/codewriter/test/test_call.py b/pypy/jit/codewriter/test/test_call.py --- a/pypy/jit/codewriter/test/test_call.py +++ b/pypy/jit/codewriter/test/test_call.py @@ -202,4 +202,4 @@ [block, _] = list(f_graph.iterblocks()) [op] = block.operations call_descr = cc.getcalldescr(op) - assert call_descr.extrainfo.can_release_gil + assert call_descr.extrainfo.has_random_effects() diff --git a/pypy/jit/codewriter/test/test_codewriter.py b/pypy/jit/codewriter/test/test_codewriter.py --- a/pypy/jit/codewriter/test/test_codewriter.py +++ b/pypy/jit/codewriter/test/test_codewriter.py @@ -5,7 +5,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rffi class FakeCallDescr(AbstractDescr): - def __init__(self, FUNC, ARGS, RESULT, effectinfo=None): + def __init__(self, FUNC, ARGS, RESULT, effectinfo): self.FUNC = FUNC self.ARGS = ARGS self.RESULT = RESULT diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py --- a/pypy/jit/codewriter/test/test_flatten.py +++ b/pypy/jit/codewriter/test/test_flatten.py @@ 
-50,7 +50,7 @@ def __init__(self, rtyper): rtyper._builtin_func_for_spec_cache = FakeDict() self.rtyper = rtyper - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, effectinfo): return FakeDescr() def fielddescrof(self, STRUCT, name): return FakeDescr() @@ -324,7 +324,7 @@ def test_exc_exitswitch(self): def g(i): pass - + def f(i): try: g(i) @@ -854,13 +854,51 @@ int_return %i0 """, transform=True) - def test_force_cast_float(self): + def test_force_cast_floats(self): from pypy.rpython.lltypesystem import rffi + # Caststs to lltype.Float def f(n): return rffi.cast(lltype.Float, n) self.encoding_test(f, [12.456], """ float_return %f0 """, transform=True) + self.encoding_test(f, [rffi.cast(rffi.SIGNEDCHAR, 42)], """ + cast_int_to_float %i0 -> %f0 + float_return %f0 + """, transform=True) + + # Casts to lltype.SingleFloat + def g(n): + return rffi.cast(lltype.SingleFloat, n) + self.encoding_test(g, [12.456], """ + cast_float_to_singlefloat %f0 -> %i0 + int_return %i0 + """, transform=True) + self.encoding_test(g, [rffi.cast(rffi.SIGNEDCHAR, 42)], """ + cast_int_to_float %i0 -> %f0 + cast_float_to_singlefloat %f0 -> %i1 + int_return %i1 + """, transform=True) + + # Casts from floats + def f(n): + return rffi.cast(rffi.SIGNEDCHAR, n) + self.encoding_test(f, [12.456], """ + cast_float_to_int %f0 -> %i0 + int_sub %i0, $-128 -> %i1 + int_and %i1, $255 -> %i2 + int_add %i2, $-128 -> %i3 + int_return %i3 + """, transform=True) + self.encoding_test(f, [rffi.cast(lltype.SingleFloat, 12.456)], """ + cast_singlefloat_to_float %i0 -> %f0 + cast_float_to_int %f0 -> %i1 + int_sub %i1, $-128 -> %i2 + int_and %i2, $255 -> %i3 + int_add %i3, $-128 -> %i4 + int_return %i4 + """, transform=True) + def test_direct_ptradd(self): from pypy.rpython.lltypesystem import rffi diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ 
b/pypy/jit/codewriter/test/test_jtransform.py @@ -135,9 +135,9 @@ assert argtypes[0] == [v.concretetype for v in op.args[1:]] assert argtypes[1] == op.result.concretetype if oopspecindex == EI.OS_STR2UNICODE: - assert extraeffect == None # not pure, can raise! + assert extraeffect == EI.EF_ELIDABLE_CAN_RAISE else: - assert extraeffect == EI.EF_ELIDABLE + assert extraeffect == EI.EF_ELIDABLE_CANNOT_RAISE return 'calldescr-%d' % oopspecindex def calldescr_canraise(self, calldescr): return False @@ -797,7 +797,7 @@ def get_vinfo(self, v): return None def could_be_green_field(self, S1, name1): - assert S1 is S + assert S1 == S assert name1 == 'x' return True S = lltype.GcStruct('S', ('x', lltype.Char), @@ -1042,3 +1042,13 @@ assert op1.opname == 'jit_force_quasi_immutable' assert op1.args[0] == v_x assert op1.args[1] == ('fielddescr', STRUCT, 'mutate_x') + +def test_no_gcstruct_nesting_outside_of_OBJECT(): + PARENT = lltype.GcStruct('parent') + STRUCT = lltype.GcStruct('struct', ('parent', PARENT), + ('x', lltype.Signed)) + v_x = varoftype(lltype.Ptr(STRUCT)) + op = SpaceOperation('getfield', [v_x, Constant('x', lltype.Void)], + varoftype(lltype.Signed)) + tr = Transformer(None, None) + raises(NotImplementedError, tr.rewrite_operation, op) diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -230,3 +230,18 @@ assert list(op1.args[3]) == [] assert list(op1.args[4]) == vlist assert op1.result == v_result + + +##def test_singlefloat_constants(): +## v_x = varoftype(TYPE) +## vlist = [v_x, const(rffi.cast(TYPE, 7))] +## v_result = varoftype(TYPE) +## op = SpaceOperation('llong_add', vlist, v_result) +## tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) +## op1 = tr.rewrite_operation(op) +## # +## assert op1.opname == 'residual_call_irf_f' +## assert list(op1.args[2]) == [] +## assert list(op1.args[3]) == [] +## assert 
list(op1.args[4]) == vlist +## assert op1.result == v_result diff --git a/pypy/jit/codewriter/test/test_policy.py b/pypy/jit/codewriter/test/test_policy.py --- a/pypy/jit/codewriter/test/test_policy.py +++ b/pypy/jit/codewriter/test/test_policy.py @@ -12,24 +12,30 @@ graph = support.getgraph(f, [5]) for sf in [False, True]: for sll in [False, True]: - assert not contains_unsupported_variable_type(graph, sf, sll) + for ssf in [False, True]: + assert not contains_unsupported_variable_type(graph, sf, + sll, ssf) # graph = support.getgraph(f, [5.5]) for sf in [False, True]: for sll in [False, True]: - res = contains_unsupported_variable_type(graph, sf, sll) - assert res is not sf + for ssf in [False, True]: + res = contains_unsupported_variable_type(graph, sf, sll, ssf) + assert res is not sf # graph = support.getgraph(f, [r_singlefloat(5.5)]) for sf in [False, True]: for sll in [False, True]: - assert contains_unsupported_variable_type(graph, sf, sll) + for ssf in [False, True]: + res = contains_unsupported_variable_type(graph, sf, sll, ssf) + assert res == (not ssf) # graph = support.getgraph(f, [r_longlong(5)]) for sf in [False, True]: for sll in [False, True]: - res = contains_unsupported_variable_type(graph, sf, sll) - assert res == (sys.maxint == 2147483647 and not sll) + for ssf in [False, True]: + res = contains_unsupported_variable_type(graph, sf, sll, ssf) + assert res == (sys.maxint == 2147483647 and not sll) def test_regular_function(): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -500,6 +500,9 @@ @arguments("r", returns="i") def bhimpl_ptr_nonzero(a): return bool(a) + @arguments("r", returns="r") + def bhimpl_cast_opaque_ptr(a): + return a @arguments("i", returns="i") def bhimpl_int_copy(a): @@ -623,6 +626,19 @@ x = float(a) return longlong.getfloatstorage(x) + @arguments("f", returns="i") + def bhimpl_cast_float_to_singlefloat(a): + from 
pypy.rlib.rarithmetic import r_singlefloat + a = longlong.getrealfloat(a) + a = r_singlefloat(a) + return longlong.singlefloat2int(a) + + @arguments("i", returns="f") + def bhimpl_cast_singlefloat_to_float(a): + a = longlong.int2singlefloat(a) + a = float(a) + return longlong.getfloatstorage(a) + # ---------- # control flow operations diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -137,6 +137,10 @@ jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( greenkey, loop.preamble.token) record_loop_or_bridge(metainterp_sd, loop.preamble) + elif token.short_preamble: + short = token.short_preamble[-1] + metainterp_sd.logger_ops.log_short_preamble(short.inputargs, + short.operations) return token else: send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, @@ -637,6 +641,7 @@ debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise # InvalidLoop + debug_print('InvalidLoop in compile_new_bridge') return None # Did it work? if target_loop_token is not None: @@ -668,10 +673,9 @@ def handle_fail(self, metainterp_sd, jitdriver_sd): cpu = metainterp_sd.cpu exception = cpu.grab_exc_value() + assert exception, "PropagateExceptionDescr: no exception??" 
raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception) -propagate_exception_descr = PropagateExceptionDescr() - def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes, memory_manager=None): """Make a LoopToken that corresponds to assembler code that just @@ -705,7 +709,7 @@ finishargs = [] # jd = jitdriver_sd - faildescr = propagate_exception_descr + faildescr = PropagateExceptionDescr() operations = [ ResOperation(rop.CALL, callargs, result, descr=jd.portal_calldescr), ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr), diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -50,7 +50,7 @@ func = argboxes[0].getint() # do the call using the correct function from the cpu rettype = descr.get_return_type() - if rettype == INT: + if rettype == INT or rettype == 'S': # *S*ingle float try: result = cpu.bh_call_i(func, descr, args_i, args_r, args_f) except Exception, e: @@ -64,7 +64,7 @@ metainterp.execute_raised(e) result = NULL return BoxPtr(result) - if rettype == FLOAT or rettype == 'L': + if rettype == FLOAT or rettype == 'L': # *L*ong long try: result = cpu.bh_call_f(func, descr, args_i, args_r, args_f) except Exception, e: diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -20,12 +20,16 @@ FAILARGS_LIMIT = 1000 -def getkind(TYPE, supports_floats=True, supports_longlong=True): +def getkind(TYPE, supports_floats=True, + supports_longlong=True, + supports_singlefloats=True): if TYPE is lltype.Void: return "void" elif isinstance(TYPE, lltype.Primitive): if TYPE is lltype.Float and supports_floats: return 'float' + if TYPE is lltype.SingleFloat and supports_singlefloats: + return 'int' # singlefloats are stored in an int if TYPE in (lltype.Float, lltype.SingleFloat): raise NotImplementedError("type %s not supported" % TYPE) # XXX fix this 
for oo... @@ -145,6 +149,7 @@ """ Implement in call descr. Must return INT, REF, FLOAT, or 'v' for void. On 32-bit (hack) it can also be 'L' for longlongs. + Additionally it can be 'S' for singlefloats. """ raise NotImplementedError diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py --- a/pypy/jit/metainterp/optimize.py +++ b/pypy/jit/metainterp/optimize.py @@ -1,4 +1,4 @@ -from pypy.rlib.debug import debug_start, debug_stop +from pypy.rlib.debug import debug_start, debug_stop, debug_print from pypy.jit.metainterp.jitexc import JitException class InvalidLoop(JitException): diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -33,10 +33,6 @@ if name in enable_opts: if opt is not None: o = opt() - if unroll and name == 'string': - o.enabled = False - # FIXME: Workaround to disable string optimisation - # during preamble but to keep it during the loop optimizations.append(o) elif name == 'ffi' and config.translation.jit_ffi: # we cannot put the class directly in the unrolling_iterable, @@ -55,7 +51,7 @@ def optimize_loop_1(metainterp_sd, loop, enable_opts, - inline_short_preamble=True, retraced=False): + inline_short_preamble=True, retraced=False, bridge=False): """Optimize loop.operations to remove internal overheadish operations. 
""" @@ -64,7 +60,7 @@ if unroll: optimize_unroll(metainterp_sd, loop, optimizations) else: - optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer = Optimizer(metainterp_sd, loop, optimizations, bridge) optimizer.propagate_all_forward() def optimize_bridge_1(metainterp_sd, bridge, enable_opts, @@ -76,7 +72,7 @@ except KeyError: pass optimize_loop_1(metainterp_sd, bridge, enable_opts, - inline_short_preamble, retraced) + inline_short_preamble, retraced, bridge=True) if __name__ == '__main__': print ALL_OPTS_NAMES diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -1,12 +1,11 @@ from pypy.rpython.annlowlevel import cast_base_ptr_to_instance from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.libffi import Func -from pypy.rlib.debug import debug_start, debug_stop, debug_print, have_debug_prints +from pypy.rlib.debug import debug_print from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.optimizeopt.optimizer import Optimization -from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind class FuncInfo(object): @@ -19,28 +18,27 @@ def __init__(self, funcval, cpu, prepare_op): self.funcval = funcval self.opargs = [] - argtypes, restype = self._get_signature(funcval) - try: - self.descr = cpu.calldescrof_dynamic(argtypes, restype) - except UnsupportedKind: - # e.g., I or U for long longs - self.descr = None + argtypes, restype, flags = self._get_signature(funcval) + self.descr = cpu.calldescrof_dynamic(argtypes, restype, + EffectInfo.MOST_GENERAL, + ffi_flags=flags) + # ^^^ may be None if unsupported self.prepare_op = prepare_op self.delayed_ops = [] def _get_signature(self, funcval): """ - given the funcval, return a tuple 
(argtypes, restype), where the - actuall types are libffi.types.* + given the funcval, return a tuple (argtypes, restype, flags), where + the actuall types are libffi.types.* The implementation is tricky because we have three possible cases: - translated: the easiest case, we can just cast back the pointer to - the original Func instance and read .argtypes and .restype + the original Func instance and read .argtypes, .restype and .flags - completely untranslated: this is what we get from test_optimizeopt tests. funcval contains a FakeLLObject whose _fake_class is Func, - and we can just get .argtypes and .restype + and we can just get .argtypes, .restype and .flags - partially translated: this happens when running metainterp tests: funcval contains the low-level equivalent of a Func, and thus we @@ -48,14 +46,14 @@ inst_argtypes is actually a low-level array, but we can use it directly since the only thing we do with it is to read its items """ - + llfunc = funcval.box.getref_base() if we_are_translated(): func = cast_base_ptr_to_instance(Func, llfunc) - return func.argtypes, func.restype + return func.argtypes, func.restype, func.flags elif getattr(llfunc, '_fake_class', None) is Func: # untranslated - return llfunc.argtypes, llfunc.restype + return llfunc.argtypes, llfunc.restype, llfunc.flags else: # partially translated # llfunc contains an opaque pointer to something like the following: @@ -66,7 +64,7 @@ # because we don't have the exact TYPE to cast to. 
Instead, we # just fish it manually :-( f = llfunc._obj.container - return f.inst_argtypes, f.inst_restype + return f.inst_argtypes, f.inst_restype, f.inst_flags class OptFfiCall(Optimization): @@ -78,18 +76,9 @@ else: self.logops = None - def propagate_begin_forward(self): - debug_start('jit-log-ffiopt') - Optimization.propagate_begin_forward(self) - - def propagate_end_forward(self): - debug_stop('jit-log-ffiopt') - Optimization.propagate_end_forward(self) - - def reconstruct_for_next_iteration(self, optimizer, valuemap): + def new(self): return OptFfiCall() - # FIXME: Should any status be saved for next iteration? - + def begin_optimization(self, funcval, op): self.rollback_maybe('begin_optimization', op) self.funcinfo = FuncInfo(funcval, self.optimizer.cpu, op) @@ -184,7 +173,8 @@ def do_call(self, op): funcval = self._get_funcval(op) funcinfo = self.funcinfo - if not funcinfo or funcinfo.funcval is not funcval: + if (not funcinfo or funcinfo.funcval is not funcval or + funcinfo.descr is None): return [op] # cannot optimize funcsymval = self.getvalue(op.getarg(2)) arglist = [funcsymval.force_box()] @@ -207,9 +197,7 @@ def _get_oopspec(self, op): effectinfo = op.getdescr().get_extra_info() - if effectinfo is not None: - return effectinfo.oopspecindex - return EffectInfo.OS_NONE + return effectinfo.oopspecindex def _get_funcval(self, op): return self.getvalue(op.getarg(1)) diff --git a/pypy/jit/metainterp/optimizeopt/generalize.py b/pypy/jit/metainterp/optimizeopt/generalize.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/generalize.py @@ -0,0 +1,19 @@ +from pypy.jit.metainterp.optimizeopt.optimizer import MININT, MAXINT + +class GeneralizationStrategy(object): + def __init__(self, optimizer): + self.optimizer = optimizer + + def apply(self): + raise NotImplementedError + +class KillHugeIntBounds(GeneralizationStrategy): + def apply(self): + for v in self.optimizer.values.values(): + if v.is_constant(): + continue + if v.intbound.lower 
< MININT/2: + v.intbound.lower = MININT + if v.intbound.upper > MAXINT/2: + v.intbound.upper = MAXINT + diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -1,9 +1,11 @@ import os + +from pypy.jit.metainterp.jitexc import JitException +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY +from pypy.jit.metainterp.history import ConstInt, Const from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated -from pypy.jit.metainterp.jitexc import JitException -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization class CachedField(object): @@ -23,6 +25,7 @@ # 'cached_fields'. # self._cached_fields = {} + self._cached_fields_getfield_op = {} self._lazy_setfield = None self._lazy_setfield_registered = False @@ -34,6 +37,12 @@ self.force_lazy_setfield(optheap) assert not self.possible_aliasing(optheap, structvalue) cached_fieldvalue = self._cached_fields.get(structvalue, None) + + # Hack to ensure constants are imported from the preamble + if cached_fieldvalue and fieldvalue.is_constant(): + optheap.optimizer.ensure_imported(cached_fieldvalue) + cached_fieldvalue = self._cached_fields.get(structvalue, None) + if cached_fieldvalue is not fieldvalue: # common case: store the 'op' as lazy_setfield, and register # myself in the optheap's _lazy_setfields_and_arrayitems list @@ -69,9 +78,10 @@ else: return self._cached_fields.get(structvalue, None) - def remember_field_value(self, structvalue, fieldvalue): + def remember_field_value(self, structvalue, fieldvalue, getfield_op=None): assert self._lazy_setfield is None self._cached_fields[structvalue] = fieldvalue + self._cached_fields_getfield_op[structvalue] = getfield_op def force_lazy_setfield(self, optheap, can_cache=True): op = 
self._lazy_setfield @@ -80,7 +90,7 @@ # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). - self._cached_fields.clear() + self.clear() self._lazy_setfield = None optheap.next_optimization.propagate_forward(op) if not can_cache: @@ -90,19 +100,47 @@ # field. structvalue = optheap.getvalue(op.getarg(0)) fieldvalue = optheap.getvalue(op.getarglist()[-1]) - self.remember_field_value(structvalue, fieldvalue) + self.remember_field_value(structvalue, fieldvalue, op) elif not can_cache: - self._cached_fields.clear() + self.clear() - def get_reconstructed(self, optimizer, valuemap): - assert self._lazy_setfield is None - cf = CachedField() - for structvalue, fieldvalue in self._cached_fields.iteritems(): - structvalue2 = structvalue.get_reconstructed(optimizer, valuemap) - fieldvalue2 = fieldvalue .get_reconstructed(optimizer, valuemap) - cf._cached_fields[structvalue2] = fieldvalue2 - return cf + def clear(self): + self._cached_fields.clear() + self._cached_fields_getfield_op.clear() + def turned_constant(self, newvalue, value): + if newvalue not in self._cached_fields and value in self._cached_fields: + self._cached_fields[newvalue] = self._cached_fields[value] + op = self._cached_fields_getfield_op[value].clone() + constbox = value.box + assert isinstance(constbox, Const) + op.setarg(0, constbox) + self._cached_fields_getfield_op[newvalue] = op + for structvalue in self._cached_fields.keys(): + if self._cached_fields[structvalue] is value: + self._cached_fields[structvalue] = newvalue + + def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr): + if self._lazy_setfield is not None: + return + for structvalue in self._cached_fields_getfield_op.keys(): + op = self._cached_fields_getfield_op[structvalue] + if not op: + continue + if optimizer.getvalue(op.getarg(0)) in optimizer.opaque_pointers: + continue + if structvalue in self._cached_fields: + if op.getopnum() 
== rop.SETFIELD_GC: + result = op.getarg(1) + if isinstance(result, Const): + newresult = result.clonebox() + optimizer.make_constant(newresult, result) + result = newresult + getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)], + result, op.getdescr()) + shortboxes.add_potential(getop, synthetic=True) + elif op.result is not None: + shortboxes.add_potential(op) class BogusPureField(JitException): pass @@ -121,24 +159,32 @@ self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False - def reconstruct_for_next_iteration(self, optimizer, valuemap): - new = OptHeap() + def force_at_end_of_preamble(self): + self.force_all_lazy_setfields_and_arrayitems() - if True: - self.force_all_lazy_setfields_and_arrayitems() - else: - assert 0 # was: new.lazy_setfields = self.lazy_setfields + def flush(self): + self.force_all_lazy_setfields_and_arrayitems() - for descr, d in self.cached_fields.items(): - new.cached_fields[descr] = d.get_reconstructed(optimizer, valuemap) + def new(self): + return OptHeap() + + def produce_potential_short_preamble_ops(self, sb): + descrkeys = self.cached_fields.keys() + if not we_are_translated(): + # XXX Pure operation of boxes that are cached in several places will + # only be removed from the peeled loop when red from the first + # place discovered here. This is far from ideal, as it makes + # the effectiveness of our optimization a bit random. It should + # howevere always generate correct results. For tests we dont + # want this randomness. 
+ descrkeys.sort(key=str, reverse=True) + for descr in descrkeys: + d = self.cached_fields[descr] + d.produce_potential_short_preamble_ops(self.optimizer, sb, descr) for descr, submap in self.cached_arrayitems.items(): - newdict = {} for index, d in submap.items(): - newdict[index] = d.get_reconstructed(optimizer, valuemap) - new.cached_arrayitems[descr] = newdict - - return new + d.produce_potential_short_preamble_ops(self.optimizer, sb, descr) def clean_caches(self): del self._lazy_setfields_and_arrayitems[:] @@ -193,43 +239,43 @@ opnum == rop.CALL_RELEASE_GIL or opnum == rop.CALL_ASSEMBLER): if opnum == rop.CALL_ASSEMBLER: - effectinfo = None + self._seen_guard_not_invalidated = False else: effectinfo = op.getdescr().get_extra_info() - if effectinfo is None or effectinfo.check_can_invalidate(): - self._seen_guard_not_invalidated = False - if effectinfo is not None and not effectinfo.has_random_effects(): - # XXX we can get the wrong complexity here, if the lists - # XXX stored on effectinfo are large - for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) - for arraydescr in effectinfo.readonly_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr) - for fielddescr in effectinfo.write_descrs_fields: - self.force_lazy_setfield(fielddescr, can_cache=False) - for arraydescr in effectinfo.write_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr, can_cache=False) - if effectinfo.check_forces_virtual_or_virtualizable(): - vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) - # ^^^ we only need to force this field; the other fields - # of virtualref_info and virtualizable_info are not gcptrs. 
- return + if effectinfo.check_can_invalidate(): + self._seen_guard_not_invalidated = False + if not effectinfo.has_random_effects(): + self.force_from_effectinfo(effectinfo) + return self.force_all_lazy_setfields_and_arrayitems() self.clean_caches() + def force_from_effectinfo(self, effectinfo): + # XXX we can get the wrong complexity here, if the lists + # XXX stored on effectinfo are large + for fielddescr in effectinfo.readonly_descrs_fields: + self.force_lazy_setfield(fielddescr) + for arraydescr in effectinfo.readonly_descrs_arrays: + self.force_lazy_setarrayitem(arraydescr) + for fielddescr in effectinfo.write_descrs_fields: + self.force_lazy_setfield(fielddescr, can_cache=False) + for arraydescr in effectinfo.write_descrs_arrays: + self.force_lazy_setarrayitem(arraydescr, can_cache=False) + if effectinfo.check_forces_virtual_or_virtualizable(): + vrefinfo = self.optimizer.metainterp_sd.virtualref_info + self.force_lazy_setfield(vrefinfo.descr_forced) + # ^^^ we only need to force this field; the other fields + # of virtualref_info and virtualizable_info are not gcptrs. 
def turned_constant(self, value): assert value.is_constant() newvalue = self.getvalue(value.box) if value is not newvalue: for cf in self.cached_fields.itervalues(): - if value in cf._cached_fields: - cf._cached_fields[newvalue] = cf._cached_fields[value] + cf.turned_constant(newvalue, value) for submap in self.cached_arrayitems.itervalues(): for cf in submap.itervalues(): - if value in cf._cached_fields: - cf._cached_fields[newvalue] = cf._cached_fields[value] + cf.turned_constant(newvalue, value) def force_lazy_setfield(self, descr, can_cache=True): try: @@ -238,13 +284,14 @@ return cf.force_lazy_setfield(self, can_cache) - def force_lazy_setarrayitem(self, arraydescr, can_cache=True): + def force_lazy_setarrayitem(self, arraydescr, indexvalue=None, can_cache=True): try: submap = self.cached_arrayitems[arraydescr] except KeyError: return - for cf in submap.values(): - cf.force_lazy_setfield(self, can_cache) + for idx, cf in submap.iteritems(): + if indexvalue is None or indexvalue.intbound.contains(idx): + cf.force_lazy_setfield(self, can_cache) def fixup_guard_situation(self): # hackish: reverse the order of the last two operations if it makes @@ -331,7 +378,7 @@ self.emit_operation(op) # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - cf.remember_field_value(structvalue, fieldvalue) + cf.remember_field_value(structvalue, fieldvalue, op) def optimize_SETFIELD_GC(self, op): if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)], @@ -348,6 +395,7 @@ indexvalue = self.getvalue(op.getarg(1)) cf = None if indexvalue.is_constant(): + arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) # use the cache on (arraydescr, index), which is a constant cf = self.arrayitem_cache(op.getdescr(), indexvalue.box.getint()) fieldvalue = cf.getfield_from_cache(self, arrayvalue) @@ -356,14 +404,14 @@ return else: # variable index, so make sure the lazy setarrayitems are done - self.force_lazy_setarrayitem(op.getdescr()) 
+ self.force_lazy_setarrayitem(op.getdescr(), indexvalue=indexvalue) # default case: produce the operation arrayvalue.ensure_nonnull() self.emit_operation(op) # the remember the result of reading the array item if cf is not None: fieldvalue = self.getvalue(op.result) - cf.remember_field_value(arrayvalue, fieldvalue) + cf.remember_field_value(arrayvalue, fieldvalue, op) def optimize_SETARRAYITEM_GC(self, op): if self.has_pure_result(rop.GETARRAYITEM_GC_PURE, [op.getarg(0), @@ -375,12 +423,14 @@ # indexvalue = self.getvalue(op.getarg(1)) if indexvalue.is_constant(): + arrayvalue = self.getvalue(op.getarg(0)) + arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) # use the cache on (arraydescr, index), which is a constant cf = self.arrayitem_cache(op.getdescr(), indexvalue.box.getint()) cf.do_setfield(self, op) else: # variable index, so make sure the lazy setarrayitems are done - self.force_lazy_setarrayitem(op.getdescr(), can_cache=False) + self.force_lazy_setarrayitem(op.getdescr(), indexvalue=indexvalue, can_cache=False) # and then emit the operation self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -1,9 +1,11 @@ -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0 +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0, \ + MODE_ARRAY, MODE_STR, MODE_UNICODE +from pypy.jit.metainterp.history import ConstInt +from pypy.jit.metainterp.optimizeopt.intutils import (IntBound, IntLowerBound, + IntUpperBound) from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method -from pypy.jit.metainterp.optimizeopt.intutils import (IntBound, IntUnbounded, - IntLowerBound, IntUpperBound) -from pypy.jit.metainterp.history import Const, ConstInt -from pypy.jit.metainterp.resoperation import rop, 
ResOperation +from pypy.jit.metainterp.resoperation import rop + class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove @@ -13,18 +15,17 @@ self.posponedop = None self.nextop = None - def reconstruct_for_next_iteration(self, optimizer, valuemap): + def new(self): assert self.posponedop is None - return self + return OptIntBounds() + + def flush(self): + assert self.posponedop is None def setup(self): self.posponedop = None self.nextop = None - def reconstruct_for_next_iteration(self, optimizer, valuemap): - assert self.posponedop is None - return self - def propagate_forward(self, op): if op.is_ovf(): self.posponedop = op @@ -124,6 +125,17 @@ r = self.getvalue(op.result) r.intbound.intersect(v1.intbound.div_bound(v2.intbound)) + def optimize_INT_MOD(self, op): + self.emit_operation(op) + v2 = self.getvalue(op.getarg(1)) + if v2.is_constant(): + val = v2.box.getint() + r = self.getvalue(op.result) + if val < 0: + val = -val + r.intbound.make_gt(IntBound(-val, -val)) + r.intbound.make_lt(IntBound(val, val)) + def optimize_INT_LSHIFT(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) @@ -274,10 +286,27 @@ def optimize_ARRAYLEN_GC(self, op): self.emit_operation(op) - v1 = self.getvalue(op.result) - v1.intbound.make_ge(IntLowerBound(0)) + array = self.getvalue(op.getarg(0)) + result = self.getvalue(op.result) + array.make_len_gt(MODE_ARRAY, op.getdescr(), -1) + array.lenbound.bound.intersect(result.intbound) + result.intbound = array.lenbound.bound - optimize_STRLEN = optimize_UNICODELEN = optimize_ARRAYLEN_GC + def optimize_STRLEN(self, op): + self.emit_operation(op) + array = self.getvalue(op.getarg(0)) + result = self.getvalue(op.result) + array.make_len_gt(MODE_STR, op.getdescr(), -1) + array.lenbound.bound.intersect(result.intbound) + result.intbound = array.lenbound.bound + + def optimize_UNICODELEN(self, op): + self.emit_operation(op) + array = self.getvalue(op.getarg(0)) + result = 
self.getvalue(op.result) + array.make_len_gt(MODE_UNICODE, op.getdescr(), -1) + array.lenbound.bound.intersect(result.intbound) + result.intbound = array.lenbound.bound def optimize_STRGETITEM(self, op): self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,4 +1,9 @@ from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift, LONG_BIT +from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.jit.metainterp.history import BoxInt, ConstInt +import sys +MAXINT = sys.maxint +MININT = -sys.maxint - 1 class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') @@ -210,11 +215,11 @@ def __repr__(self): if self.has_lower: - l = '%4d' % self.lower + l = '%d' % self.lower else: l = '-Inf' if self.has_upper: - u = '%3d' % self.upper + u = '%d' % self.upper else: u = 'Inf' return '%s <= x <= %s' % (l, u) @@ -224,7 +229,24 @@ res.has_lower = self.has_lower res.has_upper = self.has_upper return res + + def make_guards(self, box, guards): + if self.has_lower and self.lower > MININT: + bound = self.lower + res = BoxInt() + op = ResOperation(rop.INT_GE, [box, ConstInt(bound)], res) + guards.append(op) + op = ResOperation(rop.GUARD_TRUE, [res], None) + guards.append(op) + if self.has_upper and self.upper < MAXINT: + bound = self.upper + res = BoxInt() + op = ResOperation(rop.INT_LE, [box, ConstInt(bound)], res) + guards.append(op) + op = ResOperation(rop.GUARD_TRUE, [res], None) + guards.append(op) + class IntUpperBound(IntBound): def __init__(self, upper): self.has_upper = True @@ -244,7 +266,23 @@ self.has_upper = False self.has_lower = False self.upper = 0 - self.lower = 0 + self.lower = 0 + +class ImmutableIntUnbounded(IntUnbounded): + def _raise(self): + raise TypeError('ImmutableIntUnbounded is immutable') + def make_le(self, other): + self._raise() + def 
make_lt(self, other): + self._raise() + def make_ge(self, other): + self._raise() + def make_gt(self, other): + self._raise() + def make_constant(self, value): + self._raise() + def intersect(self, other): + self._raise() def min4(t): return min(min(t[0], t[1]), min(t[2], t[3])) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -1,70 +1,128 @@ -from pypy.jit.metainterp.history import Box, BoxInt, LoopToken, BoxFloat,\ - ConstFloat -from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstObj, REF +from pypy.jit.metainterp import jitprof, resume, compile +from pypy.jit.metainterp.executor import execute_nonspec +from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF +from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ + ImmutableIntUnbounded, \ + IntLowerBound, MININT, MAXINT +from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method, + args_dict) from pypy.jit.metainterp.resoperation import rop, ResOperation -from pypy.jit.metainterp import jitprof -from pypy.jit.metainterp.executor import execute_nonspec -from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method, sort_descrs -from pypy.jit.metainterp.optimizeopt.util import descrlist_dict, args_dict -from pypy.jit.metainterp.optimize import InvalidLoop -from pypy.jit.metainterp import resume, compile from pypy.jit.metainterp.typesystem import llhelper, oohelper -from pypy.rpython.lltypesystem import lltype -from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int -from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded from pypy.tool.pairtype import extendabletype +from pypy.rlib.debug import debug_start, debug_stop, debug_print +from pypy.rlib.objectmodel import specialize LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' 
LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays LEVEL_CONSTANT = '\x03' -import sys -MAXINT = sys.maxint -MININT = -sys.maxint - 1 +MODE_ARRAY = '\x00' +MODE_STR = '\x01' +MODE_UNICODE = '\x02' +class LenBound(object): + def __init__(self, mode, descr, bound): + self.mode = mode + self.descr = descr + self.bound = bound + + def clone(self): + return LenBound(self.mode, self.descr, self.bound.clone()) class OptValue(object): __metaclass__ = extendabletype - _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound') + _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', 'lenbound') last_guard_index = -1 level = LEVEL_UNKNOWN known_class = None - intbound = None + intbound = ImmutableIntUnbounded() + lenbound = None - def __init__(self, box): + def __init__(self, box, level=None, known_class=None, intbound=None): self.box = box - self.intbound = IntBound(MININT, MAXINT) #IntUnbounded() + if level is not None: + self.level = level + self.known_class = known_class + if intbound: + self.intbound = intbound + else: + if isinstance(box, BoxInt): + self.intbound = IntBound(MININT, MAXINT) + else: + self.intbound = IntUnbounded() + if isinstance(box, Const): self.make_constant(box) # invariant: box is a Const if and only if level == LEVEL_CONSTANT + def make_len_gt(self, mode, descr, val): + if self.lenbound: + assert self.lenbound.mode == mode + assert self.lenbound.descr == descr + self.lenbound.bound.make_gt(IntBound(val, val)) + else: + self.lenbound = LenBound(mode, descr, IntLowerBound(val + 1)) + + def make_guards(self, box): + guards = [] + if self.level == LEVEL_CONSTANT: + op = ResOperation(rop.GUARD_VALUE, [box, self.box], None) + guards.append(op) + elif self.level == LEVEL_KNOWNCLASS: + op = ResOperation(rop.GUARD_NONNULL, [box], None) + guards.append(op) + op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + guards.append(op) + else: + if self.level == LEVEL_NONNULL: + op = 
ResOperation(rop.GUARD_NONNULL, [box], None) + guards.append(op) + self.intbound.make_guards(box, guards) + if self.lenbound: + lenbox = BoxInt() + if self.lenbound.mode == MODE_ARRAY: + op = ResOperation(rop.ARRAYLEN_GC, [box], lenbox, self.lenbound.descr) + elif self.lenbound.mode == MODE_STR: + op = ResOperation(rop.STRLEN, [box], lenbox, self.lenbound.descr) + elif self.lenbound.mode == MODE_UNICODE: + op = ResOperation(rop.UNICODELEN, [box], lenbox, self.lenbound.descr) + else: + debug_print("Unknown lenbound mode") + assert False + guards.append(op) + self.lenbound.bound.make_guards(lenbox, guards) + return guards + + def import_from(self, other, optimizer): + assert self.level <= LEVEL_NONNULL + if other.level == LEVEL_CONSTANT: + self.make_constant(other.get_key_box()) + optimizer.turned_constant(self) + elif other.level == LEVEL_KNOWNCLASS: + self.make_constant_class(other.known_class, -1) + else: + if other.level == LEVEL_NONNULL: + self.ensure_nonnull() + self.intbound.intersect(other.intbound) + if other.lenbound: + if self.lenbound: + assert other.lenbound.mode == self.lenbound.mode + assert other.lenbound.descr == self.lenbound.descr + self.lenbound.bound.intersect(other.lenbound.bound) + else: + self.lenbound = other.lenbound.clone() + + def force_box(self): return self.box def get_key_box(self): return self.box - def enum_forced_boxes(self, boxes, already_seen): - key = self.get_key_box() - if key not in already_seen: - boxes.append(self.force_box()) - already_seen[self.get_key_box()] = None - - def get_reconstructed(self, optimizer, valuemap): - if self in valuemap: - return valuemap[self] - new = self.reconstruct_for_next_iteration(optimizer) - valuemap[self] = new - self.reconstruct_childs(new, valuemap) - return new - - def reconstruct_for_next_iteration(self, optimizer): + def force_at_end_of_preamble(self, already_forced): return self - def reconstruct_childs(self, new, valuemap): - pass - def get_args_for_fail(self, modifier): pass @@ -88,6 
+146,7 @@ assert isinstance(constbox, Const) self.box = constbox self.level = LEVEL_CONSTANT + if isinstance(constbox, ConstInt): val = constbox.getint() self.intbound = IntBound(val, val) @@ -228,7 +287,9 @@ def pure(self, opnum, args, result): op = ResOperation(opnum, args, result) - self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op + key = self.optimizer.make_args_key(op) + if key not in self.optimizer.pure_operations: + self.optimizer.pure_operations[key] = op def has_pure_result(self, opnum, args, descr): op = ResOperation(opnum, args, None, descr) @@ -241,39 +302,47 @@ def setup(self): pass + def turned_constant(self, value): + pass + def force_at_end_of_preamble(self): pass - def turned_constant(self, value): + # It is too late to force stuff here, it must be done in force_at_end_of_preamble + def new(self): + raise NotImplementedError + + # Called after last operation has been propagated to flush out any posponed ops + def flush(self): pass - def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): - #return self.__class__() - raise NotImplementedError - + def produce_potential_short_preamble_ops(self, potential_ops): + pass class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=None): + def __init__(self, metainterp_sd, loop, optimizations=None, bridge=False): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop + self.bridge = bridge self.values = {} self.interned_refs = self.cpu.ts.new_ref_dict() self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd) self.bool_boxes = {} - self.loop_invariant_results = {} self.pure_operations = args_dict() self.producer = {} self.pendingfields = [] self.posponedop = None self.exception_might_have_happened = False self.quasi_immutable_deps = None + self.opaque_pointers = {} self.newoperations = [] if loop is not None: self.call_pure_results = loop.call_pure_results self.set_optimizations(optimizations) + self.setup() 
def set_optimizations(self, optimizations): if optimizations: @@ -291,39 +360,27 @@ self.optimizations = optimizations def force_at_end_of_preamble(self): - self.resumedata_memo = resume.ResumeDataLoopMemo(self.metainterp_sd) for o in self.optimizations: o.force_at_end_of_preamble() - def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): - assert optimizer is None - assert valuemap is None - valuemap = {} + def flush(self): + for o in self.optimizations: + o.flush() + assert self.posponedop is None + + def new(self): new = Optimizer(self.metainterp_sd, self.loop) - optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in - self.optimizations] + return self._new(new) + + def _new(self, new): + assert self.posponedop is None + optimizations = [o.new() for o in self.optimizations] new.set_optimizations(optimizations) - - new.values = {} - for box, value in self.values.items(): - new.values[box] = value.get_reconstructed(new, valuemap) - new.interned_refs = self.interned_refs - new.bool_boxes = {} - for value in new.bool_boxes.keys(): - new.bool_boxes[value.get_reconstructed(new, valuemap)] = None - - # FIXME: Move to rewrite.py - new.loop_invariant_results = {} - for key, value in self.loop_invariant_results.items(): - new.loop_invariant_results[key] = \ - value.get_reconstructed(new, valuemap) - - new.pure_operations = self.pure_operations - new.producer = self.producer - assert self.posponedop is None new.quasi_immutable_deps = self.quasi_immutable_deps - return new + + def produce_potential_short_preamble_ops(self, sb): + raise NotImplementedError('This is implemented in unroll.UnrollableOptimizer') def turned_constant(self, value): for o in self.optimizations: @@ -345,19 +402,26 @@ else: return box + @specialize.argtype(0) def getvalue(self, box): box = self.getinterned(box) try: value = self.values[box] except KeyError: value = self.values[box] = OptValue(box) + self.ensure_imported(value) return value + def 
ensure_imported(self, value): + pass + + @specialize.argtype(0) def get_constant_box(self, box): if isinstance(box, Const): return box try: value = self.values[box] + self.ensure_imported(value) except KeyError: return None if value.is_constant(): @@ -413,9 +477,7 @@ return CVAL_ZERO def propagate_all_forward(self): - self.exception_might_have_happened = True - # ^^^ at least at the start of bridges. For loops, we could set - # it to False, but we probably don't care + self.exception_might_have_happened = self.bridge self.newoperations = [] self.first_optimization.propagate_begin_forward() self.i = 0 @@ -440,23 +502,27 @@ return True def emit_operation(self, op): - ###self.heap_op_optimizer.emitting_operation(op) + if op.returns_bool_result(): + self.bool_boxes[self.getvalue(op.result)] = None self._emit_operation(op) - - def _emit_operation(self, op): + + @specialize.argtype(0) + def _emit_operation(self, op): for i in range(op.numargs()): arg = op.getarg(i) - if arg in self.values: - box = self.values[arg].force_box() - op.setarg(i, box) + try: + value = self.values[arg] + except KeyError: + pass + else: + self.ensure_imported(value) + op.setarg(i, value.force_box()) self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True - elif op.returns_bool_result(): - self.bool_boxes[self.getvalue(op.result)] = None self.newoperations.append(op) def store_final_boxes_in_guard(self, op): @@ -505,6 +571,7 @@ args[n+1] = op.getdescr() return args + @specialize.argtype(0) def optimize_default(self, op): canfold = op.is_always_pure() if op.is_ovf(): @@ -540,12 +607,16 @@ return else: self.pure_operations[args] = op + self.remember_emitting_pure(op) # otherwise, the operation remains self.emit_operation(op) if nextop: self.emit_operation(nextop) + def remember_emitting_pure(self, op): + pass + def 
constant_fold(self, op): argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] @@ -562,6 +633,35 @@ def optimize_DEBUG_MERGE_POINT(self, op): self.emit_operation(op) + def optimize_CAST_OPAQUE_PTR(self, op): + value = self.getvalue(op.getarg(0)) + self.opaque_pointers[value] = True + self.make_equal_to(op.result, value) + + def optimize_GETARRAYITEM_GC_PURE(self, op): + indexvalue = self.getvalue(op.getarg(1)) + if indexvalue.is_constant(): + arrayvalue = self.getvalue(op.getarg(0)) + arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) + self.optimize_default(op) + + def optimize_STRGETITEM(self, op): + indexvalue = self.getvalue(op.getarg(1)) + if indexvalue.is_constant(): + arrayvalue = self.getvalue(op.getarg(0)) + arrayvalue.make_len_gt(MODE_STR, op.getdescr(), indexvalue.box.getint()) + self.optimize_default(op) + + def optimize_UNICODEGETITEM(self, op): + indexvalue = self.getvalue(op.getarg(1)) + if indexvalue.is_constant(): + arrayvalue = self.getvalue(op.getarg(0)) + arrayvalue.make_len_gt(MODE_UNICODE, op.getdescr(), indexvalue.box.getint()) + self.optimize_default(op) + + + + dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', default=Optimizer.optimize_default) diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -1,10 +1,11 @@ +from pypy.jit.codewriter.effectinfo import EffectInfo +from pypy.jit.metainterp.history import ConstInt, make_hashable_int +from pypy.jit.metainterp.optimize import InvalidLoop +from pypy.jit.metainterp.optimizeopt.intutils import IntBound from pypy.jit.metainterp.optimizeopt.optimizer import * -from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex -from pypy.jit.metainterp.history import ConstInt from pypy.jit.metainterp.optimizeopt.util import _findall, make_dispatcher_method -from 
pypy.jit.metainterp.resoperation import rop, ResOperation -from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.optimizeopt.intutils import IntBound +from pypy.jit.metainterp.resoperation import (opboolinvers, opboolreflex, rop, + ResOperation) from pypy.rlib.rarithmetic import highest_bit @@ -12,9 +13,16 @@ """Rewrite operations into equivalent, cheaper operations. This includes already executed operations and constants. """ + def __init__(self): + self.loop_invariant_results = {} + self.loop_invariant_producer = {} - def reconstruct_for_next_iteration(self, optimizer, valuemap): - return self From noreply at buildbot.pypy.org Tue Sep 13 01:55:52 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Sep 2011 01:55:52 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-const: reclose this branch, I accidentally opened it Message-ID: <20110912235552.5329B82213@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-const Changeset: r47235:df69255f489f Date: 2011-09-12 19:53 -0400 http://bitbucket.org/pypy/pypy/changeset/df69255f489f/ Log: reclose this branch, I accidentally opened it From noreply at buildbot.pypy.org Tue Sep 13 01:55:54 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Sep 2011 01:55:54 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: merged upstream. Message-ID: <20110912235554.6985D82041@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47236:f49f274ed3da Date: 2011-09-12 19:55 -0400 http://bitbucket.org/pypy/pypy/changeset/f49f274ed3da/ Log: merged upstream. 
diff too long, truncating to 10000 out of 10968 lines diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -359,7 +359,7 @@ RegrTest('test_property.py', core=True), RegrTest('test_pstats.py'), RegrTest('test_pty.py', skip="unsupported extension module"), - RegrTest('test_pwd.py', skip=skip_win32), + RegrTest('test_pwd.py', usemodules="pwd", skip=skip_win32), RegrTest('test_py3kwarn.py'), RegrTest('test_pyclbr.py'), RegrTest('test_pydoc.py'), diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py --- a/lib-python/modified-2.7/sqlite3/test/regression.py +++ b/lib-python/modified-2.7/sqlite3/test/regression.py @@ -274,6 +274,18 @@ cur.execute("UPDATE foo SET id = 3 WHERE id = 1") self.assertEqual(cur.description, None) + def CheckStatementCache(self): + cur = self.con.cursor() + cur.execute("CREATE TABLE foo (id INTEGER)") + values = [(i,) for i in xrange(5)] + cur.executemany("INSERT INTO foo (id) VALUES (?)", values) + + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + self.con.commit() + cur.execute("SELECT id FROM foo") + self.assertEqual(list(cur), values) + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -54,7 +54,8 @@ def get_ffi_argtype(self): if self._ffiargtype: return self._ffiargtype - return _shape_to_ffi_type(self._ffiargshape) + self._ffiargtype = _shape_to_ffi_type(self._ffiargshape) + return self._ffiargtype def _CData_output(self, resbuffer, base=None, index=-1): #assert isinstance(resbuffer, _rawffi.ArrayInstance) @@ -225,6 +226,7 @@ 'Z' : _ffi.types.void_p, 'X' : _ffi.types.void_p, 'v' : _ffi.types.sshort, + '?' 
: _ffi.types.ubyte, } diff --git a/lib_pypy/_elementtree.py b/lib_pypy/_elementtree.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_elementtree.py @@ -0,0 +1,6 @@ +# Just use ElementTree. + +from xml.etree import ElementTree + +globals().update(ElementTree.__dict__) +del __all__ diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -56,6 +56,10 @@ prompt = getattr(sys, 'ps1', '>>> ') try: line = raw_input(prompt) + # Can be None if sys.stdin was redefined + encoding = getattr(sys.stdin, 'encoding', None) + if encoding and not isinstance(line, unicode): + line = line.decode(encoding) except EOFError: console.write("\n") break diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -293,7 +293,7 @@ # if stat.in_use: stat = Statement(self.connection, sql) - stat.set_cursor_and_factory(cursor, row_factory) + stat.set_row_factory(row_factory) return stat @@ -705,6 +705,8 @@ from sqlite3.dump import _iterdump return _iterdump(self) +DML, DQL, DDL = range(3) + class Cursor(object): def __init__(self, con): if not isinstance(con, Connection): @@ -735,9 +737,9 @@ self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) if self.connection._isolation_level is not None: - if self.statement.kind == "DDL": + if self.statement.kind == DDL: self.connection.commit() - elif self.statement.kind == "DML": + elif self.statement.kind == DML: self.connection._begin() self.statement.set_params(params) @@ -748,18 +750,18 @@ self.statement.reset() raise self.connection._get_exception(ret) - if self.statement.kind == "DQL"and ret == SQLITE_ROW: + if self.statement.kind == DQL and ret == SQLITE_ROW: self.statement._build_row_cast_map() - self.statement._readahead() + self.statement._readahead(self) else: self.statement.item = None self.statement.exhausted = True - if self.statement.kind in ("DML", "DDL"): + if 
self.statement.kind == DML or self.statement.kind == DDL: self.statement.reset() self.rowcount = -1 - if self.statement.kind == "DML": + if self.statement.kind == DML: self.rowcount = sqlite.sqlite3_changes(self.connection.db) return self @@ -771,8 +773,8 @@ sql = sql.encode("utf-8") self._check_closed() self.statement = self.connection.statement_cache.get(sql, self, self.row_factory) - - if self.statement.kind == "DML": + + if self.statement.kind == DML: self.connection._begin() else: raise ProgrammingError, "executemany is only for DML statements" @@ -824,7 +826,7 @@ return self def __iter__(self): - return self.statement + return iter(self.fetchone, None) def _check_reset(self): if self.reset: @@ -841,7 +843,7 @@ return None try: - return self.statement.next() + return self.statement.next(self) except StopIteration: return None @@ -855,7 +857,7 @@ if size is None: size = self.arraysize lst = [] - for row in self.statement: + for row in self: lst.append(row) if len(lst) == size: break @@ -866,7 +868,7 @@ self._check_reset() if self.statement is None: return [] - return list(self.statement) + return list(self) def _getdescription(self): if self._description is None: @@ -904,16 +906,15 @@ self.sql = sql # DEBUG ONLY first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): - self.kind = "DML" + self.kind = DML elif first_word in ("SELECT", "PRAGMA"): - self.kind = "DQL" + self.kind = DQL else: - self.kind = "DDL" + self.kind = DDL self.exhausted = False self.in_use = False # - # set by set_cursor_and_factory - self.cur = None + # set by set_row_factory self.row_factory = None self.statement = c_void_p() @@ -923,7 +924,7 @@ if ret == SQLITE_OK and self.statement.value is None: # an empty statement, we work around that, as it's the least trouble ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char)) - self.kind = "DQL" + self.kind = DQL if ret != 
SQLITE_OK: raise self.con._get_exception(ret) @@ -935,8 +936,7 @@ self._build_row_cast_map() - def set_cursor_and_factory(self, cur, row_factory): - self.cur = weakref.ref(cur) + def set_row_factory(self, row_factory): self.row_factory = row_factory def _build_row_cast_map(self): @@ -1039,10 +1039,7 @@ raise ProgrammingError("missing parameter '%s'" %param) self.set_param(idx, param) - def __iter__(self): - return self - - def next(self): + def next(self, cursor): self.con._check_closed() self.con._check_thread() if self.exhausted: @@ -1058,10 +1055,10 @@ sqlite.sqlite3_reset(self.statement) raise exc - self._readahead() + self._readahead(cursor) return item - def _readahead(self): + def _readahead(self, cursor): self.column_count = sqlite.sqlite3_column_count(self.statement) row = [] for i in xrange(self.column_count): @@ -1096,13 +1093,14 @@ row = tuple(row) if self.row_factory is not None: - row = self.row_factory(self.cur(), row) + row = self.row_factory(cursor, row) self.item = row def reset(self): self.row_cast_map = None ret = sqlite.sqlite3_reset(self.statement) self.in_use = False + self.exhausted = False return ret def finalize(self): @@ -1118,7 +1116,7 @@ self.statement = None def _get_description(self): - if self.kind == "DML": + if self.kind == DML: return None desc = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py --- a/lib_pypy/distributed/test/test_distributed.py +++ b/lib_pypy/distributed/test/test_distributed.py @@ -9,7 +9,7 @@ class AppTestDistributed(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) def test_init(self): import distributed @@ -91,10 +91,8 @@ class AppTestDistributedTasklets(object): spaceconfig = {"objspace.std.withtproxy": True, - "objspace.usemodules._stackless": True} + 
"objspace.usemodules._continuation": True} def setup_class(cls): - #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - # "usemodules":("_stackless",)}) cls.w_test_env = cls.space.appexec([], """(): from distributed import test_env return test_env diff --git a/lib_pypy/distributed/test/test_greensock.py b/lib_pypy/distributed/test/test_greensock.py --- a/lib_pypy/distributed/test/test_greensock.py +++ b/lib_pypy/distributed/test/test_greensock.py @@ -10,7 +10,7 @@ if not option.runappdirect: py.test.skip("Cannot run this on top of py.py because of PopenGateway") cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) cls.w_remote_side_code = cls.space.appexec([], """(): import sys sys.path.insert(0, '%s') diff --git a/lib_pypy/distributed/test/test_socklayer.py b/lib_pypy/distributed/test/test_socklayer.py --- a/lib_pypy/distributed/test/test_socklayer.py +++ b/lib_pypy/distributed/test/test_socklayer.py @@ -9,7 +9,8 @@ class AppTestSocklayer: def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless","_socket", "select")}) + "usemodules":("_continuation", + "_socket", "select")}) def test_socklayer(self): class X(object): diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -59,7 +59,12 @@ # while not target: if not target.__started: - _continulet.__init__(target, _greenlet_start, *args) + if unbound_method != _continulet.throw: + greenlet_func = _greenlet_start + else: + greenlet_func = _greenlet_throw + _continulet.__init__(target, greenlet_func, *args) + unbound_method = _continulet.switch args = () target.__started = True break @@ -136,3 +141,11 @@ if greenlet.parent is not _tls.main: _continuation.permute(greenlet, greenlet.parent) return (res,) + +def _greenlet_throw(greenlet, exc, value, tb): + _tls.current = greenlet + try: + raise exc, value, tb + 
finally: + if greenlet.parent is not _tls.main: + _continuation.permute(greenlet, greenlet.parent) diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py --- a/lib_pypy/pypy_test/test_coroutine.py +++ b/lib_pypy/pypy_test/test_coroutine.py @@ -2,7 +2,7 @@ from py.test import skip, raises try: - from lib_pypy.stackless import coroutine, CoroutineExit + from stackless import coroutine, CoroutineExit except ImportError, e: skip('cannot import stackless: %s' % (e,)) @@ -20,10 +20,6 @@ assert not co.is_zombie def test_is_zombie_del_without_frame(self): - try: - import _stackless # are we on pypy with a stackless build? - except ImportError: - skip("only works on pypy-c-stackless") import gc res = [] class MyCoroutine(coroutine): @@ -45,10 +41,6 @@ assert res[0], "is_zombie was False in __del__" def test_is_zombie_del_with_frame(self): - try: - import _stackless # are we on pypy with a stackless build? - except ImportError: - skip("only works on pypy-c-stackless") import gc res = [] class MyCoroutine(coroutine): diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -576,7 +576,7 @@ self.console.push_char(char) self.handle1(0) - def readline(self): + def readline(self, returns_unicode=False): """Read a line. 
The implementation of this method also shows how to drive Reader if you want more control over the event loop.""" @@ -585,6 +585,8 @@ self.refresh() while not self.finished: self.handle1() + if returns_unicode: + return self.get_unicode() return self.get_buffer() finally: self.restore() diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -198,7 +198,7 @@ reader.ps1 = prompt return reader.readline() - def multiline_input(self, more_lines, ps1, ps2): + def multiline_input(self, more_lines, ps1, ps2, returns_unicode=False): """Read an input on possibly multiple lines, asking for more lines as long as 'more_lines(unicodetext)' returns an object whose boolean value is true. @@ -209,7 +209,7 @@ reader.more_lines = more_lines reader.ps1 = reader.ps2 = ps1 reader.ps3 = reader.ps4 = ps2 - return reader.readline() + return reader.readline(returns_unicode=returns_unicode) finally: reader.more_lines = saved diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -54,7 +54,8 @@ ps1 = getattr(sys, 'ps1', '>>> ') ps2 = getattr(sys, 'ps2', '... ') try: - statement = multiline_input(more_lines, ps1, ps2) + statement = multiline_input(more_lines, ps1, ps2, + returns_unicode=True) except EOFError: break more = console.push(statement) diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -4,121 +4,124 @@ Please refer to their documentation. """ -DEBUG = True - -def dprint(*args): - for arg in args: - print arg, - print import traceback -import sys +import _continuation +from functools import partial + +class TaskletExit(Exception): + pass + +CoroutineExit = TaskletExit + +class GWrap(_continuation.continulet): + """This is just a wrapper around continulet to allow + to stick additional attributes to a continulet. 
+ To be more concrete, we need a backreference to + the coroutine object""" + + +class coroutine(object): + "we can't have continulet as a base, because continulets can't be rebound" + + def __init__(self): + self._frame = None + self.is_zombie = False + + def __getattr__(self, attr): + return getattr(self._frame, attr) + + def __del__(self): + self.is_zombie = True + del self._frame + self._frame = None + + def bind(self, func, *argl, **argd): + """coro.bind(f, *argl, **argd) -> None. + binds function f to coro. f will be called with + arguments *argl, **argd + """ + if self._frame is None or not self._frame.is_pending(): + + def _func(c, *args, **kwargs): + return func(*args, **kwargs) + + run = partial(_func, *argl, **argd) + self._frame = frame = GWrap(run) + else: + raise ValueError("cannot bind a bound coroutine") + + def switch(self): + """coro.switch() -> returnvalue + switches to coroutine coro. If the bound function + f finishes, the returnvalue is that of f, otherwise + None is returned + """ + current = _getcurrent() + current._jump_to(self) + + def _jump_to(self, coroutine): + _tls.current_coroutine = coroutine + self._frame.switch(to=coroutine._frame) + + def kill(self): + """coro.kill() : kill coroutine coro""" + _tls.current_coroutine = self + self._frame.throw(CoroutineExit) + + def _is_alive(self): + if self._frame is None: + return False + return not self._frame.is_pending() + is_alive = property(_is_alive) + del _is_alive + + def getcurrent(): + """coroutine.getcurrent() -> the currently running coroutine""" + try: + return _getcurrent() + except AttributeError: + return _maincoro + getcurrent = staticmethod(getcurrent) + + def __reduce__(self): + raise TypeError, 'pickling is not possible based upon continulets' + + +def _getcurrent(): + "Returns the current coroutine (i.e. the one which called this function)." 
+ try: + return _tls.current_coroutine + except AttributeError: + # first call in this thread: current == main + _coroutine_create_main() + return _tls.current_coroutine + try: - # If _stackless can be imported then TaskletExit and CoroutineExit are - # automatically added to the builtins. - from _stackless import coroutine, greenlet -except ImportError: # we are running from CPython - from greenlet import greenlet, GreenletExit - TaskletExit = CoroutineExit = GreenletExit - del GreenletExit - try: - from functools import partial - except ImportError: # we are not running python 2.5 - class partial(object): - # just enough of 'partial' to be usefull - def __init__(self, func, *argl, **argd): - self.func = func - self.argl = argl - self.argd = argd + from thread import _local +except ImportError: + class _local(object): # assume no threads + pass - def __call__(self): - return self.func(*self.argl, **self.argd) +_tls = _local() - class GWrap(greenlet): - """This is just a wrapper around greenlets to allow - to stick additional attributes to a greenlet. 
- To be more concrete, we need a backreference to - the coroutine object""" +def _coroutine_create_main(): + # create the main coroutine for this thread + _tls.current_coroutine = None + main_coroutine = coroutine() + main_coroutine.bind(lambda x:x) + _tls.main_coroutine = main_coroutine + _tls.current_coroutine = main_coroutine + return main_coroutine - class MWrap(object): - def __init__(self,something): - self.something = something - def __getattr__(self, attr): - return getattr(self.something, attr) +_maincoro = _coroutine_create_main() - class coroutine(object): - "we can't have greenlet as a base, because greenlets can't be rebound" - - def __init__(self): - self._frame = None - self.is_zombie = False - - def __getattr__(self, attr): - return getattr(self._frame, attr) - - def __del__(self): - self.is_zombie = True - del self._frame - self._frame = None - - def bind(self, func, *argl, **argd): - """coro.bind(f, *argl, **argd) -> None. - binds function f to coro. f will be called with - arguments *argl, **argd - """ - if self._frame is None or self._frame.dead: - self._frame = frame = GWrap() - frame.coro = self - if hasattr(self._frame, 'run') and self._frame.run: - raise ValueError("cannot bind a bound coroutine") - self._frame.run = partial(func, *argl, **argd) - - def switch(self): - """coro.switch() -> returnvalue - switches to coroutine coro. 
If the bound function - f finishes, the returnvalue is that of f, otherwise - None is returned - """ - try: - return greenlet.switch(self._frame) - except TypeError, exp: # self._frame is the main coroutine - return greenlet.switch(self._frame.something) - - def kill(self): - """coro.kill() : kill coroutine coro""" - self._frame.throw() - - def _is_alive(self): - if self._frame is None: - return False - return not self._frame.dead - is_alive = property(_is_alive) - del _is_alive - - def getcurrent(): - """coroutine.getcurrent() -> the currently running coroutine""" - try: - return greenlet.getcurrent().coro - except AttributeError: - return _maincoro - getcurrent = staticmethod(getcurrent) - - def __reduce__(self): - raise TypeError, 'pickling is not possible based upon greenlets' - - _maincoro = coroutine() - maingreenlet = greenlet.getcurrent() - _maincoro._frame = frame = MWrap(maingreenlet) - frame.coro = _maincoro - del frame - del maingreenlet from collections import deque import operator -__all__ = 'run getcurrent getmain schedule tasklet channel coroutine \ - greenlet'.split() +__all__ = 'run getcurrent getmain schedule tasklet channel coroutine'.split() _global_task_id = 0 _squeue = None @@ -131,7 +134,8 @@ def _scheduler_remove(value): try: del _squeue[operator.indexOf(_squeue, value)] - except ValueError:pass + except ValueError: + pass def _scheduler_append(value, normal=True): if normal: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -27,7 +27,7 @@ # --allworkingmodules working_modules = default_modules.copy() working_modules.update(dict.fromkeys( - ["_socket", "unicodedata", "mmap", "fcntl", "_locale", + ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", @@ -58,6 +58,7 @@ # unix only 
modules del working_modules["crypt"] del working_modules["fcntl"] + del working_modules["pwd"] del working_modules["termios"] del working_modules["_minimal_curses"] diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py --- a/pypy/config/test/test_config.py +++ b/pypy/config/test/test_config.py @@ -281,11 +281,11 @@ def test_underscore_in_option_name(): descr = OptionDescription("opt", "", [ - BoolOption("_stackless", "", default=False), + BoolOption("_foobar", "", default=False), ]) config = Config(descr) parser = to_optparse(config) - assert parser.has_option("--_stackless") + assert parser.has_option("--_foobar") def test_none(): dummy1 = BoolOption('dummy1', 'doc dummy', default=False, cmdline=None) diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._stackless.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._stackless.txt +++ /dev/null @@ -1,1 +0,0 @@ -Deprecated. diff --git a/pypy/doc/config/objspace.usemodules.pwd.txt b/pypy/doc/config/objspace.usemodules.pwd.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules.pwd.txt @@ -0,0 +1,2 @@ +Use the 'pwd' module. +This module is expected to be fully working. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -315,6 +315,28 @@ .. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html +--------------------------------------------------------- +Can RPython modules for PyPy be translated independently? +--------------------------------------------------------- + +No, you have to rebuild the entire interpreter. This means two things: + +* It is imperative to use test-driven development. You have to test + exhaustively your module in pure Python, before even attempting to + translate it. 
Once you translate it, you should have only a few typing + issues left to fix, but otherwise the result should work out of the box. + +* Second, and perhaps most important: do you have a really good reason + for writing the module in RPython in the first place? Nowadays you + should really look at alternatives, like writing it in pure Python, + using ctypes if it needs to call C code. Other alternatives are being + developed too (as of summer 2011), like a Cython binding. + +In this context it is not that important to be able to translate +RPython modules independently of translating the complete interpreter. +(It could be done given enough efforts, but it's a really serious +undertaking. Consider it as quite unlikely for now.) + ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? ---------------------------------------------------------- diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -199,7 +199,11 @@ The following features (present in some past Stackless version of PyPy) are for the time being not supported any more: -* Tasklets and channels (needs to be rewritten at app-level) +* Tasklets and channels (currently ``stackless.py`` seems to import, + but you have tasklets on top of coroutines on top of greenlets on + top of continulets on top of stacklets, and it's probably not too + hard to cut two of these levels by adapting ``stackless.py`` to + use directly continulets) * Coroutines (could be rewritten at app-level) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -8,13 +8,13 @@ from pypy.interpreter.miscutils import ThreadLocals from pypy.tool.cache import Cache from pypy.tool.uid import HUGEVAL_BYTES -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, 
newlist from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint from pypy.rlib import jit from pypy.tool.sourcetools import func_with_new_name -import os, sys, py +import os, sys __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root'] @@ -757,7 +757,18 @@ w_iterator = self.iter(w_iterable) # If we know the expected length we can preallocate. if expected_length == -1: - items = [] + try: + lgt_estimate = self.len_w(w_iterable) + except OperationError, o: + if (not o.match(self, self.w_AttributeError) and + not o.match(self, self.w_TypeError)): + raise + items = [] + else: + try: + items = newlist(lgt_estimate) + except MemoryError: + items = [] # it might have lied else: items = [None] * expected_length idx = 0 diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -81,58 +81,6 @@ # ________________________________________________________________ - - class Subcontext(object): - # coroutine: subcontext support - - def __init__(self): - self.topframe = None - self.w_tracefunc = None - self.profilefunc = None - self.w_profilefuncarg = None - self.is_tracing = 0 - - def enter(self, ec): - ec.topframeref = jit.non_virtual_ref(self.topframe) - ec.w_tracefunc = self.w_tracefunc - ec.profilefunc = self.profilefunc - ec.w_profilefuncarg = self.w_profilefuncarg - ec.is_tracing = self.is_tracing - ec.space.frame_trace_action.fire() - - def leave(self, ec): - self.topframe = ec.gettopframe() - self.w_tracefunc = ec.w_tracefunc - self.profilefunc = ec.profilefunc - self.w_profilefuncarg = ec.w_profilefuncarg - self.is_tracing = ec.is_tracing - - def clear_framestack(self): - self.topframe = None - - # the following interface is for pickling and unpickling - def getstate(self, space): - if self.topframe is None: - return space.w_None - return self.topframe - - def 
setstate(self, space, w_state): - from pypy.interpreter.pyframe import PyFrame - if space.is_w(w_state, space.w_None): - self.topframe = None - else: - self.topframe = space.interp_w(PyFrame, w_state) - - def getframestack(self): - lst = [] - f = self.topframe - while f is not None: - lst.append(f) - f = f.f_backref() - lst.reverse() - return lst - # coroutine: I think this is all, folks! - def c_call_trace(self, frame, w_func, args=None): "Profile the call of a builtin function" self._c_call_return_trace(frame, w_func, args, 'c_call') diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -242,8 +242,10 @@ # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - assert Function._all.get(identifier, self) is self, ("duplicate " - "function ids") + previous = Function._all.get(identifier, self) + assert previous is self, ( + "duplicate function ids with identifier=%r: %r and %r" % ( + identifier, previous, self)) self.add_to_table() return False diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -167,3 +167,7 @@ def getmainthreadvalue(self): return self._value + + def getallvalues(self): + return {0: self._value} + diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -10,7 +10,7 @@ from pypy.interpreter.argument import Signature from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import NoneNotWrapped, unwrap_spec -from pypy.interpreter.astcompiler.consts import (CO_OPTIMIZED, +from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_CONTAINSGLOBALS) from pypy.rlib.rarithmetic import intmask diff --git a/pypy/interpreter/pyframe.py 
b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -66,7 +66,7 @@ make_sure_not_resized(self.locals_stack_w) check_nonneg(self.nlocals) # - if space.config.objspace.honor__builtins__: + if space.config.objspace.honor__builtins__ and w_globals is not None: self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. @@ -614,7 +614,8 @@ return self.get_builtin().getdict(space) def fget_f_back(self, space): - return self.space.wrap(self.f_backref()) + f_backref = ExecutionContext.getnextframe_nohidden(self) + return self.space.wrap(f_backref) def fget_f_lasti(self, space): return self.space.wrap(self.last_instr) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1523,10 +1523,8 @@ if not isinstance(prog, codetype): filename = '' - if not isinstance(prog, str): - if isinstance(prog, basestring): - prog = str(prog) - elif isinstance(prog, file): + if not isinstance(prog, basestring): + if isinstance(prog, file): filename = prog.name prog = prog.read() else: diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py --- a/pypy/interpreter/pyparser/future.py +++ b/pypy/interpreter/pyparser/future.py @@ -109,25 +109,19 @@ self.getc() == self.getc(+2)): self.pos += 3 while 1: # Deal with a triple quoted docstring - if self.getc() == '\\': - self.pos += 2 + c = self.getc() + if c == '\\': + self.pos += 1 + self._skip_next_char_from_docstring() + elif c != endchar: + self._skip_next_char_from_docstring() else: - c = self.getc() - if c != endchar: - self.pos += 1 - if c == '\n': - self.atbol() - elif c == '\r': - if self.getc() == '\n': - self.pos += 1 - self.atbol() - else: - self.pos += 1 - if (self.getc() == endchar and - self.getc(+1) == endchar): - self.pos += 2 - self.consume_empty_line() - break + self.pos 
+= 1 + if (self.getc() == endchar and + self.getc(+1) == endchar): + self.pos += 2 + self.consume_empty_line() + break else: # Deal with a single quoted docstring self.pos += 1 @@ -138,17 +132,21 @@ self.consume_empty_line() return elif c == '\\': - # Deal with linefeeds - if self.getc() != '\r': - self.pos += 1 - else: - self.pos += 1 - if self.getc() == '\n': - self.pos += 1 + self._skip_next_char_from_docstring() elif c in '\r\n': # Syntax error return + def _skip_next_char_from_docstring(self): + c = self.getc() + self.pos += 1 + if c == '\n': + self.atbol() + elif c == '\r': + if self.getc() == '\n': + self.pos += 1 + self.atbol() + def consume_continuation(self): c = self.getc() if c in '\n\r': diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py --- a/pypy/interpreter/pyparser/test/test_futureautomaton.py +++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py @@ -221,6 +221,14 @@ assert f.lineno == 3 assert f.col_offset == 0 +def test_lots_of_continuation_lines(): + s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n" + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_WITH_STATEMENT + assert f.lineno == 8 + assert f.col_offset == 0 + # This looks like a bug in cpython parser # and would require extensive modifications # to future.py in order to emulate the same behaviour @@ -239,3 +247,19 @@ raise AssertionError('IndentationError not raised') assert f.lineno == 2 assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_single_quoted(): + s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 + +def test_continuation_lines_in_docstring_triple_quoted(): + s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom __future__ import division\n' + f = run(s) + assert f.pos == len(s) + assert f.flags == 
fut.CO_FUTURE_DIVISION + assert f.lineno == 8 + assert f.col_offset == 0 diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py --- a/pypy/interpreter/test/test_exec.py +++ b/pypy/interpreter/test/test_exec.py @@ -219,3 +219,30 @@ raise e assert res == 1 + + def test_exec_unicode(self): + # 's' is a string + s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'" + # 'u' is a unicode + u = s.decode('utf-8') + exec u + assert len(x) == 6 + assert ord(x[0]) == 0x0439 + assert ord(x[1]) == 0x0446 + assert ord(x[2]) == 0x0443 + assert ord(x[3]) == 0x043a + assert ord(x[4]) == 0x0435 + assert ord(x[5]) == 0x043d + + def test_eval_unicode(self): + u = "u'%s'" % unichr(0x1234) + v = eval(u) + assert v == unichr(0x1234) + + def test_compile_unicode(self): + s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'" + u = s.decode('utf-8') + c = compile(u, '', 'exec') + exec c + assert len(x) == 6 + assert ord(x[0]) == 0x0439 diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -71,6 +71,23 @@ assert err.value.match(space, space.w_ValueError) err = raises(OperationError, space.unpackiterable, w_l, 5) assert err.value.match(space, space.w_ValueError) + w_a = space.appexec((), """(): + class A(object): + def __iter__(self): + return self + def next(self): + raise StopIteration + def __len__(self): + 1/0 + return A() + """) + try: + space.unpackiterable(w_a) + except OperationError, o: + if not o.match(space, space.w_ZeroDivisionError): + raise Exception("DID NOT RAISE") + else: + raise Exception("DID NOT RAISE") def test_fixedview(self): space = self.space diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -1,4 +1,5 @@ from pypy.tool import udir +from pypy.conftest import option 
class AppTestPyFrame: @@ -6,6 +7,15 @@ def setup_class(cls): cls.w_udir = cls.space.wrap(str(udir.udir)) cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1'))) + if not option.runappdirect: + w_call_further = cls.space.appexec([], """(): + def call_further(f): + return f() + return call_further + """) + assert not w_call_further.code.hidden_applevel + w_call_further.code.hidden_applevel = True # hack + cls.w_call_further = w_call_further # test for the presence of the attributes, not functionality @@ -107,6 +117,22 @@ frame = f() assert frame.f_back.f_code.co_name == 'f' + def test_f_back_hidden(self): + if not hasattr(self, 'call_further'): + skip("not for runappdirect testing") + import sys + def f(): + return (sys._getframe(0), + sys._getframe(1), + sys._getframe(0).f_back) + def main(): + return self.call_further(f) + f0, f1, f1bis = main() + assert f0.f_code.co_name == 'f' + assert f1.f_code.co_name == 'main' + assert f1bis is f1 + assert f0.f_back is f1 + def test_f_exc_xxx(self): import sys diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -57,11 +57,13 @@ all_regs = [] no_lower_byte_regs = [] save_around_call_regs = [] - + frame_reg = None + def __init__(self, longevity, frame_manager=None, assembler=None): self.free_regs = self.all_regs[:] self.longevity = longevity self.reg_bindings = {} + self.bindings_to_frame_reg = {} self.position = -1 self.frame_manager = frame_manager self.assembler = assembler @@ -218,6 +220,10 @@ self.reg_bindings[v] = loc return loc + def force_allocate_frame_reg(self, v): + """ Allocate the new variable v in the frame register.""" + self.bindings_to_frame_reg[v] = None + def force_spill_var(self, var): self._sync_var(var) try: @@ -236,6 +242,8 @@ try: return self.reg_bindings[box] except KeyError: + if box in self.bindings_to_frame_reg: + return self.frame_reg return 
self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): @@ -264,8 +272,9 @@ self._check_type(v) if isinstance(v, Const): return self.return_constant(v, forbidden_vars, selected_reg) - prev_loc = self.loc(v) + if prev_loc is self.frame_reg and selected_reg is None: + return prev_loc loc = self.force_allocate_reg(v, forbidden_vars, selected_reg, need_lower_byte=need_lower_byte) if prev_loc is not loc: diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -957,6 +957,7 @@ if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm): self.mc.MOVSD(to_loc, from_loc) else: + assert to_loc is not ebp self.mc.MOV(to_loc, from_loc) regalloc_mov = mov # legacy interface @@ -2510,11 +2511,6 @@ genop_discard_cond_call_gc_wb_array = genop_discard_cond_call_gc_wb - def genop_force_token(self, op, arglocs, resloc): - # RegAlloc.consider_force_token ensures this: - assert isinstance(resloc, RegLoc) - self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS) - def not_implemented_op_discard(self, op, arglocs): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -29,6 +29,7 @@ all_regs = [eax, ecx, edx, ebx, esi, edi] no_lower_byte_regs = [esi, edi] save_around_call_regs = [eax, edx, ecx] + frame_reg = ebp REGLOC_TO_GCROOTMAP_REG_INDEX = { ebx: 1, @@ -312,8 +313,11 @@ self.fm.frame_bindings[arg] = loc else: if isinstance(loc, RegLoc): - self.rm.reg_bindings[arg] = loc - used[loc] = None + if loc is ebp: + self.rm.bindings_to_frame_reg[arg] = None + else: + self.rm.reg_bindings[arg] = loc + used[loc] = None else: self.fm.frame_bindings[arg] = loc self.rm.free_regs = [] @@ -1358,8 +1362,8 @@ self.assembler.datablockwrapper) def 
consider_force_token(self, op): - loc = self.rm.force_allocate_reg(op.result) - self.Perform(op, [], loc) + # the FORCE_TOKEN operation returns directly 'ebp' + self.rm.force_allocate_frame_reg(op.result) def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -119,7 +119,8 @@ setitem(index, null) def get_latest_force_token(self): - return self.assembler.fail_ebp + FORCE_INDEX_OFS + # the FORCE_TOKEN operation and this helper both return 'ebp'. + return self.assembler.fail_ebp def execute_token(self, executable_token): addr = executable_token._x86_bootstrap_code @@ -153,8 +154,9 @@ flavor='raw', zero=True, immortal=True) - def force(self, addr_of_force_index): + def force(self, addr_of_force_token): TP = rffi.CArrayPtr(lltype.Signed) + addr_of_force_index = addr_of_force_token + FORCE_INDEX_OFS fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" faildescr = self.get_fail_descr_from_number(fail_index) @@ -164,7 +166,7 @@ # start of "no gc operation!" block fail_index_2 = self.assembler.grab_frame_values( bytecode, - addr_of_force_index - FORCE_INDEX_OFS, + addr_of_force_token, self.all_null_registers) self.assembler.leave_jitted_hook() # end of "no gc operation!" block diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -25,7 +25,7 @@ # 'cached_fields'. 
# self._cached_fields = {} - self._cached_fields_getfield_op = {} + self._cached_fields_getfield_op = {} self._lazy_setfield = None self._lazy_setfield_registered = False @@ -37,6 +37,12 @@ self.force_lazy_setfield(optheap) assert not self.possible_aliasing(optheap, structvalue) cached_fieldvalue = self._cached_fields.get(structvalue, None) + + # Hack to ensure constants are imported from the preamble + if cached_fieldvalue and fieldvalue.is_constant(): + optheap.optimizer.ensure_imported(cached_fieldvalue) + cached_fieldvalue = self._cached_fields.get(structvalue, None) + if cached_fieldvalue is not fieldvalue: # common case: store the 'op' as lazy_setfield, and register # myself in the optheap's _lazy_setfields_and_arrayitems list @@ -75,7 +81,7 @@ def remember_field_value(self, structvalue, fieldvalue, getfield_op=None): assert self._lazy_setfield is None self._cached_fields[structvalue] = fieldvalue - self._cached_fields_getfield_op[structvalue] = getfield_op + self._cached_fields_getfield_op[structvalue] = getfield_op def force_lazy_setfield(self, optheap, can_cache=True): op = self._lazy_setfield @@ -132,9 +138,7 @@ result = newresult getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)], result, op.getdescr()) - getop = shortboxes.add_potential(getop) - self._cached_fields_getfield_op[structvalue] = getop - self._cached_fields[structvalue] = optimizer.getvalue(result) + shortboxes.add_potential(getop, synthetic=True) elif op.result is not None: shortboxes.add_potential(op) @@ -163,7 +167,7 @@ def new(self): return OptHeap() - + def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -10,6 +10,7 @@ from pypy.jit.metainterp.typesystem import llhelper, oohelper from pypy.tool.pairtype import 
extendabletype from pypy.rlib.debug import debug_start, debug_stop, debug_print +from pypy.rlib.objectmodel import specialize LEVEL_UNKNOWN = '\x00' LEVEL_NONNULL = '\x01' @@ -25,6 +26,9 @@ self.descr = descr self.bound = bound + def clone(self): + return LenBound(self.mode, self.descr, self.bound.clone()) + class OptValue(object): __metaclass__ = extendabletype _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', 'lenbound') @@ -88,8 +92,27 @@ assert False guards.append(op) self.lenbound.bound.make_guards(lenbox, guards) + return guards - return guards + def import_from(self, other, optimizer): + assert self.level <= LEVEL_NONNULL + if other.level == LEVEL_CONSTANT: + self.make_constant(other.get_key_box()) + optimizer.turned_constant(self) + elif other.level == LEVEL_KNOWNCLASS: + self.make_constant_class(other.known_class, -1) + else: + if other.level == LEVEL_NONNULL: + self.ensure_nonnull() + self.intbound.intersect(other.intbound) + if other.lenbound: + if self.lenbound: + assert other.lenbound.mode == self.lenbound.mode + assert other.lenbound.descr == self.lenbound.descr + self.lenbound.bound.intersect(other.lenbound.bound) + else: + self.lenbound = other.lenbound.clone() + def force_box(self): return self.box @@ -308,7 +331,6 @@ self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd) self.bool_boxes = {} self.pure_operations = args_dict() - self.emitted_pure_operations = {} self.producer = {} self.pendingfields = [] self.posponedop = None @@ -316,12 +338,11 @@ self.quasi_immutable_deps = None self.opaque_pointers = {} self.newoperations = [] - self.emitting_dissabled = False - self.emitted_guards = 0 if loop is not None: self.call_pure_results = loop.call_pure_results self.set_optimizations(optimizations) + self.setup() def set_optimizations(self, optimizations): if optimizations: @@ -348,23 +369,18 @@ assert self.posponedop is None def new(self): + new = Optimizer(self.metainterp_sd, self.loop) + return self._new(new) + + def 
_new(self, new): assert self.posponedop is None - new = Optimizer(self.metainterp_sd, self.loop) optimizations = [o.new() for o in self.optimizations] new.set_optimizations(optimizations) new.quasi_immutable_deps = self.quasi_immutable_deps return new def produce_potential_short_preamble_ops(self, sb): - for op in self.emitted_pure_operations: - if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ - op.getopnum() == rop.STRGETITEM or \ - op.getopnum() == rop.UNICODEGETITEM: - if not self.getvalue(op.getarg(1)).is_constant(): - continue - sb.add_potential(op) - for opt in self.optimizations: - opt.produce_potential_short_preamble_ops(sb) + raise NotImplementedError('This is implemented in unroll.UnrollableOptimizer') def turned_constant(self, value): for o in self.optimizations: @@ -386,19 +402,26 @@ else: return box + @specialize.argtype(0) def getvalue(self, box): box = self.getinterned(box) try: value = self.values[box] except KeyError: value = self.values[box] = OptValue(box) + self.ensure_imported(value) return value + def ensure_imported(self, value): + pass + + @specialize.argtype(0) def get_constant_box(self, box): if isinstance(box, Const): return box try: value = self.values[box] + self.ensure_imported(value) except KeyError: return None if value.is_constant(): @@ -481,18 +504,22 @@ def emit_operation(self, op): if op.returns_bool_result(): self.bool_boxes[self.getvalue(op.result)] = None - if self.emitting_dissabled: - return + self._emit_operation(op) + @specialize.argtype(0) + def _emit_operation(self, op): for i in range(op.numargs()): arg = op.getarg(i) - if arg in self.values: - box = self.values[arg].force_box() - op.setarg(i, box) + try: + value = self.values[arg] + except KeyError: + pass + else: + self.ensure_imported(value) + op.setarg(i, value.force_box()) self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) - self.emitted_guards += 1 # FIXME: can we reuse above counter? 
op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True @@ -548,6 +575,7 @@ args[n + 1 - start] = op.getdescr() return args + @specialize.argtype(0) def optimize_default(self, op): canfold = op.is_always_pure() if op.is_ovf(): @@ -583,13 +611,16 @@ return else: self.pure_operations[args] = op - self.emitted_pure_operations[op] = True + self.remember_emitting_pure(op) # otherwise, the operation remains self.emit_operation(op) if nextop: self.emit_operation(nextop) + def remember_emitting_pure(self, op): + pass + def constant_fold(self, op): argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -4711,6 +4711,7 @@ """ self.optimize_loop(ops, expected) +<<<<<<< local def test_empty_copystrunicontent(self): ops = """ [p0, p1, i0, i2, i3] @@ -4740,6 +4741,34 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_forced_virtuals_aliasing(self): + ops = """ + [i0, i1] + p0 = new(descr=ssize) + p1 = new(descr=ssize) + escape(p0) + escape(p1) + setfield_gc(p0, i0, descr=adescr) + setfield_gc(p1, i1, descr=adescr) + i2 = getfield_gc(p0, descr=adescr) + jump(i2, i2) + """ + expected = """ + [i0, i1] + p0 = new(descr=ssize) + escape(p0) + p1 = new(descr=ssize) + escape(p1) + setfield_gc(p0, i0, descr=adescr) + setfield_gc(p1, i1, descr=adescr) + jump(i0, i0) + """ + py.test.skip("not implemented") + # setfields on things that used to be virtual still can't alias each + # other + self.optimize_loop(ops, expected) + + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- 
a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -472,7 +472,13 @@ [i0] jump(i0) """ - self.optimize_loop(ops, expected, preamble) + short = """ + [i0] + i1 = int_is_true(i0) + guard_value(i1, 1) [] + jump(i0) + """ + self.optimize_loop(ops, expected, preamble, expected_short=short) def test_bound_int_is_true(self): ops = """ @@ -6997,6 +7003,26 @@ """ self.optimize_loop(ops, expected) + def test_cached_pure_func_of_equal_fields(self): + ops = """ + [p5, p6] + i10 = getfield_gc(p5, descr=valuedescr) + i11 = getfield_gc(p6, descr=nextdescr) + i12 = int_add(i10, 7) + i13 = int_add(i11, 7) + call(i12, i13, descr=nonwritedescr) + setfield_gc(p6, i10, descr=nextdescr) + jump(p5, p6) + """ + expected = """ + [p5, p6, i14, i12, i10] + i13 = int_add(i14, 7) + call(i12, i13, descr=nonwritedescr) + setfield_gc(p6, i10, descr=nextdescr) + jump(p5, p6, i10, i12, i10) + """ + self.optimize_loop(ops, expected) + def test_forced_counter(self): # XXX: VIRTUALHEAP (see above) py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") @@ -7086,8 +7112,84 @@ """ self.optimize_loop(ops, expected) + def test_import_constants_when_folding_pure_operations(self): + ops = """ + [p0] + f1 = getfield_gc(p0, descr=valuedescr) + f2 = float_abs(f1) + call(7.0, descr=nonwritedescr) + setfield_gc(p0, -7.0, descr=valuedescr) + jump(p0) + """ + expected = """ + [p0] + call(7.0, descr=nonwritedescr) + jump(p0) + """ + self.optimize_loop(ops, expected) + + def test_exploding_duplicatipon(self): + ops = """ + [i1, i2] + i3 = int_add(i1, i1) + i4 = int_add(i3, i3) + i5 = int_add(i4, i4) + i6 = int_add(i5, i5) + call(i6, descr=nonwritedescr) + jump(i1, i3) + """ + expected = """ + [i1, i2, i6, i3] + call(i6, descr=nonwritedescr) + jump(i1, i3, i6, i3) + """ + short = """ + [i1, i2] + i3 = int_add(i1, i1) + i4 = int_add(i3, i3) + i5 = int_add(i4, i4) + i6 = int_add(i5, i5) + jump(i1, i2, i6, i3) + """ + 
self.optimize_loop(ops, expected, expected_short=short) + + def test_prioritize_getfield1(self): + ops = """ + [p1, p2] + i1 = getfield_gc(p1, descr=valuedescr) + setfield_gc(p2, i1, descr=nextdescr) + i2 = int_neg(i1) + call(i2, descr=nonwritedescr) + jump(p1, p2) + """ + expected = """ + [p1, p2, i2, i1] + call(i2, descr=nonwritedescr) + setfield_gc(p2, i1, descr=nextdescr) + jump(p1, p2, i2, i1) + """ + self.optimize_loop(ops, expected) + + def test_prioritize_getfield2(self): + # Same as previous, but with descrs intercahnged which means + # that the getfield is discovered first when looking for + # potential short boxes during tests + ops = """ + [p1, p2] + i1 = getfield_gc(p1, descr=nextdescr) + setfield_gc(p2, i1, descr=valuedescr) + i2 = int_neg(i1) + call(i2, descr=nonwritedescr) + jump(p1, p2) + """ + expected = """ + [p1, p2, i2, i1] + call(i2, descr=nonwritedescr) + setfield_gc(p2, i1, descr=valuedescr) + jump(p1, p2, i2, i1) + """ + self.optimize_loop(ops, expected) - class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -70,6 +70,47 @@ self.snapshot_map[snapshot] = new_snapshot return new_snapshot +class UnrollableOptimizer(Optimizer): + def setup(self): + self.importable_values = {} + self.emitting_dissabled = False + self.emitted_guards = 0 + self.emitted_pure_operations = {} + + def ensure_imported(self, value): + if not self.emitting_dissabled and value in self.importable_values: + imp = self.importable_values[value] + del self.importable_values[value] + imp.import_value(value) + + def emit_operation(self, op): + if op.returns_bool_result(): + self.bool_boxes[self.getvalue(op.result)] = None + if self.emitting_dissabled: + return + if op.is_guard(): + self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? 
+ self._emit_operation(op) + + def new(self): + new = UnrollableOptimizer(self.metainterp_sd, self.loop) + return self._new(new) + + def remember_emitting_pure(self, op): + self.emitted_pure_operations[op] = True + + def produce_potential_short_preamble_ops(self, sb): + for op in self.emitted_pure_operations: + if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ + op.getopnum() == rop.STRGETITEM or \ + op.getopnum() == rop.UNICODEGETITEM: + if not self.getvalue(op.getarg(1)).is_constant(): + continue + sb.add_potential(op) + for opt in self.optimizations: + opt.produce_potential_short_preamble_ops(sb) + + class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will @@ -77,7 +118,7 @@ distinction anymore)""" def __init__(self, metainterp_sd, loop, optimizations): - self.optimizer = Optimizer(metainterp_sd, loop, optimizations) + self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) self.cloned_operations = [] for op in self.optimizer.loop.operations: newop = op.clone() @@ -150,6 +191,7 @@ args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs]) debug_print('short inputargs: ' + args) self.short_boxes.debug_print(logops) + # Force virtuals amoung the jump_args of the preamble to get the # operations needed to setup the proper state of those virtuals @@ -161,8 +203,9 @@ if box in seen: continue seen[box] = True - value = preamble_optimizer.getvalue(box) - inputarg_setup_ops.extend(value.make_guards(box)) + preamble_value = preamble_optimizer.getvalue(box) + value = self.optimizer.getvalue(box) + value.import_from(preamble_value, self.optimizer) for box in short_inputargs: if box in seen: continue @@ -181,23 +224,16 @@ for op in self.short_boxes.operations(): self.ensure_short_op_emitted(op, self.optimizer, seen) if op and op.result: - # The order of these guards is not important as - # self.optimizer.emitting_dissabled is False - value = preamble_optimizer.getvalue(op.result) - for guard in 
value.make_guards(op.result): - self.optimizer.send_extra_operation(guard) + preamble_value = preamble_optimizer.getvalue(op.result) + value = self.optimizer.getvalue(op.result) + imp = ValueImporter(self, preamble_value, op) + self.optimizer.importable_values[value] = imp newresult = self.optimizer.getvalue(op.result).get_key_box() if newresult is not op.result: self.short_boxes.alias(newresult, op.result) self.optimizer.flush() self.optimizer.emitting_dissabled = False - # XXX Hack to prevent the arraylen/strlen/unicodelen ops generated - # by value.make_guards() from ending up in pure_operations - for key, op in self.optimizer.pure_operations.items(): - if not self.short_boxes.has_producer(op.result): - del self.optimizer.pure_operations[key] - initial_inputargs_len = len(inputargs) self.inliner = Inliner(loop.inputargs, jump_args) @@ -276,16 +312,11 @@ short_jumpargs = inputargs[:] - short = [] - short_seen = {} + short = self.short = [] + short_seen = self.short_seen = {} for box, const in self.constant_inputargs.items(): short_seen[box] = True - for op in self.short_boxes.operations(): - if op is not None: - if len(self.getvalue(op.result).make_guards(op.result)) > 0: - self.add_op_to_short(op, short, short_seen, False, True) - # This loop is equivalent to the main optimization loop in # Optimizer.propagate_all_forward jumpop = None @@ -380,7 +411,7 @@ if op.is_ovf(): guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) optimizer.send_extra_operation(guard) - + def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False): if op is None: return None @@ -536,6 +567,13 @@ loop_token.failed_states.append(virtual_state) self.emit_operation(op) +class ValueImporter(object): + def __init__(self, unroll, value, op): + self.unroll = unroll + self.preamble_value = value + self.op = op - - + def import_value(self, value): + value.import_from(self.preamble_value, self.unroll.optimizer) + self.unroll.add_op_to_short(self.op, self.unroll.short, 
self.unroll.short_seen, False, True) + diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -58,6 +58,9 @@ def _really_force(self): raise NotImplementedError("abstract base") + def import_from(self, other, optimizer): + raise NotImplementedError("should not be called at this level") + def get_fielddescrlist_cache(cpu): if not hasattr(cpu, '_optimizeopt_fielddescrlist_cache'): result = descrlist_dict() diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -12,6 +12,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import debug_start, debug_stop, debug_print from pypy.rlib.objectmodel import we_are_translated +import os class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): position = -1 @@ -461,8 +462,10 @@ class ShortBoxes(object): def __init__(self, optimizer, surviving_boxes): self.potential_ops = {} - self.duplicates = {} + self.alternatives = {} + self.synthetic = {} self.aliases = {} + self.rename = {} self.optimizer = optimizer for box in surviving_boxes: self.potential_ops[box] = None @@ -476,33 +479,81 @@ except BoxNotProducable: pass + def prioritized_alternatives(self, box): + if box not in self.alternatives: + return [self.potential_ops[box]] + alts = self.alternatives[box] + hi, lo = 0, len(alts) - 1 + while hi < lo: + if alts[lo] is None: # Inputarg, lowest priority + alts[lo], alts[-1] = alts[-1], alts[lo] + lo -= 1 + elif alts[lo] not in self.synthetic: # Hi priority + alts[hi], alts[lo] = alts[lo], alts[hi] + hi += 1 + else: # Low priority + lo -= 1 + return alts + + def renamed(self, box): + if box in self.rename: + return self.rename[box] + return box + + def add_to_short(self, box, op): + if 
op: + op = op.clone() + for i in range(op.numargs()): + op.setarg(i, self.renamed(op.getarg(i))) + if box in self.short_boxes: + if op is None: + oldop = self.short_boxes[box].clone() + oldres = oldop.result + newbox = oldop.result = oldres.clonebox() + self.rename[box] = newbox + self.short_boxes[box] = None + self.short_boxes[newbox] = oldop + else: + newop = op.clone() + newbox = newop.result = op.result.clonebox() + self.short_boxes[newop.result] = newop + value = self.optimizer.getvalue(box) + self.optimizer.make_equal_to(newbox, value) + else: + self.short_boxes[box] = op + def produce_short_preamble_box(self, box): if box in self.short_boxes: return if isinstance(box, Const): return if box in self.potential_ops: - op = self.potential_ops[box] - if op: - for arg in op.getarglist(): - self.produce_short_preamble_box(arg) - self.short_boxes[box] = op + ops = self.prioritized_alternatives(box) + produced_one = False + for op in ops: + try: + if op: + for arg in op.getarglist(): + self.produce_short_preamble_box(arg) + except BoxNotProducable: + pass + else: + produced_one = True + self.add_to_short(box, op) + if not produced_one: + raise BoxNotProducable else: raise BoxNotProducable - def add_potential(self, op): + def add_potential(self, op, synthetic=False): if op.result not in self.potential_ops: self.potential_ops[op.result] = op - return op - newop = op.clone() - newop.result = op.result.clonebox() - self.potential_ops[newop.result] = newop - if op.result in self.duplicates: - self.duplicates[op.result].append(newop.result) else: - self.duplicates[op.result] = [newop.result] - self.optimizer.make_equal_to(newop.result, self.optimizer.getvalue(op.result)) - return newop + if op.result not in self.alternatives: + self.alternatives[op.result] = [self.potential_ops[op.result]] + self.alternatives[op.result].append(op) + if synthetic: + self.synthetic[op] = True def debug_print(self, logops): debug_start('jit-short-boxes') diff --git 
a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py --- a/pypy/jit/metainterp/test/test_virtualstate.py +++ b/pypy/jit/metainterp/test/test_virtualstate.py @@ -2,7 +2,7 @@ import py from pypy.jit.metainterp.optimize import InvalidLoop from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ - VArrayStateInfo, NotVirtualStateInfo, VirtualState + VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes from pypy.jit.metainterp.optimizeopt.optimizer import OptValue from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from pypy.rpython.lltypesystem import lltype @@ -11,6 +11,7 @@ from pypy.jit.metainterp.history import TreeLoop, LoopToken from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeDescr, FakeMetaInterpStaticData from pypy.jit.metainterp.optimize import RetraceLoop +from pypy.jit.metainterp.resoperation import ResOperation, rop class TestBasic: someptr1 = LLtypeMixin.myptr @@ -129,6 +130,7 @@ info.fieldstate = [info] assert info.generalization_of(info, {}, {}) + class BaseTestGenerateGuards(BaseTest): def guards(self, info1, info2, box, expected): info1.position = info2.position = 0 @@ -910,3 +912,111 @@ class TestLLtypeBridges(BaseTestBridges, LLtypeMixin): pass +class FakeOptimizer: + def make_equal_to(*args): + pass + def getvalue(*args): + pass + +class TestShortBoxes: + p1 = BoxPtr() + p2 = BoxPtr() + p3 = BoxPtr() + p4 = BoxPtr() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + i4 = BoxInt() + + def test_short_box_duplication_direct(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes) == 4 + assert self.i1 in sb.short_boxes + assert sum([op.result is self.i1 
for op in sb.short_boxes.values() if op]) == 1 + + def test_dont_duplicate_potential_boxes(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [BoxPtr()], self.i1)) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb.add_potential(ResOperation(rop.INT_ADD, [ConstInt(7), self.i2], + self.i3)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes) == 5 + + def test_prioritize1(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1)) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + assert len(getfield) == 1 + assert getfield[0].getarg(0) in [self.p1, self.p2] + + def test_prioritize1bis(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + 
assert len(getfield) == 1 + assert getfield[0].getarg(0) in [self.p1, self.p2] + + def test_prioritize2(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1)) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + assert len(getfield) == 1 + assert getfield[0].getarg(0) == self.p2 + + def test_prioritize3(self): + class Optimizer(FakeOptimizer): + def produce_potential_short_preamble_ops(_self, sb): + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1)) + sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1), + synthetic=True) + sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2)) + sb = ShortBoxes(Optimizer(), [self.p1, self.p2]) + assert len(sb.short_boxes.values()) == 5 + int_neg = [op for op in sb.short_boxes.values() + if op and op.getopnum() == rop.INT_NEG] + assert len(int_neg) == 1 + int_neg = int_neg[0] + getfield = [op for op in sb.short_boxes.values() + if op and op.result == int_neg.getarg(0)] + assert len(getfield) == 1 + assert getfield[0].getarg(0) == self.p1 diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -130,8 +130,15 @@ results = _find_jit_marker(graphs, 'jit_merge_point') if not results: raise Exception("no jit_merge_point found!") + seen = set([graph for graph, block, pos in results]) + assert len(seen) == len(results), ( + "found several jit_merge_points in the 
same graph") return results +def locate_jit_merge_point(graph): + [(graph, block, pos)] = find_jit_merge_points([graph]) + return block, pos, block.operations[pos] + def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') @@ -235,7 +242,7 @@ def split_graph_and_record_jitdriver(self, graph, block, pos): op = block.operations[pos] jd = JitDriverStaticData() - jd._jit_merge_point_pos = (graph, op) + jd._jit_merge_point_in = graph args = op.args[2:] s_binding = self.translator.annotator.binding jd._portal_args_s = [s_binding(v) for v in args] @@ -504,7 +511,8 @@ self.make_args_specification(jd) def make_args_specification(self, jd): - graph, op = jd._jit_merge_point_pos + graph = jd._jit_merge_point_in + _, _, op = locate_jit_merge_point(graph) greens_v, reds_v = support.decode_hp_hint_args(op) ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] @@ -552,7 +560,7 @@ assert jitdriver in sublists, \ "can_enter_jit with no matching jit_merge_point" jd, sublist = sublists[jitdriver] - origportalgraph = jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in if graph is not origportalgraph: sublist.append((graph, block, index)) jd.no_loop_header = False @@ -582,7 +590,7 @@ can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)] for graph, block, index in can_enter_jits: - if graph is jd._jit_merge_point_pos[0]: + if graph is jd._jit_merge_point_in: continue op = block.operations[index] @@ -640,7 +648,7 @@ # while 1: # more stuff # - origportalgraph = jd._jit_merge_point_pos[0] + origportalgraph = jd._jit_merge_point_in portalgraph = jd.portal_graph PORTALFUNC = jd._PORTAL_FUNCTYPE @@ -794,14 +802,7 @@ # ____________________________________________________________ # Now mutate origportalgraph to end with a call to portal_runner_ptr # - _, op = jd._jit_merge_point_pos - for origblock in origportalgraph.iterblocks(): - if op in origblock.operations: - break - else: - assert 
False, "lost the operation %r in the graph %r" % ( - op, origportalgraph) - origindex = origblock.operations.index(op) + origblock, origindex, op = locate_jit_merge_point(origportalgraph) assert op.opname == 'jit_marker' assert op.args[0].value == 'jit_merge_point' greens_v, reds_v = support.decode_hp_hint_args(op) diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -2,22 +2,16 @@ pypyjit.set_param(threshold=200) -def main(a, b): - i = sa = 0 - while i < 300: - if a > 0: # Specialises the loop - pass - if b < 2 and b > 0: - pass - if (a >> b) >= 0: - sa += 1 - if (a << b) > 2: - sa += 10000 - i += 1 - return sa +def f(n): + pairs = [(0.0, 1.0), (2.0, 3.0)] * n + mag = 0 + for (x1, x2) in pairs: + dx = x1 - x2 + mag += ((dx * dx ) ** (-1.5)) + return n try: - print main(2, 1) + print f(301) except Exception, e: print "Exception: ", type(e) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -8,6 +8,7 @@ appleveldefs = {} interpleveldefs = { + "StringBuilder": "interp_builders.W_StringBuilder", "UnicodeBuilder": "interp_builders.W_UnicodeBuilder", } diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -2,49 +2,55 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.rlib.rstring import UnicodeBuilder +from pypy.rlib.rstring import UnicodeBuilder, StringBuilder +from pypy.tool.sourcetools import func_with_new_name -class W_UnicodeBuilder(Wrappable): - def __init__(self, space, size): - if size < 0: - self.builder = UnicodeBuilder() - else: - self.builder = UnicodeBuilder(size) - self.done = False +def create_builder(name, 
strtype, builder_cls): + class W_Builder(Wrappable): + def __init__(self, space, size): + if size < 0: + self.builder = builder_cls() + else: + self.builder = builder_cls(size) - def _check_done(self, space): - if self.done: - raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder")) + def _check_done(self, space): + if self.builder is None: + raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder")) - @unwrap_spec(size=int) - def descr__new__(space, w_subtype, size=-1): - return W_UnicodeBuilder(space, size) + @unwrap_spec(size=int) + def descr__new__(space, w_subtype, size=-1): + return W_Builder(space, size) - @unwrap_spec(s=unicode) - def descr_append(self, space, s): - self._check_done(space) - self.builder.append(s) + @unwrap_spec(s=strtype) + def descr_append(self, space, s): + self._check_done(space) + self.builder.append(s) - @unwrap_spec(s=unicode, start=int, end=int) - def descr_append_slice(self, space, s, start, end): - self._check_done(space) - if not 0 <= start <= end <= len(s): - raise OperationError(space.w_ValueError, space.wrap("bad start/stop")) - self.builder.append_slice(s, start, end) + @unwrap_spec(s=strtype, start=int, end=int) + def descr_append_slice(self, space, s, start, end): + self._check_done(space) + if not 0 <= start <= end <= len(s): + raise OperationError(space.w_ValueError, space.wrap("bad start/stop")) + self.builder.append_slice(s, start, end) - def descr_build(self, space): - self._check_done(space) - w_s = space.wrap(self.builder.build()) - self.done = True - return w_s + def descr_build(self, space): + self._check_done(space) + w_s = space.wrap(self.builder.build()) + self.builder = None + return w_s + W_Builder.__name__ = "W_%s" % name + W_Builder.typedef = TypeDef(name, + __new__ = interp2app(func_with_new_name( + W_Builder.descr__new__.im_func, + '%s_new' % (name,))), + append = interp2app(W_Builder.descr_append), + append_slice = 
interp2app(W_Builder.descr_append_slice), + build = interp2app(W_Builder.descr_build), + ) + W_Builder.typedef.acceptable_as_base_class = False + return W_Builder -W_UnicodeBuilder.typedef = TypeDef("UnicodeBuilder", - __new__ = interp2app(W_UnicodeBuilder.descr__new__.im_func), - - append = interp2app(W_UnicodeBuilder.descr_append), - append_slice = interp2app(W_UnicodeBuilder.descr_append_slice), - build = interp2app(W_UnicodeBuilder.descr_build), -) -W_UnicodeBuilder.typedef.acceptable_as_base_class = False +W_StringBuilder = create_builder("StringBuilder", str, StringBuilder) +W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder) diff --git a/pypy/module/__pypy__/test/test_builders.py b/pypy/module/__pypy__/test/test_builders.py --- a/pypy/module/__pypy__/test/test_builders.py +++ b/pypy/module/__pypy__/test/test_builders.py @@ -31,4 +31,14 @@ raises(ValueError, b.append_slice, u"1", 2, 1) s = b.build() assert s == "cde" - raises(ValueError, b.append_slice, u"abc", 1, 2) \ No newline at end of file + raises(ValueError, b.append_slice, u"abc", 1, 2) + + def test_stringbuilder(self): + from __pypy__.builders import StringBuilder + b = StringBuilder() + b.append("abc") + b.append("123") + b.append("you and me") + s = b.build() + assert s == "abc123you and me" + raises(ValueError, b.build) \ No newline at end of file diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -5,6 +5,7 @@ from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app +from pypy.interpreter.pycode import PyCode class W_Continulet(Wrappable): @@ -30,6 +31,7 @@ start_state.origin = self start_state.w_callable = w_callable start_state.args = __args__ + self.bottomframe = make_fresh_frame(self.space) self.sthread = 
build_sthread(self.space) try: self.h = self.sthread.new(new_stacklet_callback) @@ -43,16 +45,15 @@ def switch(self, w_to): to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None: - if self is to: # double-switch to myself: no-op - return get_result() if to.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") + if self is to: # double-switch to myself: no-op + return get_result() if self.sthread is None: start_state.clear() raise geterror(self.space, "continulet not initialized yet") ec = self.check_sthread() - saved_topframeref = ec.topframeref # start_state.origin = self if to is None: @@ -74,8 +75,6 @@ start_state.clear() raise getmemoryerror(self.space) # - ec = sthread.ec - ec.topframeref = saved_topframeref return get_result() def descr_switch(self, w_value=None, w_to=None): @@ -123,13 +122,21 @@ # ____________________________________________________________ +# Continulet objects maintain a dummy frame object in order to ensure +# that the 'f_back' chain is consistent. We hide this dummy frame +# object by giving it a dummy code object with hidden_applevel=True. 
class State: def __init__(self, space): + from pypy.interpreter.astcompiler.consts import CO_OPTIMIZED self.space = space w_module = space.getbuiltinmodule('_continuation') self.w_error = space.getattr(w_module, space.wrap('error')) self.w_memoryerror = OperationError(space.w_MemoryError, space.w_None) + self.dummy_pycode = PyCode(space, 0, 0, 0, CO_OPTIMIZED, + '', [], [], [], '', + '', 0, '', [], [], + hidden_applevel=True) def geterror(space, message): cs = space.fromcache(State) @@ -139,6 +146,10 @@ cs = space.fromcache(State) return cs.w_memoryerror +def make_fresh_frame(space): + cs = space.fromcache(State) + return space.FrameClass(space, cs.dummy_pycode, None, None) + # ____________________________________________________________ @@ -178,9 +189,8 @@ # space = self.space try: - ec = self.sthread.ec - ec.topframeref = jit.vref_None - + assert self.sthread.ec.topframeref() is None + self.sthread.ec.topframeref = jit.non_virtual_ref(self.bottomframe) if start_state.propagate_exception is not None: raise start_state.propagate_exception # just propagate it further if start_state.w_value is not space.w_None: @@ -193,6 +203,7 @@ start_state.propagate_exception = e else: start_state.w_value = w_result + self.sthread.ec.topframeref = jit.vref_None start_state.origin = self start_state.destination = self return self.h @@ -205,6 +216,11 @@ start_state.origin = None start_state.destination = None self.h, origin.h = origin.h, h + # + current = sthread.ec.topframeref + sthread.ec.topframeref = self.bottomframe.f_backref + self.bottomframe.f_backref = origin.bottomframe.f_backref + origin.bottomframe.f_backref = current def get_result(): if start_state.propagate_exception: @@ -240,6 +256,9 @@ contlist.append(cont) # if len(contlist) > 1: - other = contlist[-1].h + otherh = contlist[-1].h + otherb = contlist[-1].bottomframe.f_backref for cont in contlist: - other, cont.h = cont.h, other + otherh, cont.h = cont.h, otherh + b = cont.bottomframe + otherb, b.f_backref = 
b.f_backref, otherb diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -312,7 +312,7 @@ res = f() assert res == 2002 - def test_f_back_is_None_for_now(self): + def test_f_back(self): import sys from _continuation import continulet # @@ -321,6 +321,7 @@ c.switch(sys._getframe(0).f_back) c.switch(sys._getframe(1)) c.switch(sys._getframe(1).f_back) + assert sys._getframe(2) is f3.f_back c.switch(sys._getframe(2)) def f(c): g(c) @@ -331,10 +332,21 @@ f2 = c.switch() assert f2.f_code.co_name == 'f' f3 = c.switch() - assert f3.f_code.co_name == 'f' - f4 = c.switch() - assert f4 is None - raises(ValueError, c.switch) # "call stack is not deep enough" + assert f3 is f2 + assert f1.f_back is f3 + def main(): + f4 = c.switch() + assert f4.f_code.co_name == 'main', repr(f4.f_code.co_name) + assert f3.f_back is f1 # not running, so a loop + def main2(): + f5 = c.switch() + assert f5.f_code.co_name == 'main2', repr(f5.f_code.co_name) + assert f3.f_back is f1 # not running, so a loop + main() + main2() + res = c.switch() + assert res is None + assert f3.f_back is None def test_traceback_is_complete(self): import sys @@ -609,6 +621,7 @@ assert res == "ok" def test_permute(self): + import sys from _continuation import continulet, permute # def f1(c1): @@ -617,14 +630,18 @@ return "done" # def f2(c2): + assert sys._getframe(1).f_code.co_name == 'main' permute(c1, c2) + assert sys._getframe(1).f_code.co_name == 'f1' return "ok" # c1 = continulet(f1) c2 = continulet(f2) - c1.switch() - res = c2.switch() - assert res == "done" + def main(): + c1.switch() + res = c2.switch() + assert res == "done" + main() def test_various_depths(self): skip("may fail on top of CPython") diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ 
b/pypy/module/_rawffi/test/test__rawffi.py @@ -639,33 +639,6 @@ a1.free() cb.free() - def test_another_callback_in_stackless(self): - try: - import _stackless - except ImportError: - skip("only valid in a stackless pypy-c") - - import _rawffi - lib = _rawffi.CDLL(self.lib_name) - runcallback = lib.ptr('runcallback', ['P'], 'q') - def callback(): - co = _stackless.coroutine() - def f(): - pass - try: - co.bind(f) - co.switch() - except RuntimeError: - return 1<<42 - return -5 - - cb = _rawffi.CallbackPtr(callback, [], 'q') - a1 = cb.byptr() - res = runcallback(a1) - assert res[0] == 1<<42 - a1.free() - cb.free() - def test_raising_callback(self): import _rawffi, sys import StringIO diff --git a/pypy/module/_stackless/__init__.py b/pypy/module/_stackless/__init__.py deleted file mode 100644 --- a/pypy/module/_stackless/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Package initialisation -from pypy.interpreter.mixedmodule import MixedModule - -class Module(MixedModule): - """ - This module implements Stackless for applications. 
- """ - - appleveldefs = { - 'GreenletExit' : 'app_greenlet.GreenletExit', - 'GreenletError' : 'app_greenlet.GreenletError', - } - - interpleveldefs = { - 'tasklet' : 'interp_stackless.tasklet', - 'coroutine' : 'interp_coroutine.AppCoroutine', - 'greenlet' : 'interp_greenlet.AppGreenlet', - 'usercostate': 'interp_composable_coroutine.W_UserCoState', - '_return_main' : 'interp_coroutine.return_main', - 'get_stack_depth_limit': 'interp_coroutine.get_stack_depth_limit', - 'set_stack_depth_limit': 'interp_coroutine.set_stack_depth_limit', - } - - def setup_after_space_initialization(self): - # post-installing classmethods/staticmethods which - # are not yet directly supported - from pypy.module._stackless.interp_coroutine import post_install as post_install_coro - post_install_coro(self) - from pypy.module._stackless.interp_greenlet import post_install as post_install_greenlet - post_install_greenlet(self) - - if self.space.config.translation.gc == 'marksweep': - from pypy.module._stackless.interp_clonable import post_install as post_install_clonable - self.extra_interpdef('clonable', 'interp_clonable.AppClonableCoroutine') - self.extra_interpdef('fork', 'interp_clonable.fork') - post_install_clonable(self) diff --git a/pypy/module/_stackless/app_greenlet.py b/pypy/module/_stackless/app_greenlet.py deleted file mode 100644 --- a/pypy/module/_stackless/app_greenlet.py +++ /dev/null @@ -1,5 +0,0 @@ -class GreenletExit(Exception): - pass - -class GreenletError(Exception): - pass diff --git a/pypy/module/_stackless/interp_clonable.py b/pypy/module/_stackless/interp_clonable.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_clonable.py +++ /dev/null @@ -1,106 +0,0 @@ -from pypy.interpreter.error import OperationError -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app -from pypy.module._stackless.interp_coroutine import AppCoroutine, AppCoState -from pypy.module._stackless.interp_coroutine import makeStaticMethod 
-from pypy.module._stackless.rcoroutine import AbstractThunk -from pypy.module._stackless.rclonable import InterpClonableMixin - - -class AppClonableCoroutine(AppCoroutine, InterpClonableMixin): - - def newsubctx(self): - self.hello_local_pool() - AppCoroutine.newsubctx(self) - self.goodbye_local_pool() - - def hello(self): - self.hello_local_pool() - AppCoroutine.hello(self) - - def goodbye(self): - AppCoroutine.goodbye(self) - self.goodbye_local_pool() - - def descr_method__new__(space, w_subtype): - co = space.allocate_instance(AppClonableCoroutine, w_subtype) - costate = AppClonableCoroutine._get_state(space) - AppClonableCoroutine.__init__(co, space, state=costate) - return space.wrap(co) - - def _get_state(space): - return space.fromcache(AppClonableCoState) - _get_state = staticmethod(_get_state) - - def w_getcurrent(space): - return space.wrap(AppClonableCoroutine._get_state(space).current) - w_getcurrent = staticmethod(w_getcurrent) - - def w_clone(self): - space = self.space - costate = self.costate - if costate.current is self: - raise OperationError(space.w_RuntimeError, - space.wrap("clone() cannot clone the " - "current coroutine" - "; use fork() instead")) - copy = AppClonableCoroutine(space, state=costate) - copy.subctx = self.clone_into(copy, self.subctx) - return space.wrap(copy) - - def descr__reduce__(self, space): - raise OperationError(space.w_TypeError, - space.wrap("_stackless.clonable instances are " - "not picklable")) - - -AppClonableCoroutine.typedef = TypeDef("clonable", AppCoroutine.typedef, - __new__ = interp2app(AppClonableCoroutine.descr_method__new__.im_func), - getcurrent = interp2app(AppClonableCoroutine.w_getcurrent), - clone = interp2app(AppClonableCoroutine.w_clone), - __reduce__ = interp2app(AppClonableCoroutine.descr__reduce__), -) - -class AppClonableCoState(AppCoState): - def post_install(self): - self.current = self.main = AppClonableCoroutine(self.space, state=self) - self.main.subctx.clear_framestack() # wack - -def 
post_install(module): - makeStaticMethod(module, 'clonable', 'getcurrent') - space = module.space - AppClonableCoroutine._get_state(space).post_install() - -# ____________________________________________________________ - -class ForkThunk(AbstractThunk): - def __init__(self, coroutine): - self.coroutine = coroutine - self.newcoroutine = None - def call(self): - oldcoro = self.coroutine - self.coroutine = None - newcoro = AppClonableCoroutine(oldcoro.space, state=oldcoro.costate) - newcoro.subctx = oldcoro.clone_into(newcoro, oldcoro.subctx) - newcoro.parent = oldcoro - self.newcoroutine = newcoro - -def fork(space): - """Fork, as in the Unix fork(): the call returns twice, and the return - value of the call is either the new 'child' coroutine object (if returning - into the parent), or None (if returning into the child). This returns - into the parent first, which can switch to the child later. - """ - costate = AppClonableCoroutine._get_state(space) - current = costate.current - if current is costate.main: - raise OperationError(space.w_RuntimeError, - space.wrap("cannot fork() in the main " - "clonable coroutine")) - thunk = ForkThunk(current) - coro_fork = AppClonableCoroutine(space, state=costate) - coro_fork.bind(thunk) - coro_fork.switch() - # we resume here twice. The following would need explanations about - # why it returns the correct thing in both the parent and the child... 
- return space.wrap(thunk.newcoroutine) diff --git a/pypy/module/_stackless/interp_composable_coroutine b/pypy/module/_stackless/interp_composable_coroutine deleted file mode 100644 --- a/pypy/module/_stackless/interp_composable_coroutine +++ /dev/null @@ -1,33 +0,0 @@ -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef, interp2app -from pypy.module._stackless.coroutine import AppCoState, AppCoroutine - - -class W_UserCoState(Wrappable): - def __init__(self, space): - self.costate = AppCoState(space) - self.costate.post_install() - - def descr_method__new__(space, w_subtype): - costate = space.allocate_instance(W_UserCoState, w_subtype) - W_UserCoState.__init__(costate, space) - return space.wrap(costate) - - def w_getcurrent(self): - space = self.costate.space - return space.wrap(self.costate.current) - - def w_spawn(self, w_subtype=None): - space = self.costate.space - if space.is_w(w_subtype, space.w_None): - w_subtype = space.gettypeobject(AppCoroutine.typedef) - co = space.allocate_instance(AppCoroutine, w_subtype) - AppCoroutine.__init__(co, space, state=self.costate) - return space.wrap(co) - -W_UserCoState.typedef = TypeDef("usercostate", - __new__ = interp2app(W_UserCoState.descr_method__new__.im_func), - __module__ = '_stackless', - getcurrent = interp2app(W_UserCoState.w_getcurrent), - spawn = interp2app(W_UserCoState.w_spawn), -) diff --git a/pypy/module/_stackless/interp_composable_coroutine.py b/pypy/module/_stackless/interp_composable_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_composable_coroutine.py +++ /dev/null @@ -1,34 +0,0 @@ -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef, interp2app -from pypy.module._stackless.interp_coroutine import AppCoState, AppCoroutine - - -class W_UserCoState(Wrappable): - def __init__(self, space): - self.costate = AppCoState(space) - self.costate.post_install() - - def 
descr_method__new__(space, w_subtype): - costate = space.allocate_instance(W_UserCoState, w_subtype) - W_UserCoState.__init__(costate, space) - return space.wrap(costate) - - def w_getcurrent(self): - space = self.costate.space - return space.wrap(self.costate.current) - - def w_spawn(self, w_subtype=None): - space = self.costate.space - if space.is_w(w_subtype, space.w_None): - w_subtype = space.gettypeobject(AppCoroutine.typedef) - co = space.allocate_instance(AppCoroutine, w_subtype) - AppCoroutine.__init__(co, space, state=self.costate) - return space.wrap(co) - -W_UserCoState.typedef = TypeDef("usercostate", - __new__ = interp2app(W_UserCoState.descr_method__new__.im_func), - __module__ = '_stackless', - getcurrent = interp2app(W_UserCoState.w_getcurrent), - spawn = interp2app(W_UserCoState.w_spawn), -) -W_UserCoState.acceptable_as_base_class = False diff --git a/pypy/module/_stackless/interp_coroutine.py b/pypy/module/_stackless/interp_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_coroutine.py +++ /dev/null @@ -1,403 +0,0 @@ -""" -Coroutine implementation for application level on top -of the internal coroutines. -This is an extensible concept. Multiple implementations -of concurrency can exist together, if they follow the -basic concept of maintaining their own costate. - -There is also some diversification possible by using -multiple costates for the same type. This leads to -disjoint switchable sets within the same type. - -I'm not so sure to what extent the opposite is possible, too. -I.e., merging the costate of tasklets and greenlets would -allow them to be parents of each other. Needs a bit more -experience to decide where to set the limits. 
-""" - -from pypy.interpreter.argument import Arguments -from pypy.interpreter.typedef import GetSetProperty, TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError, operationerrfmt - -from pypy.module._stackless.stackless_flags import StacklessFlags -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState, AbstractThunk, CoroutineExit - -from pypy.module.exceptions.interp_exceptions import W_SystemExit, _new_exception - -from pypy.rlib import rstack, jit # for resume points -from pypy.tool import stdlib_opcode as pythonopcode - -class _AppThunk(AbstractThunk): - - def __init__(self, space, costate, w_obj, args): - self.space = space - self.costate = costate - if not space.is_true(space.callable(w_obj)): - raise operationerrfmt( - space.w_TypeError, - "'%s' object is not callable", - space.type(w_obj).getname(space)) - self.w_func = w_obj - self.args = args - - def call(self): - costate = self.costate - w_result = self.space.call_args(self.w_func, self.args) - costate.w_tempval = w_result - -class _ResumeThunk(AbstractThunk): - def __init__(self, space, costate, w_frame): - self.space = space - self.costate = costate - self.w_frame = w_frame - - def call(self): - w_result = resume_frame(self.space, self.w_frame) - # costate.w_tempval = w_result #XXX? - - -W_CoroutineExit = _new_exception('CoroutineExit', W_SystemExit, - """Coroutine killed manually.""") - -# Should be moved to interp_stackless.py if it's ever implemented... Currently -# used by pypy/lib/stackless.py. 
-W_TaskletExit = _new_exception('TaskletExit', W_SystemExit, - """Tasklet killed manually.""") - -class AppCoroutine(Coroutine): # XXX, StacklessFlags): - - def __init__(self, space, state=None): - self.space = space - if state is None: - state = AppCoroutine._get_state(space) - Coroutine.__init__(self, state) - self.flags = 0 - self.newsubctx() - - def newsubctx(self): - ec = self.space.getexecutioncontext() - self.subctx = ec.Subcontext() - - def descr_method__new__(space, w_subtype): - co = space.allocate_instance(AppCoroutine, w_subtype) - AppCoroutine.__init__(co, space) - return space.wrap(co) - - def _get_state(space): - return space.fromcache(AppCoState) - _get_state = staticmethod(_get_state) - - def w_bind(self, w_func, __args__): - space = self.space - if self.frame is not None: - raise OperationError(space.w_ValueError, space.wrap( - "cannot bind a bound Coroutine")) - state = self.costate - thunk = _AppThunk(space, state, w_func, __args__) - self.bind(thunk) - - def w_switch(self): - space = self.space - if self.frame is None: - raise OperationError(space.w_ValueError, space.wrap( - "cannot switch to an unbound Coroutine")) - state = self.costate - self.switch() - w_ret, state.w_tempval = state.w_tempval, space.w_None - return w_ret - - def switch(self): - space = self.space - try: - Coroutine.switch(self) - except CoroutineExit: - raise OperationError(self.costate.w_CoroutineExit, space.w_None) - - def w_finished(self, w_excinfo): - pass - - def finish(self, operror=None): - space = self.space - if isinstance(operror, OperationError): - w_exctype = operror.w_type - w_excvalue = operror.get_w_value(space) - w_exctraceback = operror.get_traceback() - w_excinfo = space.newtuple([w_exctype, w_excvalue, w_exctraceback]) - - if w_exctype is self.costate.w_CoroutineExit: - self.coroutine_exit = True - else: - w_N = space.w_None - w_excinfo = space.newtuple([w_N, w_N, w_N]) - - return space.call_method(space.wrap(self),'finished', w_excinfo) - - def 
hello(self): - ec = self.space.getexecutioncontext() - self.subctx.enter(ec) - - def goodbye(self): - ec = self.space.getexecutioncontext() - self.subctx.leave(ec) - - def w_kill(self): - self.kill() - - def w_throw(self, w_type, w_value=None, w_traceback=None): - space = self.space - - operror = OperationError(w_type, w_value) - operror.normalize_exception(space) - - if not space.is_w(w_traceback, space.w_None): - from pypy.interpreter import pytraceback - tb = space.interpclass_w(w_traceback) - if tb is None or not space.is_true(space.isinstance(tb, - space.gettypeobject(pytraceback.PyTraceback.typedef))): - raise OperationError(space.w_TypeError, - space.wrap("throw: arg 3 must be a traceback or None")) - operror.set_traceback(tb) - - self._kill(operror) - - def _userdel(self): - if self.get_is_zombie(): - return - self.set_is_zombie(True) - self.space.userdel(self.space.wrap(self)) - - def w_getcurrent(space): - return space.wrap(AppCoroutine._get_state(space).current) - w_getcurrent = staticmethod(w_getcurrent) - - def w_getmain(space): - return space.wrap(AppCoroutine._get_state(space).main) - w_getmain = staticmethod(w_getmain) - - # pickling interface - def descr__reduce__(self, space): - # this is trying to be simplistic at the moment. - # we neither allow to pickle main (which can become a mess - # since it has some deep anchestor frames) - # nor we allow to pickle the current coroutine. - # rule: switch before pickling. - # you cannot construct the tree that you are climbing. 
- from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_stackless') - mod = space.interp_w(MixedModule, w_mod) - w_mod2 = space.getbuiltinmodule('_pickle_support') - mod2 = space.interp_w(MixedModule, w_mod2) - w_new_inst = mod.get('coroutine') - w = space.wrap - nt = space.newtuple - ec = self.space.getexecutioncontext() - - if self is self.costate.main: - return nt([mod.get('_return_main'), nt([])]) - - thunk = self.thunk - if isinstance(thunk, _AppThunk): - w_args, w_kwds = thunk.args.topacked() - w_thunk = nt([thunk.w_func, w_args, w_kwds]) - else: - w_thunk = space.w_None - - tup_base = [ - ] - tup_state = [ - w(self.flags), - self.subctx.getstate(space), - w_thunk, - w(self.parent), - ] - - return nt([w_new_inst, nt(tup_base), nt(tup_state)]) - - def descr__setstate__(self, space, w_args): - w_flags, w_state, w_thunk, w_parent = space.unpackiterable(w_args, - expected_length=4) - self.flags = space.int_w(w_flags) - if space.is_w(w_parent, space.w_None): - w_parent = self.w_getmain(space) - self.parent = space.interp_w(AppCoroutine, w_parent) - ec = self.space.getexecutioncontext() - self.subctx.setstate(space, w_state) - if space.is_w(w_thunk, space.w_None): - if space.is_w(w_state, space.w_None): - self.thunk = None - else: - self.bind(_ResumeThunk(space, self.costate, self.subctx.topframe)) - else: - w_func, w_args, w_kwds = space.unpackiterable(w_thunk, - expected_length=3) - args = Arguments.frompacked(space, w_args, w_kwds) - self.bind(_AppThunk(space, self.costate, w_func, args)) - - -# _mixin_ did not work -for methname in StacklessFlags.__dict__: - meth = getattr(StacklessFlags, methname) - if hasattr(meth, 'im_func'): - setattr(AppCoroutine, meth.__name__, meth.im_func) -del meth, methname - -def w_get_is_zombie(self, space): - return space.wrap(self.get_is_zombie()) -AppCoroutine.w_get_is_zombie = w_get_is_zombie - -def w_get_is_alive(self, space): - return space.wrap(self.is_alive()) -AppCoroutine.w_get_is_alive = 
w_get_is_alive - -def w_descr__framestack(self, space): - assert isinstance(self, AppCoroutine) - counter = 0 - f = self.subctx.topframe - while f is not None: - counter += 1 - f = f.f_backref() - items = [None] * counter - f = self.subctx.topframe - while f is not None: - counter -= 1 - assert counter >= 0 - items[counter] = space.wrap(f) - f = f.f_backref() - assert counter == 0 - return space.newtuple(items) - -def makeStaticMethod(module, classname, funcname): - "NOT_RPYTHON" - space = module.space - w_klass = space.getattr(space.wrap(module), space.wrap(classname)) - # HACK HACK HACK - # make the typeobject mutable for a while - from pypy.objspace.std.typeobject import W_TypeObject - assert isinstance(w_klass, W_TypeObject) - old_flag = w_klass.flag_heaptype - w_klass.flag_heaptype = True - - space.appexec([w_klass, space.wrap(funcname)], """ - (klass, funcname): - func = getattr(klass, funcname) - setattr(klass, funcname, staticmethod(func.im_func)) - """) - w_klass.flag_heaptype = old_flag - -def post_install(module): - makeStaticMethod(module, 'coroutine', 'getcurrent') - makeStaticMethod(module, 'coroutine', 'getmain') - space = module.space - AppCoroutine._get_state(space).post_install() - -# space.appexec("""() : - -# maybe use __spacebind__ for postprocessing - -AppCoroutine.typedef = TypeDef("coroutine", - __new__ = interp2app(AppCoroutine.descr_method__new__.im_func), - bind = interp2app(AppCoroutine.w_bind), - switch = interp2app(AppCoroutine.w_switch), - kill = interp2app(AppCoroutine.w_kill), - throw = interp2app(AppCoroutine.w_throw), - finished = interp2app(AppCoroutine.w_finished), - is_alive = GetSetProperty(AppCoroutine.w_get_is_alive), - is_zombie = GetSetProperty(AppCoroutine.w_get_is_zombie, - doc=AppCoroutine.get_is_zombie.__doc__), #--- this flag is a bit obscure - # and not useful (it's totally different from Coroutine.is_zombie(), too) - # but lib/stackless.py uses it - _framestack = GetSetProperty(w_descr__framestack), - getcurrent = 
interp2app(AppCoroutine.w_getcurrent), - getmain = interp2app(AppCoroutine.w_getmain), - __reduce__ = interp2app(AppCoroutine.descr__reduce__), - __setstate__ = interp2app(AppCoroutine.descr__setstate__), - __module__ = '_stackless', -) - -class AppCoState(BaseCoState): - def __init__(self, space): - BaseCoState.__init__(self) - self.w_tempval = space.w_None - self.space = space - - # XXX Workaround: for now we need to instantiate these classes - # explicitly for translation to work - W_CoroutineExit(space) - W_TaskletExit(space) - - # Exporting new exception to space - self.w_CoroutineExit = space.gettypefor(W_CoroutineExit) - space.setitem( - space.exceptions_module.w_dict, - space.new_interned_str('CoroutineExit'), - self.w_CoroutineExit) - space.setitem(space.builtin.w_dict, - space.new_interned_str('CoroutineExit'), - self.w_CoroutineExit) - - # Should be moved to interp_stackless.py if it's ever implemented... - self.w_TaskletExit = space.gettypefor(W_TaskletExit) - space.setitem( - space.exceptions_module.w_dict, - space.new_interned_str('TaskletExit'), - self.w_TaskletExit) - space.setitem(space.builtin.w_dict, - space.new_interned_str('TaskletExit'), - self.w_TaskletExit) - - def post_install(self): - self.current = self.main = AppCoroutine(self.space, state=self) - self.main.subctx.clear_framestack() # wack - -def return_main(space): - return AppCoroutine._get_state(space).main - -def get_stack_depth_limit(space): - return space.wrap(rstack.get_stack_depth_limit()) - - at unwrap_spec(limit=int) -def set_stack_depth_limit(space, limit): - rstack.set_stack_depth_limit(limit) - - -# ___________________________________________________________________ -# unpickling trampoline - -def resume_frame(space, w_frame): - from pypy.interpreter.pyframe import PyFrame - frame = space.interp_w(PyFrame, w_frame, can_be_None=True) - w_result = space.w_None - operr = None - executioncontext = frame.space.getexecutioncontext() - while frame is not None: - code = 
frame.pycode.co_code - instr = frame.last_instr - opcode = ord(code[instr]) - map = pythonopcode.opmap - call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], map['CALL_FUNCTION_VAR'], - map['CALL_FUNCTION_VAR_KW'], map['CALL_METHOD']] - assert opcode in call_ops - instr += 1 - oparg = ord(code[instr]) | ord(code[instr + 1]) << 8 - nargs = oparg & 0xff - nkwds = (oparg >> 8) & 0xff - if nkwds == 0: # only positional arguments - # fast paths leaves things on the stack, pop them - if space.config.objspace.opcodes.CALL_METHOD and opcode == map['CALL_METHOD']: - frame.dropvalues(nargs + 2) - elif opcode == map['CALL_FUNCTION']: - frame.dropvalues(nargs + 1) - - # small hack: unlink frame out of the execution context, because - # execute_frame will add it there again - executioncontext.topframeref = jit.non_virtual_ref(frame.f_backref()) - frame.last_instr = instr + 1 # continue after the call - try: - w_result = frame.execute_frame(w_result, operr) - except OperationError, operr: - pass - frame = frame.f_backref() - if operr: - raise operr - return w_result diff --git a/pypy/module/_stackless/interp_greenlet.py b/pypy/module/_stackless/interp_greenlet.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_greenlet.py +++ /dev/null @@ -1,238 +0,0 @@ -from pypy.interpreter.argument import Arguments -from pypy.interpreter.typedef import GetSetProperty, TypeDef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.gateway import NoneNotWrapped -from pypy.interpreter.error import OperationError - -from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState -from pypy.module._stackless.rcoroutine import AbstractThunk, syncstate -from pypy.module._stackless.interp_coroutine import makeStaticMethod - - -class GreenletThunk(AbstractThunk): - - def __init__(self, greenlet): - self.greenlet = greenlet - - def call(self): - greenlet = self.greenlet - greenlet.active = True - try: - space = greenlet.space - args_w = greenlet.costate.args_w - 
__args__ = Arguments(space, args_w) - try: - w_run = space.getattr(space.wrap(greenlet), space.wrap('run')) - greenlet.w_callable = None - w_result = space.call_args(w_run, __args__) - except OperationError, operror: - if not operror.match(space, greenlet.costate.w_GreenletExit): - raise - w_result = operror.get_w_value(space) - finally: - greenlet.active = False - greenlet.costate.args_w = [w_result] - -class AppGreenletCoState(BaseCoState): - def __init__(self, space): - BaseCoState.__init__(self) - self.args_w = None - self.space = space - self.w_GreenletExit = get(space, "GreenletExit") - self.w_GreenletError = get(space, "GreenletError") - - def post_install(self): - self.current = self.main = AppGreenlet(self.space, is_main=True) - -class AppGreenlet(Coroutine): - def __init__(self, space, w_callable=None, is_main=False): - Coroutine.__init__(self, self._get_state(space)) - self.space = space - self.w_callable = w_callable - self.active = is_main - self.subctx = space.getexecutioncontext().Subcontext() - if is_main: - self.subctx.clear_framestack() # wack - else: - self.bind(GreenletThunk(self)) - - def descr_method__new__(space, w_subtype, __args__): - co = space.allocate_instance(AppGreenlet, w_subtype) - AppGreenlet.__init__(co, space) - return space.wrap(co) - - def descr_method__init__(self, w_run=NoneNotWrapped, - w_parent=NoneNotWrapped): - if w_run is not None: - self.set_run(w_run) - if w_parent is not None: - self.set_parent(w_parent) - - def _get_state(space): - return space.fromcache(AppGreenletCoState) - _get_state = staticmethod(_get_state) - - def hello(self): - ec = self.space.getexecutioncontext() - self.subctx.enter(ec) - - def goodbye(self): - ec = self.space.getexecutioncontext() - self.subctx.leave(ec) - - def w_getcurrent(space): - return space.wrap(AppGreenlet._get_state(space).current) - w_getcurrent = staticmethod(w_getcurrent) - - def w_switch(self, args_w): - # Find the switch target - it might be a parent greenlet - space = 
self.space - costate = self.costate - target = self - while target.isdead(): - target = target.parent - assert isinstance(target, AppGreenlet) - # Switch to it - costate.args_w = args_w - if target is not costate.current: - target.switch() - else: - # case not handled in Coroutine.switch() - syncstate._do_things_to_do() - result_w = costate.args_w - costate.args_w = None - # costate.args_w can be set to None above for throw(), but then - # switch() should have raised. At this point cosstate.args_w != None. - assert result_w is not None - # Return the result of a switch, packaging it in a tuple if - # there is more than one value. - if len(result_w) == 1: - return result_w[0] - return space.newtuple(result_w) - - def w_throw(self, w_type=None, w_value=None, w_traceback=None): - space = self.space - if space.is_w(w_type, space.w_None): - w_type = self.costate.w_GreenletExit - # Code copied from RAISE_VARARGS but slightly modified. Not too nice. - operror = OperationError(w_type, w_value) - operror.normalize_exception(space) - if not space.is_w(w_traceback, space.w_None): - from pypy.interpreter import pytraceback - tb = space.interpclass_w(w_traceback) - if tb is None or not space.is_true(space.isinstance(tb, - space.gettypeobject(pytraceback.PyTraceback.typedef))): - raise OperationError(space.w_TypeError, - space.wrap("throw: arg 3 must be a traceback or None")) - operror.set_traceback(tb) - # Dead greenlet: turn GreenletExit into a regular return - if self.isdead() and operror.match(space, self.costate.w_GreenletExit): - args_w = [operror.get_w_value(space)] - else: - syncstate.push_exception(operror) - args_w = None - return self.w_switch(args_w) - - def _userdel(self): - self.space.userdel(self.space.wrap(self)) - - def isdead(self): - return self.thunk is None and not self.active - - def w_get_is_dead(self, space): - return space.newbool(self.isdead()) - - def descr__nonzero__(self): - return self.space.newbool(self.active) - - def w_get_run(self, space): - 
w_run = self.w_callable - if w_run is None: - raise OperationError(space.w_AttributeError, space.wrap("run")) - return w_run - - def set_run(self, w_run): - space = self.space - if self.thunk is None: - raise OperationError(space.w_AttributeError, - space.wrap("run cannot be set " - "after the start of the greenlet")) - self.w_callable = w_run - - def w_set_run(self, space, w_run): - self.set_run(w_run) - - def w_del_run(self, space): - if self.w_callable is None: - raise OperationError(space.w_AttributeError, space.wrap("run")) - self.w_callable = None - - def w_get_parent(self, space): - return space.wrap(self.parent) - - def set_parent(self, w_parent): - space = self.space - newparent = space.interp_w(AppGreenlet, w_parent) - if newparent.costate is not self.costate: - raise OperationError(self.costate.w_GreenletError, - space.wrap("invalid foreign parent")) - curr = newparent - while curr: - if curr is self: - raise OperationError(space.w_ValueError, - space.wrap("cyclic parent chain")) - curr = curr.parent - self.parent = newparent - - def w_set_parent(self, space, w_parent): - self.set_parent(w_parent) - - def w_get_frame(self, space): - if not self.active or self.costate.current is self: - f = None - else: - f = self.subctx.topframe - return space.wrap(f) - -def get(space, name): - w_module = space.getbuiltinmodule('_stackless') - return space.getattr(w_module, space.wrap(name)) - -def post_install(module): - "NOT_RPYTHON" - makeStaticMethod(module, 'greenlet', 'getcurrent') - space = module.space - state = AppGreenlet._get_state(space) - state.post_install() - w_greenlet = get(space, 'greenlet') - # HACK HACK HACK - # make the typeobject mutable for a while - from pypy.objspace.std.typeobject import W_TypeObject - assert isinstance(w_greenlet, W_TypeObject) - old_flag = w_greenlet.flag_heaptype - w_greenlet.flag_heaptype = True - space.appexec([w_greenlet, - state.w_GreenletExit, - state.w_GreenletError], """ - (greenlet, exit, error): - 
greenlet.GreenletExit = exit - greenlet.error = error - """) - w_greenlet.flag_heaptype = old_flag - -AppGreenlet.typedef = TypeDef("greenlet", - __new__ = interp2app(AppGreenlet.descr_method__new__.im_func), - __init__ = interp2app(AppGreenlet.descr_method__init__), - switch = interp2app(AppGreenlet.w_switch), - dead = GetSetProperty(AppGreenlet.w_get_is_dead), - run = GetSetProperty(AppGreenlet.w_get_run, - AppGreenlet.w_set_run, - AppGreenlet.w_del_run), - parent = GetSetProperty(AppGreenlet.w_get_parent, - AppGreenlet.w_set_parent), - getcurrent = interp2app(AppGreenlet.w_getcurrent), - throw = interp2app(AppGreenlet.w_throw), - gr_frame = GetSetProperty(AppGreenlet.w_get_frame), - __nonzero__ = interp2app(AppGreenlet.descr__nonzero__), - __module__ = '_stackless', -) diff --git a/pypy/module/_stackless/interp_stackless.py b/pypy/module/_stackless/interp_stackless.py deleted file mode 100644 --- a/pypy/module/_stackless/interp_stackless.py +++ /dev/null @@ -1,28 +0,0 @@ -from pypy.interpreter.baseobjspace import Wrappable -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app -import os - - -class tasklet(Wrappable): - - def __init__(self, space): - self.space = space - self.flags = 0 - self.state = None - - def descr_method__new__(space, w_subtype): - t = space.allocate_instance(tasklet, w_subtype) - tasklet.__init__(t, space) - return space.wrap(t) - - def w_demo(self): - output("42") - -tasklet.typedef = TypeDef("tasklet", - __new__ = interp2app(tasklet.descr_method__new__.im_func), - demo = interp2app(tasklet.w_demo), -) - -def output(stuff): - os.write(2, stuff + '\n') diff --git a/pypy/module/_stackless/rclonable.py b/pypy/module/_stackless/rclonable.py deleted file mode 100644 --- a/pypy/module/_stackless/rclonable.py +++ /dev/null @@ -1,87 +0,0 @@ -from pypy.module._stackless.interp_coroutine import AbstractThunk, Coroutine -from pypy.rlib.rgc import gc_swap_pool, gc_clone -from pypy.rlib.objectmodel import 
we_are_translated - - -class InterpClonableMixin: - local_pool = None - _mixin_ = True - - def hello_local_pool(self): - if we_are_translated(): - self.saved_pool = gc_swap_pool(self.local_pool) - - def goodbye_local_pool(self): - if we_are_translated(): - self.local_pool = gc_swap_pool(self.saved_pool) - self.saved_pool = None - - def clone_into(self, copy, extradata=None): - if not we_are_translated(): - raise NotImplementedError - # cannot gc_clone() directly self, because it is not in its own - # local_pool. Moreover, it has a __del__, which cloning doesn't - # support properly at the moment. - copy.parent = self.parent - # the hello/goodbye pair has two purposes: it forces - # self.local_pool to be computed even if it was None up to now, - # and it puts the 'data' tuple in the correct pool to be cloned. - self.hello_local_pool() - data = (self.frame, extradata) - self.goodbye_local_pool() - # clone! - data, copy.local_pool = gc_clone(data, self.local_pool) - copy.frame, extradata = data - copy.thunk = self.thunk # in case we haven't switched to self yet - return extradata - - -class InterpClonableCoroutine(Coroutine, InterpClonableMixin): - - def hello(self): - self.hello_local_pool() - - def goodbye(self): - self.goodbye_local_pool() - - def clone(self): - # hack, this is overridden in AppClonableCoroutine - if self.getcurrent() is self: - raise RuntimeError("clone() cannot clone the current coroutine; " - "use fork() instead") - copy = InterpClonableCoroutine(self.costate) - self.clone_into(copy) - return copy - - -class ForkThunk(AbstractThunk): - def __init__(self, coroutine): - self.coroutine = coroutine - self.newcoroutine = None - def call(self): - oldcoro = self.coroutine - self.coroutine = None - newcoro = oldcoro.clone() - newcoro.parent = oldcoro - self.newcoroutine = newcoro - -def fork(): - """Fork, as in the Unix fork(): the call returns twice, and the return - value of the call is either the new 'child' coroutine object (if returning - into the 
parent), or None (if returning into the child). This returns - into the parent first, which can switch to the child later. - """ - current = InterpClonableCoroutine.getcurrent() - if not isinstance(current, InterpClonableCoroutine): - raise RuntimeError("fork() in a non-clonable coroutine") - thunk = ForkThunk(current) - coro_fork = InterpClonableCoroutine() - coro_fork.bind(thunk) - coro_fork.switch() - # we resume here twice. The following would need explanations about - # why it returns the correct thing in both the parent and the child... - return thunk.newcoroutine - -## from pypy.rpython.lltypesystem import lltype, lloperation -## lloperation.llop.debug_view(lltype.Void, current, thunk, -## lloperation.llop.gc_x_size_header(lltype.Signed)) diff --git a/pypy/module/_stackless/rcoroutine.py b/pypy/module/_stackless/rcoroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/rcoroutine.py +++ /dev/null @@ -1,10 +0,0 @@ -from pypy.rlib.rcoroutine import make_coroutine_classes -from pypy.interpreter.baseobjspace import Wrappable - -d = make_coroutine_classes(Wrappable) - -Coroutine = d['Coroutine'] -BaseCoState = d['BaseCoState'] -AbstractThunk = d['AbstractThunk'] -syncstate = d['syncstate'] -CoroutineExit = d['CoroutineExit'] diff --git a/pypy/module/_stackless/stackless_flags.py b/pypy/module/_stackless/stackless_flags.py deleted file mode 100644 --- a/pypy/module/_stackless/stackless_flags.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -basic definitions for tasklet flags. -For simplicity and compatibility, -they are defined the same for coroutines, -even if they are not used. - -taken from tasklet_structs.h ----------------------------- - -/*************************************************************************** - - Tasklet Flag Definition - ----------------------- - - blocked: The tasklet is either waiting in a channel for - writing (1) or reading (-1) or not blocked (0). - Maintained by the channel logic. Do not change. 
- - atomic: If true, schedulers will never switch. Driven by - the code object or dynamically, see below. - - ignore_nesting: Allows auto-scheduling, even if nesting_level - is not zero. - - autoschedule: The tasklet likes to be auto-scheduled. User driven. - - block_trap: Debugging aid. Whenever the tasklet would be - blocked by a channel, an exception is raised. - - is_zombie: This tasklet is almost dead, its deallocation has - started. The tasklet *must* die at some time, or the - process can never end. - - pending_irq: If set, an interrupt was issued during an atomic - operation, and should be handled when possible. - - - Policy for atomic/autoschedule and switching: - --------------------------------------------- - A tasklet switch can always be done explicitly by calling schedule(). - Atomic and schedule are concerned with automatic features. - - atomic autoschedule - - 1 any Neither a scheduler nor a watchdog will - try to switch this tasklet. - - 0 0 The tasklet can be stopped on desire, or it - can be killed by an exception. - - 0 1 Like above, plus auto-scheduling is enabled. - - Default settings: - ----------------- - All flags are zero by default. 
- - ***************************************************************************/ - -typedef struct _tasklet_flags { - int blocked: 2; - unsigned int atomic: 1; - unsigned int ignore_nesting: 1; - unsigned int autoschedule: 1; - unsigned int block_trap: 1; - unsigned int is_zombie: 1; - unsigned int pending_irq: 1; -} PyTaskletFlagStruc; -""" - -from pypy.rlib.rarithmetic import LONG_BIT, intmask - -class BitSetDef(object): - __slots__ = "_names __dict__ _attrname".split() - - def __init__(self, _attrname): - self._names = [] - self._attrname = _attrname - - def __setattr__(self, key, value): - if key not in self.__slots__: - assert key not in self.__dict__ - self._names.append(key) - object.__setattr__(self, key, value) - - def __iter__(self): - return self._enum_objects() - - def _enum_objects(self): - for name in self._names: - yield name, getattr(self, name) - -# negative values are user-writable -flags = BitSetDef("flags") -flags.blocked = 2, """writing (1) or reading (-1) or not blocked (0)""" -flags.atomic = -1, """If true, schedulers will never switch""" -flags.ignore_nesting = -1, """allow auto-scheduling in nested interpreters""" -flags.autoschedule = -1, """enable auto-scheduling""" -flags.block_trap = -1, """raise an exception instead of blocking""" -flags.is_zombie = 1, """__del__ is in progress""" -flags.pending_irq = 1, """an interrupt occured while being atomic""" - -def make_get_bits(name, bits, shift): - """ return a bool for single bits, signed int otherwise """ - signmask = 1 << (bits - 1 + shift) - lshift = bits + shift - rshift = bits - if bits == 1: - return "bool(%s & 0x%x)" % (name, signmask) - else: - return "intmask(%s << (LONG_BIT-%d)) >> (LONG_BIT-%d)" % (name, lshift, rshift) - -def make_set_bits(name, bits, shift): - datamask = int('1' * bits, 2) - clearmask = datamask << shift - return "%s & ~0x%x | (value & 0x%x) << %d" % (name, clearmask, datamask, shift) - -def gen_code(): - from cStringIO import StringIO - f = StringIO() - print 
>> f, "class StacklessFlags(object):" - print >> f, " _mixin_ = True" - shift = 0 - field = "self.%s" % flags._attrname - for name, (bits, doc) in flags: - write, bits = bits < 0, abs(bits) - print >> f - print >> f, ' def get_%s(self):' % name - print >> f, ' """%s"""' % doc - print >> f, ' return %s' % make_get_bits(field, bits, shift) - print >> f, ' def set_%s(self, value):' % name - print >> f, ' """%s"""' % doc - print >> f, ' %s = %s' % (field, make_set_bits(field, bits, shift)) - print >> f, ' set_%s._public = %s' % (name, write) - shift += bits - return f.getvalue() - -# BEGIN generated code -class StacklessFlags(object): - _mixin_ = True - - def get_blocked(self): - """writing (1) or reading (-1) or not blocked (0)""" - return intmask(self.flags << (LONG_BIT-2)) >> (LONG_BIT-2) - def set_blocked(self, value): - """writing (1) or reading (-1) or not blocked (0)""" - self.flags = self.flags & ~0x3 | (value & 0x3) << 0 - set_blocked._public = False - - def get_atomic(self): - """If true, schedulers will never switch""" - return bool(self.flags & 0x4) - def set_atomic(self, value): - """If true, schedulers will never switch""" - self.flags = self.flags & ~0x4 | (value & 0x1) << 2 - set_atomic._public = True - - def get_ignore_nesting(self): - """allow auto-scheduling in nested interpreters""" - return bool(self.flags & 0x8) - def set_ignore_nesting(self, value): - """allow auto-scheduling in nested interpreters""" - self.flags = self.flags & ~0x8 | (value & 0x1) << 3 - set_ignore_nesting._public = True - - def get_autoschedule(self): - """enable auto-scheduling""" - return bool(self.flags & 0x10) - def set_autoschedule(self, value): - """enable auto-scheduling""" - self.flags = self.flags & ~0x10 | (value & 0x1) << 4 - set_autoschedule._public = True - - def get_block_trap(self): - """raise an exception instead of blocking""" - return bool(self.flags & 0x20) - def set_block_trap(self, value): - """raise an exception instead of blocking""" - self.flags = 
self.flags & ~0x20 | (value & 0x1) << 5 - set_block_trap._public = True - - def get_is_zombie(self): - """__del__ is in progress""" - return bool(self.flags & 0x40) - def set_is_zombie(self, value): - """__del__ is in progress""" - self.flags = self.flags & ~0x40 | (value & 0x1) << 6 - set_is_zombie._public = False - - def get_pending_irq(self): - """an interrupt occured while being atomic""" - return bool(self.flags & 0x80) - def set_pending_irq(self, value): - """an interrupt occured while being atomic""" - self.flags = self.flags & ~0x80 | (value & 0x1) << 7 - set_pending_irq._public = False - -# END generated code - -if __name__ == '__main__': - # paste this into the file - print gen_code() diff --git a/pypy/module/_stackless/test/__init__.py b/pypy/module/_stackless/test/__init__.py deleted file mode 100644 --- a/pypy/module/_stackless/test/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# \ No newline at end of file diff --git a/pypy/module/_stackless/test/conftest.py b/pypy/module/_stackless/test/conftest.py deleted file mode 100644 --- a/pypy/module/_stackless/test/conftest.py +++ /dev/null @@ -1,8 +0,0 @@ -import sys -import py.test - -def pytest_runtest_setup(item): - py.test.importorskip('greenlet') - if sys.platform == 'win32': - py.test.skip("stackless tests segfault on Windows") - diff --git a/pypy/module/_stackless/test/slp_test_pickle.py b/pypy/module/_stackless/test/slp_test_pickle.py deleted file mode 100644 --- a/pypy/module/_stackless/test/slp_test_pickle.py +++ /dev/null @@ -1,35 +0,0 @@ -from pypy.conftest import gettestobjspace - -# app-level testing of coroutine pickling - -class AppTest_Pickle: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_simple_ish(self): - - output = [] - import _stackless - def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x) - - def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = 
_stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - - example() - assert output == [16, 8, 4, 2, 1] diff --git a/pypy/module/_stackless/test/test_choicepoint.py b/pypy/module/_stackless/test/test_choicepoint.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_choicepoint.py +++ /dev/null @@ -1,85 +0,0 @@ -import py; py.test.skip("clonable coroutines not really maintained any more") - -from pypy.rlib.rcoroutine import AbstractThunk -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine - -class ChoicePointHolder(object): - def __init__(self): - self.choicepoints = [] - self.clone_me = False - self.answer = 0 - self.solutions_count = 0 - - def next_choice(self): - return self.choicepoints.pop() - - def add(self, choice, answer=0): - self.choicepoints.append((choice, answer)) - - def more_choices(self): - return bool(self.choicepoints) - - def choice(self): - #os.write(1, "choice\n") - self.clone_me = True - self.g_main.switch() - #os.write(1, "answer: %d\n" % (self.answer,)) - return self.answer - - def fail(self): - self.g_main.switch() - assert False - -choicepoints = ChoicePointHolder() - -# ____________________________________________________________ - -class SearchTask(AbstractThunk): - def call(self): - path = [] - for i in range(10): - res = choicepoints.choice() - assert len(path) == i - path.append(res) - #os.write(1, "{%x} trying: %s\n" % (id(path), path)) - if i == 3: - import gc; gc.collect() - #os.write(1, "{%x} found a solution: %s\n" % (id(path), path)) - choicepoints.solutions_count += 1 - -# ____________________________________________________________ - - -class SearchAllTask(AbstractThunk): - def call(self): - search_coro = ClonableCoroutine() - search_coro.bind(SearchTask()) - choicepoints.add(search_coro) - - #os.write(1, "starting\n") - while 
choicepoints.more_choices(): - searcher, nextvalue = choicepoints.next_choice() - choicepoints.clone_me = False - choicepoints.answer = nextvalue - #os.write(1, '<<< {%x} %d\n' % (id(searcher), nextvalue)) - searcher.switch() - #os.write(1, '>>> %d\n' % (choicepoints.clone_me,)) - if choicepoints.clone_me: - searcher2 = searcher.clone() - #os.write(1, 'searcher = {%x}, searcher2 = {%x}\n' % ( - # id(searcher), id(searcher2))) - choicepoints.add(searcher, 5) - choicepoints.add(searcher2, 4) - -def entry_point(): - choicepoints.g_main = ClonableCoroutine() - choicepoints.g_main.bind(SearchAllTask()) - choicepoints.g_main.switch() - return choicepoints.solutions_count - -def test_choicepoint(): - from pypy.translator.c.test import test_newgc - tester = test_newgc.TestUsingStacklessFramework() - fn = tester.getcompiled(entry_point) - res = fn() - assert res == 2 ** 10 diff --git a/pypy/module/_stackless/test/test_clonable.py b/pypy/module/_stackless/test/test_clonable.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_clonable.py +++ /dev/null @@ -1,187 +0,0 @@ -import py; py.test.skip("clonable coroutines not really maintained any more") - -from pypy.conftest import gettestobjspace, option -import py, sys - -# app-level testing of coroutine cloning - -class AppTestClonable: - - def setup_class(cls): - if not option.runappdirect: - py.test.skip('pure appdirect test (run with -A)') - cls.space = space = gettestobjspace(usemodules=('_stackless',)) - if not space.is_true(space.appexec([], """(): - import _stackless - return hasattr(_stackless, 'clonable') - """)): - py.test.skip('no _stackless.clonable') - - - def test_solver(self): - import _stackless - - class Fail(Exception): - pass - - class Success(Exception): - pass - - def first_solution(func): - global next_answer - co = _stackless.clonable() - co.bind(func) - pending = [(co, None)] - while pending: - co, next_answer = pending.pop() - try: - co.switch() - except Fail: - pass - except Success, e: - 
return e.args[0] - else: - # zero_or_one() called, clone the coroutine - co2 = co.clone() - pending.append((co2, 1)) - pending.append((co, 0)) - raise Fail("no solution") - - pending = [] - main = _stackless.clonable.getcurrent() - - def zero_or_one(): - main.switch() - return next_answer - - # ____________________________________________________________ - - invalid_prefixes = { - (0, 0): True, - (0, 1, 0): True, - (0, 1, 1): True, - (1, 0): True, - (1, 1, 0, 0): True, - } - - def example(): - test = [] - for n in range(5): - test.append(zero_or_one()) - if tuple(test) in invalid_prefixes: - raise Fail - raise Success(test) - - res = first_solution(example) - assert res == [1, 1, 0, 1, 0] - - - def test_myself_may_not_be_me_any_more(self): - import gc - from _stackless import clonable - - counter = [0] - - def runner(): - while 1: - assert clonable.getcurrent() is coro - counter[0] += 1 - main.switch() - - main = clonable.getcurrent() - coro = clonable() - coro.bind(runner) - - coro.switch() - assert counter == [1] - - assert clonable.getcurrent() is main - coro1 = coro.clone() - assert counter == [1] - assert clonable.getcurrent() is main - coro.switch() - assert counter == [2] - coro.switch() - assert counter == [3] - assert clonable.getcurrent() is main - del coro1 - gc.collect() - #print "collected!" 
- assert clonable.getcurrent() is main - assert counter == [3] - coro.switch() - assert clonable.getcurrent() is main - assert counter == [4] - - - def test_fork(self): - import _stackless - - class Fail(Exception): - pass - - class Success(Exception): - pass - - def first_solution(func): - global next_answer - co = _stackless.clonable() - co.bind(func) - try: - co.switch() - except Success, e: - return e.args[0] - - def zero_or_one(): - sub = _stackless.fork() - if sub is not None: - # in the parent: run the child first - try: - sub.switch() - except Fail: - pass - # then proceed with answer '1' - return 1 - else: - # in the child: answer '0' - return 0 - - # ____________________________________________________________ - - invalid_prefixes = { - (0, 0): True, - (0, 1, 0): True, - (0, 1, 1): True, - (1, 0): True, - (1, 1, 0, 0): True, - } - - def example(): - test = [] - for n in range(5): - test.append(zero_or_one()) - if tuple(test) in invalid_prefixes: - raise Fail - raise Success(test) - - res = first_solution(example) - assert res == [1, 1, 0, 1, 0] - - def test_clone_before_start(self): - """Tests that a clonable coroutine can be - cloned before it is started - (this used to fail with a segmentation fault) - """ - import _stackless - - counter = [0] - def simple_coro(): - print "hello" - counter[0] += 1 - - s = _stackless.clonable() - s.bind(simple_coro) - t = s.clone() - s.switch() - t.switch() - assert counter[0] == 2 diff --git a/pypy/module/_stackless/test/test_composable_coroutine.py b/pypy/module/_stackless/test/test_composable_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_composable_coroutine.py +++ /dev/null @@ -1,133 +0,0 @@ -""" a faith is the connection between past and future that divides the - application into switch-compatible chunks. 
- -- stakkars -""" -from pypy.conftest import gettestobjspace -from py.test import skip - -class AppTest_ComposableCoroutine: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - cls.w_generator_ = space.appexec([], """(): - import _stackless - - generators_costate = _stackless.usercostate() - main = generators_costate.getcurrent() - - class generator_iterator(_stackless.coroutine): - - def __iter__(self): - return self - - def next(self): - if self.gi_answer is not None: - raise ValueError('stackless-generator' - ' already executing') - self.gi_answer = [] - self.gi_caller = generators_costate.getcurrent() - self.switch() - answer = self.gi_answer - self.gi_answer = None - if answer: - return answer[0] - else: - raise StopIteration - - def generator(f): - def myfunc(*args, **kwds): - g = generators_costate.spawn(generator_iterator) - g.gi_answer = None - g.bind(f, *args, **kwds) - return g - return myfunc - - def Yield(value): - g = generators_costate.getcurrent() - if g is main: - raise ValueError('Yield() outside any stackless-generator') - assert isinstance(g, generator_iterator) - assert g.gi_answer == [] - g.gi_answer.append(value) - g.gi_caller.switch() - - generator.Yield = Yield - generator._costate = generators_costate - return (generator,) - """) - - def test_simple_costate(self): - import _stackless - costate = _stackless.usercostate() - main = costate.getcurrent() - - result = [] - def f(): - result.append(costate.getcurrent()) - co = costate.spawn() - co.bind(f) - co.switch() - assert result == [co] - - def test_generator(self): - generator, = self.generator_ - - def squares(n): - for i in range(n): - generator.Yield(i*i) - squares = generator(squares) - - lst1 = [i*i for i in range(10)] - for got in squares(10): - expected = lst1.pop(0) - assert got == expected - assert lst1 == [] - - def test_multiple_costates(self): - """Test that two independent costates mix transparently: - - - compute_costate, 
used for a coroutine that fills a list with - some more items each time it is switched to - - - generators_costate, used interally by self.generator (see above) - """ - - import _stackless - generator, = self.generator_ - - # you can see how it fails if we don't have two different costates - # by setting compute_costate to generator._costate instead - compute_costate = _stackless.usercostate() - compute_main = compute_costate.getcurrent() - lst = [] - - def filler(): # -> 0, 1, 2, 100, 101, 102, 200, 201, 202, 300 ... - for k in range(5): - for j in range(3): - lst.append(100 * k + j) - compute_main.switch() - - filler_co = compute_costate.spawn() - filler_co.bind(filler) - - def grab_next_value(): - while not lst: - #print 'filling more...' - filler_co.switch() - #print 'now lst =', lst - #print 'grabbing', lst[0] - return lst.pop(0) - - def squares(n): - for i in range(n): - #print 'square:', i - generator.Yield(i*grab_next_value()) - squares = generator(squares) - - lst1 = [0, 1, 4, 300, 404, 510, 1200, 1407, 1616, 2700] - for got in squares(10): - expected = lst1.pop(0) - assert got == expected - assert lst1 == [] diff --git a/pypy/module/_stackless/test/test_coroutine.py b/pypy/module/_stackless/test/test_coroutine.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_coroutine.py +++ /dev/null @@ -1,168 +0,0 @@ -from pypy.conftest import gettestobjspace, option -from py.test import skip - - -class AppTest_Coroutine: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_raise_propagate(self): - import _stackless as stackless - co = stackless.coroutine() - def f(): - return 1/0 - co.bind(f) - try: - co.switch() - except ZeroDivisionError: - pass - else: - raise AssertionError("exception not propagated") - - def test_strange_test(self): - from _stackless import coroutine - def f(): - print "in new coro" - return 42 - def create(): - b = coroutine() - b.bind(f) - print "bound" - b.switch() - 
print "switched" - return b - a = coroutine() - a.bind(create) - b = a.switch() - # now b.parent = a - def nothing(): - pass - a.bind(nothing) - def kill(): - # this sets a.parent = b - a.kill() - b.bind(kill) - b.switch() - - def test_kill(self): - import _stackless as stackless - co = stackless.coroutine() - def f(): - pass - co.bind(f) - assert co.is_alive - co.kill() - assert not co.is_alive - - def test_kill_running(self): - coroutineexit = [] - import _stackless as stackless - main = stackless.coroutine.getcurrent() - result = [] - co = stackless.coroutine() - def f(): - x = 2 - try: - result.append(1) - main.switch() - x = 3 - except CoroutineExit: - coroutineexit.append(True) - raise - finally: - result.append(x) - result.append(4) - co.bind(f) - assert co.is_alive - co.switch() - assert co.is_alive - assert result == [1] - co.kill() - assert not co.is_alive - assert result == [1, 2] - assert coroutineexit == [True] - - def test_bogus_bind(self): - import _stackless as stackless - co = stackless.coroutine() - def f(): - pass - co.bind(f) - raises(ValueError, co.bind, f) - - def test__framestack(self): - import _stackless as stackless - main = stackless.coroutine.getmain() - co = stackless.coroutine() - def g(): - return co._framestack - def f(): - return g() - - co.bind(f) - stack = co.switch() - assert stack == () # running corountine, _framestack is empty - - co = stackless.coroutine() - def g(): - return main.switch() - def f(): - return g() - - co.bind(f) - co.switch() - stack = co._framestack - assert len(stack) == 2 - assert stack[0].f_code is f.func_code - assert stack[1].f_code is g.func_code - - co = stackless.coroutine() - - - -class AppTestDirect: - def setup_class(cls): - if not option.runappdirect: - skip('pure appdirect test (run with -A)') - cls.space = gettestobjspace(usemodules=('_stackless',)) - - def test_stack_depth_limit(self): - import sys - import _stackless as stackless - st = stackless.get_stack_depth_limit() - try: - 
stackless.set_stack_depth_limit(1) - assert stackless.get_stack_depth_limit() == 1 - try: - co = stackless.coroutine() - def f(): - pass - co.bind(f) - co.switch() - except RuntimeError: - pass - finally: - stackless.set_stack_depth_limit(st) - -class TestRandomThings: - def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_stackless',)) - - def test___del___handling(self): - space = self.space - w_l = space.newlist([]) - coro = space.appexec([w_l], """(l): - from _stackless import coroutine - class MyCoroutine(coroutine): - def __del__(self): - l.append(self.is_zombie) - return MyCoroutine() - """) - coro.__del__() - space.user_del_action.perform(space.getexecutioncontext(), None) - coro._kill_finally() - assert space.len_w(w_l) == 1 - res = space.is_true(space.getitem(w_l, space.wrap(0))) - assert res diff --git a/pypy/module/_stackless/test/test_greenlet.py b/pypy/module/_stackless/test/test_greenlet.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_greenlet.py +++ /dev/null @@ -1,643 +0,0 @@ -from pypy.conftest import gettestobjspace, skip_on_missing_buildoption - -class AppTest_Greenlet: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_very_simple(self): - from _stackless import greenlet - lst = [] - def f(x): - lst.append(x) - return x + 10 - g = greenlet(f) - assert not g - res = g.switch(20) - assert res == 30 - assert lst == [20] - assert g.dead - assert not g - - def test_switch_back_to_main(self): - from _stackless import greenlet - lst = [] - main = greenlet.getcurrent() - def f(x): - lst.append(x) - x = main.switch(x + 10) - return 40 + x - g = greenlet(f) - res = g.switch(20) - assert res == 30 - assert lst == [20] - assert not g.dead - res = g.switch(2) - assert res == 42 - assert g.dead - - def test_simple(self): - from _stackless import greenlet - lst = [] - gs = [] - def f(): - lst.append(1) - greenlet.getcurrent().parent.switch() - lst.append(3) - g = 
greenlet(f) - lst.append(0) - g.switch() - lst.append(2) - g.switch() - lst.append(4) - assert lst == range(5) - - def test_exception_simple(self): - from _stackless import greenlet - def f(): - raise ValueError - g1 = greenlet(f) - raises(ValueError, g1.switch) - - def test_exception_propagate(self): - from _stackless import greenlet - def f(): - raise ValueError - def g(): - return g1.switch() - g1 = greenlet(f) - g2 = greenlet(g) - raises(ValueError, g1.switch) - g1 = greenlet(f) - raises(ValueError, g2.switch) - - - def test_exc_info_save_restore(self): - from _stackless import greenlet - import sys - def f(): - try: - raise ValueError('fun') - except: - exc_info = sys.exc_info() - greenlet(h).switch() - assert exc_info == sys.exc_info() - - def h(): - assert sys.exc_info() == (None, None, None) - - greenlet(f).switch() - - def test_exception(self): - from _stackless import greenlet - import sys - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise ValueError - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - raises(TypeError, "g2.parent = 1") - g2.parent = g1 - assert seen == [] - raises(ValueError, g2.switch) - assert seen == [ValueError] - g2.switch() - assert seen == [ValueError] - - def test_send_exception(self): - from _stackless import greenlet - import sys - def send_exception(g, exc): - # note: send_exception(g, exc) can be now done with g.throw(exc). - # the purpose of this test is to explicitely check the propagation rules. 
- def crasher(exc): - raise exc - g1 = greenlet(crasher) - g1.parent = g - g1.switch(exc) - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise ValueError - - seen = [] - g1 = greenlet(fmain) - g1.switch(seen) - raises(KeyError, "send_exception(g1, KeyError)") - assert seen == [KeyError] - seen = [] - g1 = greenlet(fmain) - g1.switch(seen) - raises(KeyError, "g1.throw(KeyError)") - assert seen == [KeyError] - assert g1.dead - - def test_frame(self): - from _stackless import greenlet - import sys - def f1(): - f = sys._getframe(0) - assert f.f_back is None - greenlet.getcurrent().parent.switch(f) - return "meaning of life" - g = greenlet(f1) - frame = g.switch() - assert frame is g.gr_frame - assert g - next = g.switch() - assert not g - assert next == "meaning of life" - assert g.gr_frame is None - - def test_mixing_greenlet_coroutine(self): - from _stackless import greenlet, coroutine - lst = [] - def f(): - lst.append(1) - greenlet.getcurrent().parent.switch() - lst.append(3) - def make_h(c): - def h(): - g = greenlet(f) - lst.append(0) - g.switch() - c.switch() - lst.append(2) - g.switch() - c.switch() - lst.append(4) - c.switch() - return h - c1 = coroutine.getcurrent() - c2 = coroutine() - c3 = coroutine() - c2.bind(make_h(c3)) - c3.bind(make_h(c2)) - c2.switch() - assert lst == [0, 1, 0, 1, 2, 3, 2, 3, 4, 4] - - def test_dealloc(self): - skip("not working yet") - from _stackless import greenlet - import sys - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise ValueError - seen = [] - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - assert seen == [] - del g1 - assert seen == [greenlet.GreenletExit] - del g2 - assert seen == [greenlet.GreenletExit, greenlet.GreenletExit] - - -# ____________________________________________________________ -# -# The tests from greenlets. 
-# For now, without the ones that involve threads -# -class AppTest_PyMagicTestGreenlet: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - cls.w_glob = space.appexec([], """(): - import sys - from _stackless import greenlet - - class SomeError(Exception): - pass - - def fmain(seen): - try: - greenlet.getcurrent().parent.switch() - except: - seen.append(sys.exc_info()[0]) - raise - raise SomeError - - class Glob: pass - glob = Glob() - glob.__dict__.update(locals()) - return glob - """) - - def test_simple(self): - greenlet = self.glob.greenlet - lst = [] - def f(): - lst.append(1) - greenlet.getcurrent().parent.switch() - lst.append(3) - g = greenlet(f) - lst.append(0) - g.switch() - lst.append(2) - g.switch() - lst.append(4) - assert lst == range(5) - - def test_exception(self): - greenlet = self.glob.greenlet - fmain = self.glob.fmain - SomeError = self.glob.SomeError - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - g2.parent = g1 - assert seen == [] - raises(SomeError, g2.switch) - assert seen == [SomeError] - g2.switch() - assert seen == [SomeError] - - def test_send_exception(self): - greenlet = self.glob.greenlet - fmain = self.glob.fmain - def send_exception(g, exc): - # note: send_exception(g, exc) can be now done with g.throw(exc). - # the purpose of this test is to explicitely check the - # propagation rules. 
- def crasher(exc): - raise exc - g1 = greenlet(crasher, parent=g) - g1.switch(exc) - - seen = [] - g1 = greenlet(fmain) - g1.switch(seen) - raises(KeyError, "send_exception(g1, KeyError)") - assert seen == [KeyError] - - def test_dealloc(self): - skip("XXX in-progress: GC handling of greenlets") - import gc - greenlet = self.glob.greenlet - fmain = self.glob.fmain - seen = [] - g1 = greenlet(fmain) - g2 = greenlet(fmain) - g1.switch(seen) - g2.switch(seen) - assert seen == [] - del g1 - gc.collect() - assert seen == [greenlet.GreenletExit] - del g2 - gc.collect() - assert seen == [greenlet.GreenletExit, greenlet.GreenletExit] - - def test_frame(self): - import sys - greenlet = self.glob.greenlet - def f1(): - f = sys._getframe(0) - assert f.f_back is None - greenlet.getcurrent().parent.switch(f) - return "meaning of life" - g = greenlet(f1) - frame = g.switch() - assert frame is g.gr_frame - assert g - next = g.switch() - assert not g - assert next == "meaning of life" - assert g.gr_frame is None - - -class AppTest_PyMagicTestThrow: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_class(self): - from _stackless import greenlet - def switch(*args): - return greenlet.getcurrent().parent.switch(*args) - - def f(): - try: - switch("ok") - except RuntimeError: - switch("ok") - return - switch("fail") - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw(RuntimeError) - assert res == "ok" - - def test_val(self): - from _stackless import greenlet - def switch(*args): - return greenlet.getcurrent().parent.switch(*args) - - def f(): - try: - switch("ok") - except RuntimeError, val: - if str(val) == "ciao": - switch("ok") - return - switch("fail") - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw(RuntimeError("ciao")) - assert res == "ok" - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw(RuntimeError, "ciao") - assert res == "ok" - - def 
test_kill(self): - from _stackless import greenlet - def switch(*args): - return greenlet.getcurrent().parent.switch(*args) - - def f(): - switch("ok") - switch("fail") - - g = greenlet(f) - res = g.switch() - assert res == "ok" - res = g.throw() - assert isinstance(res, greenlet.GreenletExit) - assert g.dead - res = g.throw() # immediately eaten by the already-dead greenlet - assert isinstance(res, greenlet.GreenletExit) - - def test_throw_goes_to_original_parent(self): - from _stackless import greenlet - main = greenlet.getcurrent() - def f1(): - try: - main.switch("f1 ready to catch") - except IndexError: - return "caught" - else: - return "normal exit" - def f2(): - main.switch("from f2") - - g1 = greenlet(f1) - g2 = greenlet(f2, parent=g1) - raises(IndexError, g2.throw, IndexError) - assert g2.dead - assert g1.dead - - g1 = greenlet(f1) - g2 = greenlet(f2, parent=g1) - res = g1.switch() - assert res == "f1 ready to catch" - res = g2.throw(IndexError) - assert res == "caught" - assert g2.dead - assert g1.dead - - g1 = greenlet(f1) - g2 = greenlet(f2, parent=g1) - res = g1.switch() - assert res == "f1 ready to catch" - res = g2.switch() - assert res == "from f2" - res = g2.throw(IndexError) - assert res == "caught" - assert g2.dead - assert g1.dead - - -class AppTest_PyMagicTestGenerator: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - - def test_generator(self): - from _stackless import greenlet - - class genlet(greenlet): - - def __init__(self, *args, **kwds): - self.args = args - self.kwds = kwds - - def run(self): - fn, = self.fn - fn(*self.args, **self.kwds) - - def __iter__(self): - return self - - def next(self): - self.parent = greenlet.getcurrent() - result = self.switch() - if self: - return result - else: - raise StopIteration - - def Yield(value): - g = greenlet.getcurrent() - while not isinstance(g, genlet): - if g is None: - raise RuntimeError, 'yield outside a genlet' - g = g.parent - 
g.parent.switch(value) - - def generator(func): - class generator(genlet): - fn = (func,) - return generator - - # ___ test starts here ___ - seen = [] - def g(n): - for i in range(n): - seen.append(i) - Yield(i) - g = generator(g) - for k in range(3): - for j in g(5): - seen.append(j) - assert seen == 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4] - - -class AppTest_PyMagicTestGeneratorNested: - - def setup_class(cls): - space = gettestobjspace(usemodules=('_stackless',)) - cls.space = space - cls.w_glob = space.appexec([], """(): - from _stackless import greenlet - - class genlet(greenlet): - - def __init__(self, *args, **kwds): - self.args = args - self.kwds = kwds - self.child = None - - def run(self): - fn, = self.fn - fn(*self.args, **self.kwds) - - def __iter__(self): - return self - - def set_child(self, child): - self.child = child - - def next(self): - if self.child: - child = self.child - while child.child: - tmp = child - child = child.child - tmp.child = None - - result = child.switch() - else: - self.parent = greenlet.getcurrent() - result = self.switch() - - if self: - return result - else: - raise StopIteration - - def Yield(value, level = 1): - g = greenlet.getcurrent() - - while level != 0: - if not isinstance(g, genlet): - raise RuntimeError, 'yield outside a genlet' - if level > 1: - g.parent.set_child(g) - g = g.parent - level -= 1 - - g.switch(value) - - def Genlet(func): - class Genlet(genlet): - fn = (func,) - return Genlet - - class Glob: pass - glob = Glob() - glob.__dict__.update(locals()) - return glob - """) - - def test_genlet_1(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - - def g1(n, seen): - for i in range(n): - seen.append(i+1) - yield i - - def g2(n, seen): - for i in range(n): - seen.append(i+1) - Yield(i) - - g2 = Genlet(g2) - - def nested(i): - Yield(i) - - def g3(n, seen): - for i in range(n): - seen.append(i+1) - nested(i) - g3 = Genlet(g3) - - raises(RuntimeError, Yield, 10) - for g in [g1, g2, g3]: - seen = [] - for k 
in range(3): - for j in g(5, seen): - seen.append(j) - assert seen == 3 * [1, 0, 2, 1, 3, 2, 4, 3, 5, 4] - raises(RuntimeError, Yield, 10) - - def test_nested_genlets(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - def a(n): - if n == 0: - return - for ii in ax(n-1): - Yield(ii) - Yield(n) - ax = Genlet(a) - seen = [] - for ii in ax(5): - seen.append(ii) - assert seen == [1, 2, 3, 4, 5] - - def test_perms(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - def perms(l): - if len(l) > 1: - for e in l: - # No syntactical sugar for generator expressions - [Yield([e] + p) for p in perms([x for x in l if x!=e])] - else: - Yield(l) - perms = Genlet(perms) - gen_perms = perms(range(4)) - permutations = list(gen_perms) - assert len(permutations) == 4*3*2*1 - assert [0,1,2,3] in permutations - assert [3,2,1,0] in permutations - - def test_layered_genlets(self): - Genlet = self.glob.Genlet - Yield = self.glob.Yield - def gr1(n): - for ii in range(1, n): - Yield(ii) - Yield(ii * ii, 2) - gr1 = Genlet(gr1) - def gr2(n, seen): - for ii in gr1(n): - seen.append(ii) - gr2 = Genlet(gr2) - seen = [] - for ii in gr2(5, seen): - seen.append(ii) - assert seen == [1, 1, 2, 4, 3, 9, 4, 16] diff --git a/pypy/module/_stackless/test/test_interp_clonable.py b/pypy/module/_stackless/test/test_interp_clonable.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_interp_clonable.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -testing cloning -""" -import py; py.test.skip("clonable coroutines not really maintained any more") - -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect() -from pypy.translator.c import gc -from pypy.rpython.memory.gctransform import stacklessframework -from pypy.rpython.memory.test import test_transformed_gc -from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine -from pypy.module._stackless.rclonable import AbstractThunk, fork - -class 
TestClonableCoroutine(test_transformed_gc.GCTest): - - gcname = "marksweep" - stacklessgc = True - class gcpolicy(gc.StacklessFrameworkGcPolicy): - class transformerclass(stacklessframework.StacklessFrameworkGCTransformer): - GC_PARAMS = {'start_heap_size': 4096 } - - def test_clone(self): - class T(AbstractThunk): - def __init__(self, result): - self.result = result - def call(self): - self.result.append(2) - ClonableCoroutine.getmain().switch() - self.result.append(4) - def f(): - result = [] - coro = ClonableCoroutine() - coro.bind(T(result)) - result.append(1) - coro.switch() - coro2 = coro.clone() - result.append(3) - coro2.switch() - result.append(5) - coro.switch() - result.append(6) - n = 0 - for i in result: - n = n*10 + i - return n - - run = self.runner(f) - res = run([]) - assert res == 1234546 - - def test_clone_local_state(self): - class T(AbstractThunk): - def __init__(self, result): - self.result = result - def call(self): - localstate = [] - localstate.append(10) - self.result.append(2) - ClonableCoroutine.getmain().switch() - localstate.append(20) - if localstate == [10, 20]: - self.result.append(4) - else: - self.result.append(0) - def f(): - result = [] - coro = ClonableCoroutine() - coro.bind(T(result)) - result.append(1) - coro.switch() - coro2 = coro.clone() - result.append(3) - coro2.switch() - result.append(5) - coro.switch() - result.append(6) - n = 0 - for i in result: - n = n*10 + i - return n - - run = self.runner(f) - res = run([]) - assert res == 1234546 - - def test_fork(self): - class T(AbstractThunk): - def __init__(self, result): - self.result = result - def call(self): - localdata = [10] - self.result.append(2) - newcoro = fork() - localdata.append(20) - if newcoro is not None: - # in the parent - self.result.append(3) - newcoro.switch() - self.result.append(5) - else: - # in the child - self.result.append(4) - localdata.append(30) - self.result.append(localdata != [10, 20, 30]) - def f(): - result = [] - coro = 
ClonableCoroutine() - coro.bind(T(result)) - result.append(1) - coro.switch() - result.append(6) - n = 0 - for i in result: - n = n*10 + i - return n - - run = self.runner(f) - res = run([]) - assert res == 12340506 diff --git a/pypy/module/_stackless/test/test_pickle.py b/pypy/module/_stackless/test/test_pickle.py deleted file mode 100644 --- a/pypy/module/_stackless/test/test_pickle.py +++ /dev/null @@ -1,487 +0,0 @@ -from pypy.conftest import gettestobjspace, option -import py - -# app-level testing of coroutine pickling - - -class AppTestBasic: - def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_stackless',)) - - def test_pickle_main(self): - import _stackless, pickle - main = _stackless.coroutine.getcurrent() - s = pickle.dumps(main) - c = pickle.loads(s) - assert c is main - - -class AppTestPickle: - - def setup_class(cls): - cls.space = gettestobjspace(usemodules=('_stackless',), CALL_METHOD=True) - - def test_pickle_coroutine_empty(self): - # this test is limited to basic pickling. - # real stacks can only tested with a stackless pypy build. 
- import _stackless as stackless - co = stackless.coroutine() - import pickle - pckl = pickle.dumps(co) - co2 = pickle.loads(pckl) - # the empty unpickled coroutine can still be used: - result = [] - co2.bind(result.append, 42) - co2.switch() - assert result == [42] - - def test_pickle_coroutine_bound(self): - import pickle - import _stackless - lst = [4] - co = _stackless.coroutine() - co.bind(lst.append, 2) - pckl = pickle.dumps((co, lst)) - - (co2, lst2) = pickle.loads(pckl) - assert lst2 == [4] - co2.switch() - assert lst2 == [4, 2] - - - def test_simple_ish(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_pickle_again(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - pckl = pickle.dumps(new_coro) - newer_coro = pickle.loads(pckl) - - newer_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_kwargs(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import 
_stackless -def f(coro, n, x, step=4): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x, step=1) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_starstarargs(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x, step=4): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x, **{'step': 1}) - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [16, 8, 4, 2, 1] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_closure(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - y = 3 - def f(coro, n, x): - if n == 0: - coro.switch() - return - f(coro, n-1, 2*x) - output.append(x+y) - - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [19, 11, 7, 5, 4] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_exception(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro): - try: - raise ValueError - except: - coro.switch() - import 
sys - t, v, tb = sys.exc_info() - output.append(t) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [ValueError] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_exception_after_unpickling(self): - - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro, n, x): - if n == 0: - coro.switch() - raise ValueError - try: - f(coro, n-1, 2*x) - finally: - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro, 5, 1) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - try: - sub_coro.switch() - except ValueError: - pass - else: - assert 0 - try: - new_coro.switch() - except ValueError: - pass - else: - assert 0 - -example() -assert output == [16, 8, 4, 2, 1] * 2 -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_loop(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro): - for x in (1,2,3): - coro.switch() - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - new_coro.switch() - new_coro.switch() - -example() -assert output == [1, 2, 3] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - def test_valstack(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless -def f(coro): - r = 
1+g(coro)+3 - output.append(r) - -def g(coro): - coro.switch() - return 2 - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - - -example() -assert output == [6] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - - def test_exec_and_locals(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -output = [] -import _stackless - -def f(coro): - x = None - exec "x = 9" - coro.switch() - output.append(x) - -def example(): - main_coro = _stackless.coroutine.getcurrent() - sub_coro = _stackless.coroutine() - sub_coro.bind(f, main_coro) - sub_coro.switch() - - import pickle - pckl = pickle.dumps(sub_coro) - new_coro = pickle.loads(pckl) - - new_coro.switch() - -example() -assert output == [9] -''' in mod.__dict__ - finally: - del sys.modules['mod'] - - - def test_solver(self): - import new, sys - - mod = new.module('mod') - sys.modules['mod'] = mod - try: - exec ''' -import _stackless, pickle - -class Fail(Exception): - pass - -class Success(Exception): - pass - -def first_solution(func): - global next_answer - co = _stackless.coroutine() - co.bind(func) - pending = [(co, None)] - while pending: - co, next_answer = pending.pop() - try: - co.switch() - except Fail: - pass - except Success, e: - return e.args[0] - else: - # zero_or_one() called, clone the coroutine - # NB. 
this seems to be quite slow - co2 = pickle.loads(pickle.dumps(co)) - pending.append((co2, 1)) - pending.append((co, 0)) - raise Fail("no solution") - -pending = [] -main = _stackless.coroutine.getcurrent() - -def zero_or_one(): - main.switch() - return next_answer - -# ____________________________________________________________ - -invalid_prefixes = { - (0, 0): True, - (0, 1, 0): True, - (0, 1, 1): True, - (1, 0): True, - (1, 1, 0, 0): True, - } - -def example(): - test = [] - for n in range(5): - test.append(zero_or_one()) - if tuple(test) in invalid_prefixes: - raise Fail - raise Success(test) - -res = first_solution(example) -assert res == [1, 1, 0, 1, 0] -''' in mod.__dict__ - finally: - del sys.modules['mod'] diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -8,24 +8,12 @@ class WeakrefLifeline(W_Root): + cached_weakref_index = -1 + cached_proxy_index = -1 + def __init__(self, space): self.space = space self.refs_weak = [] - self.cached_weakref_index = -1 - self.cached_proxy_index = -1 - - def __del__(self): - """This runs when the interp-level object goes away, and allows - its lifeline to go away. The purpose of this is to activate the - callbacks even if there is no __del__ method on the interp-level - W_Root subclass implementing the object. - """ - for i in range(len(self.refs_weak) - 1, -1, -1): - w_ref = self.refs_weak[i]() - if w_ref is not None and w_ref.w_callable is not None: - w_ref.enqueue_for_destruction(self.space, - W_WeakrefBase.activate_callback, - 'weakref callback of ') def clear_all_weakrefs(self): """Clear all weakrefs. This is called when an app-level object has @@ -39,12 +27,11 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. 
- @jit.dont_look_inside - def get_or_make_weakref(self, space, w_subtype, w_obj, w_callable): + def get_or_make_weakref(self, w_subtype, w_obj): + space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) is_weakreftype = space.is_w(w_weakreftype, w_subtype) - can_reuse = space.is_w(w_callable, space.w_None) - if is_weakreftype and can_reuse and self.cached_weakref_index >= 0: + if is_weakreftype and self.cached_weakref_index >= 0: w_cached = self.refs_weak[self.cached_weakref_index]() if w_cached is not None: return w_cached @@ -52,16 +39,15 @@ self.cached_weakref_index = -1 w_ref = space.allocate_instance(W_Weakref, w_subtype) index = len(self.refs_weak) - W_Weakref.__init__(w_ref, space, w_obj, w_callable) + W_Weakref.__init__(w_ref, space, w_obj, None) self.refs_weak.append(weakref.ref(w_ref)) - if is_weakreftype and can_reuse: + if is_weakreftype: self.cached_weakref_index = index return w_ref - @jit.dont_look_inside - def get_or_make_proxy(self, space, w_obj, w_callable): - can_reuse = space.is_w(w_callable, space.w_None) - if can_reuse and self.cached_proxy_index >= 0: + def get_or_make_proxy(self, w_obj): + space = self.space + if self.cached_proxy_index >= 0: w_cached = self.refs_weak[self.cached_proxy_index]() if w_cached is not None: return w_cached @@ -69,12 +55,11 @@ self.cached_proxy_index = -1 index = len(self.refs_weak) if space.is_true(space.callable(w_obj)): - w_proxy = W_CallableProxy(space, w_obj, w_callable) + w_proxy = W_CallableProxy(space, w_obj, None) else: - w_proxy = W_Proxy(space, w_obj, w_callable) + w_proxy = W_Proxy(space, w_obj, None) self.refs_weak.append(weakref.ref(w_proxy)) - if can_reuse: - self.cached_proxy_index = index + self.cached_proxy_index = index return w_proxy def get_any_weakref(self, space): @@ -90,6 +75,45 @@ return w_ref return space.w_None + +class WeakrefLifelineWithCallbacks(WeakrefLifeline): + + def __init__(self, space, oldlifeline=None): + self.space = space + if oldlifeline is None: + 
self.refs_weak = [] + else: + self.refs_weak = oldlifeline.refs_weak + + def __del__(self): + """This runs when the interp-level object goes away, and allows + its lifeline to go away. The purpose of this is to activate the + callbacks even if there is no __del__ method on the interp-level + W_Root subclass implementing the object. + """ + for i in range(len(self.refs_weak) - 1, -1, -1): + w_ref = self.refs_weak[i]() + if w_ref is not None and w_ref.w_callable is not None: + w_ref.enqueue_for_destruction(self.space, + W_WeakrefBase.activate_callback, + 'weakref callback of ') + + def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): + space = self.space + w_ref = space.allocate_instance(W_Weakref, w_subtype) + W_Weakref.__init__(w_ref, space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_ref)) + return w_ref + + def make_proxy_with_callback(self, w_obj, w_callable): + space = self.space + if space.is_true(space.callable(w_obj)): + w_proxy = W_CallableProxy(space, w_obj, w_callable) + else: + w_proxy = W_Proxy(space, w_obj, w_callable) + self.refs_weak.append(weakref.ref(w_proxy)) + return w_proxy + # ____________________________________________________________ class Dummy: @@ -103,8 +127,7 @@ class W_WeakrefBase(Wrappable): def __init__(w_self, space, w_obj, w_callable): - if space.is_w(w_callable, space.w_None): - w_callable = None + assert w_callable is not space.w_None # should be really None w_self.space = space assert w_obj is not None w_self.w_obj_weak = weakref.ref(w_obj) @@ -177,16 +200,39 @@ def descr__ne__(self, space, w_ref2): return space.not_(space.eq(self, w_ref2)) +def getlifeline(space, w_obj): + lifeline = w_obj.getweakref() + if lifeline is None: + lifeline = WeakrefLifeline(space) + w_obj.setweakref(space, lifeline) + return lifeline + +def getlifelinewithcallbacks(space, w_obj): + lifeline = w_obj.getweakref() + if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None + oldlifeline = lifeline + lifeline = 
WeakrefLifelineWithCallbacks(space, oldlifeline) + w_obj.setweakref(space, lifeline) + return lifeline + + at jit.dont_look_inside +def get_or_make_weakref(space, w_subtype, w_obj): + return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) + + at jit.dont_look_inside +def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: raise OperationError(space.w_TypeError, space.wrap( "__new__ expected at most 2 arguments")) - lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_weakref(space, w_subtype, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + return get_or_make_weakref(space, w_subtype, w_obj) + else: + return make_weakref_with_callback(space, w_subtype, w_obj, w_callable) W_Weakref.typedef = TypeDef("weakref", __doc__ = """A weak reference to an object 'obj'. A 'callback' can be given, @@ -239,15 +285,23 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) + at jit.dont_look_inside +def get_or_make_proxy(space, w_obj): + return getlifeline(space, w_obj).get_or_make_proxy(w_obj) + + at jit.dont_look_inside +def make_proxy_with_callback(space, w_obj, w_callable): + lifeline = getlifelinewithcallbacks(space, w_obj) + return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' is about to be finalized.""" - lifeline = w_obj.getweakref() - if lifeline is None: - lifeline = WeakrefLifeline(space) - w_obj.setweakref(space, lifeline) - return lifeline.get_or_make_proxy(space, w_obj, w_callable) + if space.is_w(w_callable, space.w_None): + return get_or_make_proxy(space, w_obj) + else: + return make_proxy_with_callback(space, w_obj, w_callable) def descr__new__proxy(space, w_subtype, w_obj, w_callable=None): raise OperationError( diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -369,6 +369,26 @@ return A raises(TypeError, tryit) + def test_proxy_to_dead_object(self): + import _weakref, gc + class A(object): + pass + p = _weakref.proxy(A()) + gc.collect() + raises(ReferenceError, "p + 1") + + def test_proxy_with_callback(self): + import _weakref, gc + class A(object): + pass + a2 = A() + def callback(proxy): + a2.seen = proxy + p = _weakref.proxy(A(), callback) + gc.collect() + raises(ReferenceError, "p + 1") + assert a2.seen is p + def test_repr(self): import _weakref, gc for kind in ('ref', 'proxy'): diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -4,9 +4,21 @@ cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError from pypy.interpreter.function import Function, Method from pypy.interpreter.pycode import PyCode +from pypy.interpreter import pycode + +CODE_FLAGS = dict( + CO_OPTIMIZED = 0x0001, + CO_NEWLOCALS = 0x0002, + CO_VARARGS = 0x0004, + CO_VARKEYWORDS = 0x0008, + CO_NESTED = 0x0010, + 
CO_GENERATOR = 0x0020, +) +ALL_CODE_FLAGS = unrolling_iterable(CODE_FLAGS.items()) PyFunctionObjectStruct = lltype.ForwardReference() PyFunctionObject = lltype.Ptr(PyFunctionObjectStruct) @@ -16,7 +28,12 @@ PyCodeObjectStruct = lltype.ForwardReference() PyCodeObject = lltype.Ptr(PyCodeObjectStruct) -cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) +PyCodeObjectFields = PyObjectFields + \ + (("co_name", PyObject), + ("co_flags", rffi.INT), + ("co_argcount", rffi.INT), + ) +cpython_struct("PyCodeObject", PyCodeObjectFields, PyCodeObjectStruct) @bootstrap_function def init_functionobject(space): @@ -24,6 +41,10 @@ basestruct=PyFunctionObject.TO, attach=function_attach, dealloc=function_dealloc) + make_typedescr(PyCode.typedef, + basestruct=PyCodeObject.TO, + attach=code_attach, + dealloc=code_dealloc) PyFunction_Check, PyFunction_CheckExact = build_type_checkers("Function", Function) PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method) @@ -40,6 +61,31 @@ from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) +def code_attach(space, py_obj, w_obj): + py_code = rffi.cast(PyCodeObject, py_obj) + assert isinstance(w_obj, PyCode) + py_code.c_co_name = make_ref(space, space.wrap(w_obj.co_name)) + co_flags = 0 + for name, value in ALL_CODE_FLAGS: + if w_obj.co_flags & getattr(pycode, name): + co_flags |= value + rffi.setintfield(py_code, 'c_co_flags', co_flags) + rffi.setintfield(py_code, 'c_co_argcount', w_obj.co_argcount) + + at cpython_api([PyObject], lltype.Void, external=False) +def code_dealloc(space, py_obj): + py_code = rffi.cast(PyCodeObject, py_obj) + Py_DecRef(space, py_code.c_co_name) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + + at cpython_api([PyObject], PyObject) +def PyFunction_GetCode(space, w_func): + """Return the code object associated with the function object op.""" + func = space.interp_w(Function, w_func) + w_code = 
space.wrap(func.code) + return borrow_from(w_func, w_code) + @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyMethod_New(space, w_func, w_self, w_cls): """Return a new method object, with func being any callable object; this is the diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h --- a/pypy/module/cpyext/include/code.h +++ b/pypy/module/cpyext/include/code.h @@ -4,7 +4,21 @@ extern "C" { #endif -typedef PyObject PyCodeObject; +typedef struct { + PyObject_HEAD + PyObject *co_name; + int co_argcount; + int co_flags; +} PyCodeObject; + +/* Masks for co_flags above */ +/* These values are also in funcobject.py */ +#define CO_OPTIMIZED 0x0001 +#define CO_NEWLOCALS 0x0002 +#define CO_VARARGS 0x0004 +#define CO_VARKEYWORDS 0x0008 +#define CO_NESTED 0x0010 +#define CO_GENERATOR 0x0020 #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/funcobject.h b/pypy/module/cpyext/include/funcobject.h --- a/pypy/module/cpyext/include/funcobject.h +++ b/pypy/module/cpyext/include/funcobject.h @@ -12,6 +12,8 @@ PyObject *func_name; /* The __name__ attribute, a string object */ } PyFunctionObject; +#define PyFunction_GET_CODE(obj) PyFunction_GetCode((PyObject*)(obj)) + #define PyMethod_GET_FUNCTION(obj) PyMethod_Function((PyObject*)(obj)) #define PyMethod_GET_SELF(obj) PyMethod_Self((PyObject*)(obj)) #define PyMethod_GET_CLASS(obj) PyMethod_Class((PyObject*)(obj)) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -501,6 +501,9 @@ #define PyObject_TypeCheck(ob, tp) \ ((ob)->ob_type == (tp) || PyType_IsSubtype((ob)->ob_type, (tp))) +#define Py_TRASHCAN_SAFE_BEGIN(pyObj) +#define Py_TRASHCAN_SAFE_END(pyObj) + /* Copied from CPython ----------------------------- */ int PyObject_AsReadBuffer(PyObject *, const void **, Py_ssize_t *); int PyObject_AsWriteBuffer(PyObject *, void **, Py_ssize_t *); diff --git 
a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -12,6 +12,7 @@ #define Py_Py3kWarningFlag 0 #define Py_FrozenFlag 0 +#define Py_VerboseFlag 0 typedef struct { int cf_flags; /* bitmask of CO_xxx flags relevant to future */ diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -920,12 +920,6 @@ raise NotImplementedError @cpython_api([PyObject], PyObject) -def PyFunction_GetCode(space, op): - """Return the code object associated with the function object op.""" - borrow_from() - raise NotImplementedError - - at cpython_api([PyObject], PyObject) def PyFunction_GetGlobals(space, op): """Return the globals dictionary associated with the function object op.""" borrow_from() diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -215,36 +215,36 @@ typedef struct { PyUnicodeObject HEAD; int val; -} FuuObject; +} UnicodeSubclassObject; -static int Fuu_init(FuuObject *self, PyObject *args, PyObject *kwargs) { +static int UnicodeSubclass_init(UnicodeSubclassObject *self, PyObject *args, PyObject *kwargs) { self->val = 42; return 0; } static PyObject * -Fuu_escape(PyTypeObject* type, PyObject *args) +UnicodeSubclass_escape(PyTypeObject* type, PyObject *args) { Py_RETURN_TRUE; } static PyObject * -Fuu_get_val(FuuObject *self) { +UnicodeSubclass_get_val(UnicodeSubclassObject *self) { return PyInt_FromLong(self->val); } -static PyMethodDef Fuu_methods[] = { - {"escape", (PyCFunction) Fuu_escape, METH_VARARGS, NULL}, - {"get_val", (PyCFunction) Fuu_get_val, METH_NOARGS, NULL}, +static PyMethodDef UnicodeSubclass_methods[] = { + {"escape", (PyCFunction) UnicodeSubclass_escape, METH_VARARGS, NULL}, + {"get_val", (PyCFunction) UnicodeSubclass_get_val, METH_NOARGS, NULL}, {NULL} /* 
Sentinel */ }; -PyTypeObject FuuType = { +PyTypeObject UnicodeSubtype = { PyObject_HEAD_INIT(NULL) 0, "foo.fuu", - sizeof(FuuObject), + sizeof(UnicodeSubclassObject), 0, 0, /*tp_dealloc*/ 0, /*tp_print*/ @@ -277,7 +277,7 @@ /* Attribute descriptor and subclassing stuff */ - Fuu_methods,/*tp_methods*/ + UnicodeSubclass_methods,/*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ @@ -287,7 +287,7 @@ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ - (initproc) Fuu_init, /*tp_init*/ + (initproc) UnicodeSubclass_init, /*tp_init*/ 0, /*tp_alloc will be set to PyType_GenericAlloc in module init*/ 0, /*tp_new*/ 0, /*tp_free Low-level free-memory routine */ @@ -299,11 +299,11 @@ 0 /*tp_weaklist*/ }; -PyTypeObject Fuu2Type = { +PyTypeObject UnicodeSubtype2 = { PyObject_HEAD_INIT(NULL) 0, "foo.fuu2", - sizeof(FuuObject), + sizeof(UnicodeSubclassObject), 0, 0, /*tp_dealloc*/ 0, /*tp_print*/ @@ -628,15 +628,15 @@ footype.tp_new = PyType_GenericNew; - FuuType.tp_base = &PyUnicode_Type; - Fuu2Type.tp_base = &FuuType; + UnicodeSubtype.tp_base = &PyUnicode_Type; + UnicodeSubtype2.tp_base = &UnicodeSubtype; MetaType.tp_base = &PyType_Type; if (PyType_Ready(&footype) < 0) return; - if (PyType_Ready(&FuuType) < 0) + if (PyType_Ready(&UnicodeSubtype) < 0) return; - if (PyType_Ready(&Fuu2Type) < 0) + if (PyType_Ready(&UnicodeSubtype2) < 0) return; if (PyType_Ready(&MetaType) < 0) return; @@ -655,9 +655,9 @@ return; if (PyDict_SetItemString(d, "fooType", (PyObject *)&footype) < 0) return; - if (PyDict_SetItemString(d, "FuuType", (PyObject *) &FuuType) < 0) + if (PyDict_SetItemString(d, "UnicodeSubtype", (PyObject *) &UnicodeSubtype) < 0) return; - if(PyDict_SetItemString(d, "Fuu2Type", (PyObject *) &Fuu2Type) < 0) + if (PyDict_SetItemString(d, "UnicodeSubtype2", (PyObject *) &UnicodeSubtype2) < 0) return; if (PyDict_SetItemString(d, "MetaType", (PyObject *) &MetaType) < 0) return; diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py 
--- a/pypy/module/cpyext/test/test_funcobject.py +++ b/pypy/module/cpyext/test/test_funcobject.py @@ -2,8 +2,12 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref -from pypy.module.cpyext.funcobject import PyFunctionObject +from pypy.module.cpyext.funcobject import ( + PyFunctionObject, PyCodeObject, CODE_FLAGS) from pypy.interpreter.function import Function, Method +from pypy.interpreter.pycode import PyCode + +globals().update(CODE_FLAGS) class TestFunctionObject(BaseApiTest): def test_function(self, space, api): @@ -36,6 +40,38 @@ w_method2 = api.PyMethod_New(w_function, w_self, w_class) assert space.eq_w(w_method, w_method2) + def test_getcode(self, space, api): + w_function = space.appexec([], """(): + def func(x, y, z): return x + return func + """) + w_code = api.PyFunction_GetCode(w_function) + assert w_code.co_name == "func" + + ref = make_ref(space, w_code) + assert (from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is + space.gettypeobject(PyCode.typedef)) + assert "func" == space.unwrap( + from_ref(space, rffi.cast(PyCodeObject, ref).c_co_name)) + assert 3 == rffi.cast(PyCodeObject, ref).c_co_argcount + api.Py_DecRef(ref) + + def test_co_flags(self, space, api): + def get_flags(signature, body="pass"): + w_code = space.appexec([], """(): + def func(%s): %s + return func.__code__ + """ % (signature, body)) + ref = make_ref(space, w_code) + co_flags = rffi.cast(PyCodeObject, ref).c_co_flags + api.Py_DecRef(ref) + return co_flags + assert get_flags("x") == CO_NESTED | CO_OPTIMIZED | CO_NEWLOCALS + assert get_flags("x", "exec x") == CO_NESTED | CO_NEWLOCALS + assert get_flags("x, *args") & CO_VARARGS + assert get_flags("x, **kw") & CO_VARKEYWORDS + assert get_flags("x", "yield x") & CO_GENERATOR + def test_newcode(self, space, api): filename = rffi.str2charp('filename') funcname = 
rffi.str2charp('funcname') diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -119,16 +119,16 @@ module = self.import_module(name='foo') obj = module.new() # call __new__ - newobj = module.FuuType(u"xyz") + newobj = module.UnicodeSubtype(u"xyz") assert newobj == u"xyz" - assert isinstance(newobj, module.FuuType) + assert isinstance(newobj, module.UnicodeSubtype) assert isinstance(module.fooType(), module.fooType) class bar(module.fooType): pass assert isinstance(bar(), bar) - fuu = module.FuuType + fuu = module.UnicodeSubtype class fuu2(fuu): def baz(self): return self @@ -137,20 +137,20 @@ def test_init(self): module = self.import_module(name="foo") - newobj = module.FuuType() + newobj = module.UnicodeSubtype() assert newobj.get_val() == 42 # this subtype should inherit tp_init - newobj = module.Fuu2Type() + newobj = module.UnicodeSubtype2() assert newobj.get_val() == 42 # this subclass redefines __init__ - class Fuu2(module.FuuType): + class UnicodeSubclass2(module.UnicodeSubtype): def __init__(self): self.foobar = 32 - super(Fuu2, self).__init__() + super(UnicodeSubclass2, self).__init__() - newobj = Fuu2() + newobj = UnicodeSubclass2() assert newobj.get_val() == 42 assert newobj.foobar == 32 diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -40,7 +40,7 @@ reader = FileReader(space, w_f) try: u = Unmarshaller(space, reader) - return u.load_w_obj(False) + return u.load_w_obj() finally: reader.finished() @@ -49,7 +49,7 @@ ignored.""" space.timer.start("marshal loads") u = StringUnmarshaller(space, w_str) - obj = u.load_w_obj(False) + obj = u.load_w_obj() space.timer.stop("marshal loads") return obj @@ -424,7 +424,7 @@ lng = self.get_lng() return self.get(lng) - def get_w_obj(self, 
allow_null): + def get_w_obj(self, allow_null=False): space = self.space w_ret = space.w_None # something not None tc = self.get1() @@ -434,9 +434,9 @@ 'NULL object in marshal data')) return w_ret - def load_w_obj(self, allow_null): + def load_w_obj(self): try: - return self.get_w_obj(allow_null) + return self.get_w_obj() except rstackovf.StackOverflow: rstackovf.check_stack_overflow() self._overflow() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -26,13 +26,19 @@ ("copysign", "copysign"), ("cos", "cos"), ("divide", "divide"), + ("equal", "equal"), ("exp", "exp"), ("fabs", "fabs"), ("floor", "floor"), + ("greater", "greater"), + ("greater_equal", "greater_equal"), + ("less", "less"), + ("less_equal", "less_equal"), ("maximum", "maximum"), ("minimum", "minimum"), ("multiply", "multiply"), ("negative", "negative"), + ("not_equal", "not_equal"), ("reciprocal", "reciprocal"), ("sign", "sign"), ("sin", "sin"), diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -129,6 +129,16 @@ )) return impl +def raw_binop(func): + # Returns the result unwrapped. 
+ @functools.wraps(func) + def impl(self, v1, v2): + return func(self, + self.for_computation(self.unbox(v1)), + self.for_computation(self.unbox(v2)) + ) + return impl + def unaryop(func): @functools.wraps(func) def impl(self, v): @@ -170,8 +180,24 @@ def bool(self, v): return bool(self.for_computation(self.unbox(v))) + @raw_binop + def eq(self, v1, v2): + return v1 == v2 + @raw_binop def ne(self, v1, v2): - return self.for_computation(self.unbox(v1)) != self.for_computation(self.unbox(v2)) + return v1 != v2 + @raw_binop + def lt(self, v1, v2): + return v1 < v2 + @raw_binop + def le(self, v1, v2): + return v1 <= v2 + @raw_binop + def gt(self, v1, v2): + return v1 > v2 + @raw_binop + def ge(self, v1, v2): + return v1 >= v2 class FloatArithmeticDtype(ArithmaticTypeMixin): @@ -224,7 +250,7 @@ return math.tan(v) @unaryop def arcsin(self, v): - if v < -1.0 or v > 1.0: + if v < -1.0 or v > 1.0: return rfloat.NAN return math.asin(v) @unaryop diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -74,6 +74,13 @@ descr_pow = _binop_impl("power") descr_mod = _binop_impl("mod") + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + def _binop_right_impl(ufunc_name): def impl(self, space, w_other): w_other = scalar_w(space, @@ -206,7 +213,7 @@ res = "array([" + ", ".join(concrete._getnums(False)) + "]" dtype = concrete.find_dtype() if (dtype is not space.fromcache(interp_dtype.W_Float64Dtype) and - dtype is not space.fromcache(interp_dtype.W_Int64Dtype)): + dtype is not space.fromcache(interp_dtype.W_Int64Dtype)) or not self.find_size(): res += ", dtype=" + dtype.name res += ")" return space.wrap(res) @@ -404,10 +411,11 @@ """ Intermediate class for performing binary 
operations. """ - def __init__(self, signature, res_dtype, left, right): + def __init__(self, signature, calc_dtype, res_dtype, left, right): VirtualArray.__init__(self, signature, res_dtype) self.left = left self.right = right + self.calc_dtype = calc_dtype def _del_sources(self): self.left = None @@ -421,14 +429,14 @@ return self.right.find_size() def _eval(self, i): - lhs = self.left.eval(i).convert_to(self.res_dtype) - rhs = self.right.eval(i).convert_to(self.res_dtype) + lhs = self.left.eval(i).convert_to(self.calc_dtype) + rhs = self.right.eval(i).convert_to(self.calc_dtype) sig = jit.promote(self.signature) assert isinstance(sig, signature.Signature) call_sig = sig.components[0] assert isinstance(call_sig, signature.Call2) - return call_sig.func(self.res_dtype, lhs, rhs) + return call_sig.func(self.calc_dtype, lhs, rhs) class ViewArray(BaseArray): """ @@ -573,18 +581,28 @@ __pos__ = interp2app(BaseArray.descr_pos), __neg__ = interp2app(BaseArray.descr_neg), __abs__ = interp2app(BaseArray.descr_abs), + __add__ = interp2app(BaseArray.descr_add), __sub__ = interp2app(BaseArray.descr_sub), __mul__ = interp2app(BaseArray.descr_mul), __div__ = interp2app(BaseArray.descr_div), __pow__ = interp2app(BaseArray.descr_pow), __mod__ = interp2app(BaseArray.descr_mod), + __radd__ = interp2app(BaseArray.descr_radd), __rsub__ = interp2app(BaseArray.descr_rsub), __rmul__ = interp2app(BaseArray.descr_rmul), __rdiv__ = interp2app(BaseArray.descr_rdiv), __rpow__ = interp2app(BaseArray.descr_rpow), __rmod__ = interp2app(BaseArray.descr_rmod), + + __eq__ = interp2app(BaseArray.descr_eq), + __ne__ = interp2app(BaseArray.descr_ne), + __lt__ = interp2app(BaseArray.descr_lt), + __le__ = interp2app(BaseArray.descr_le), + __gt__ = interp2app(BaseArray.descr_gt), + __ge__ = interp2app(BaseArray.descr_ge), + __repr__ = interp2app(BaseArray.descr_repr), __str__ = interp2app(BaseArray.descr_str), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py 
--- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -113,10 +113,11 @@ argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, - identity=None): + identity=None, comparison_func=False): W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity) self.func = func + self.comparison_func = comparison_func self.signature = signature.Call2(func) self.reduce_signature = signature.BaseSignature() @@ -127,18 +128,25 @@ [w_lhs, w_rhs] = args_w w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - res_dtype = find_binop_result_dtype(space, + calc_dtype = find_binop_result_dtype(space, w_lhs.find_dtype(), w_rhs.find_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, ) + if self.comparison_func: + res_dtype = space.fromcache(interp_dtype.W_BoolDtype) + else: + res_dtype = calc_dtype if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar): - return self.func(res_dtype, w_lhs.value, w_rhs.value).wrap(space) + return self.func(calc_dtype, + w_lhs.value.convert_to(calc_dtype), + w_rhs.value.convert_to(calc_dtype) + ).wrap(space) new_sig = signature.Signature.find_sig([ self.signature, w_lhs.signature, w_rhs.signature ]) - w_res = Call2(new_sig, res_dtype, w_lhs, w_rhs) + w_res = Call2(new_sig, calc_dtype, res_dtype, w_lhs, w_rhs) w_lhs.add_invalidates(w_res) w_rhs.add_invalidates(w_res) return w_res @@ -209,13 +217,16 @@ return space.fromcache(interp_dtype.W_Float64Dtype) -def ufunc_dtype_caller(ufunc_name, op_name, argcount): +def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func): if argcount == 1: def impl(res_dtype, value): return getattr(res_dtype, op_name)(value) elif argcount == 2: def impl(res_dtype, lvalue, rvalue): - return getattr(res_dtype, op_name)(lvalue, rvalue) + res = getattr(res_dtype, op_name)(lvalue, rvalue) + if comparison_func: + res = space.fromcache(interp_dtype.W_BoolDtype).box(res) + 
return res return func_with_new_name(impl, ufunc_name) class UfuncState(object): @@ -229,6 +240,13 @@ ("mod", "mod", 2, {"promote_bools": True}), ("power", "pow", 2, {"promote_bools": True}), + ("equal", "eq", 2, {"comparison_func": True}), + ("not_equal", "ne", 2, {"comparison_func": True}), + ("less", "lt", 2, {"comparison_func": True}), + ("less_equal", "le", 2, {"comparison_func": True}), + ("greater", "gt", 2, {"comparison_func": True}), + ("greater_equal", "ge", 2, {"comparison_func": True}), + ("maximum", "max", 2), ("minimum", "min", 2), @@ -262,7 +280,9 @@ identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity) extra_kwargs["identity"] = identity - func = ufunc_dtype_caller(ufunc_name, op_name, argcount) + func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, + comparison_func=extra_kwargs.get("comparison_func", False) + ) if argcount == 1: ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs) elif argcount == 2: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -52,10 +52,14 @@ from numpy import array, zeros a = array(range(5), float) assert repr(a) == "array([0.0, 1.0, 2.0, 3.0, 4.0])" + a = array([], float) + assert repr(a) == "array([], dtype=float64)" a = zeros(1001) assert repr(a) == "array([0.0, 0.0, 0.0, ..., 0.0, 0.0, 0.0])" a = array(range(5), long) assert repr(a) == "array([0, 1, 2, 3, 4])" + a = array([], long) + assert repr(a) == "array([], dtype=int64)" a = array([True, False, True, False], "?") assert repr(a) == "array([True, False, True, False], dtype=bool)" @@ -553,6 +557,26 @@ assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) + def test_comparison(self): + import operator + from numpy import array, dtype + + a = array(range(5)) + b = array(range(5), float) + for func in [ + operator.eq, operator.ne, operator.lt, 
operator.le, operator.gt, + operator.ge + ]: + c = func(a, 3) + assert c.dtype is dtype(bool) + for i in xrange(5): + assert c[i] == func(a[i], 3) + + c = func(b, 3) + assert c.dtype is dtype(bool) + for i in xrange(5): + assert c[i] == func(b[i], 3) + class AppTestSupport(object): def setup_class(cls): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -310,4 +310,30 @@ assert add.reduce([1, 2, 3]) == 6 assert maximum.reduce([1]) == 1 assert maximum.reduce([1, 2, 3]) == 3 - raises(ValueError, maximum.reduce, []) \ No newline at end of file + raises(ValueError, maximum.reduce, []) + + def test_comparisons(self): + import operator + from numpy import equal, not_equal, less, less_equal, greater, greater_equal + + for ufunc, func in [ + (equal, operator.eq), + (not_equal, operator.ne), + (less, operator.lt), + (less_equal, operator.le), + (greater, operator.gt), + (greater_equal, operator.ge), + ]: + for a, b in [ + (3, 3), + (3, 4), + (4, 3), + (3.0, 3.0), + (3.0, 3.5), + (3.5, 3.0), + (3.0, 3), + (3, 3.0), + (3.5, 3), + (3, 3.5), + ]: + assert ufunc(a, b) is func(a, b) diff --git a/pypy/module/pwd/__init__.py b/pypy/module/pwd/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/pwd/__init__.py @@ -0,0 +1,25 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """ + This module provides access to the Unix password database. + It is available on all Unix versions. + + Password database entries are reported as 7-tuples containing the following + items from the password database (see `'), in order: + pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell. + The uid and gid items are integers, all others are strings. An + exception is raised if the entry asked for cannot be found. 
+ """ + + interpleveldefs = { + 'getpwuid': 'interp_pwd.getpwuid', + 'getpwnam': 'interp_pwd.getpwnam', + 'getpwall': 'interp_pwd.getpwall', + } + + appleveldefs = { + 'struct_passwd': 'app_pwd.struct_passwd', + 'struct_pwent': 'app_pwd.struct_passwd', + } + diff --git a/pypy/module/pwd/app_pwd.py b/pypy/module/pwd/app_pwd.py new file mode 100644 --- /dev/null +++ b/pypy/module/pwd/app_pwd.py @@ -0,0 +1,20 @@ +from _structseq import structseqtype, structseqfield + +class struct_passwd: + """ + pwd.struct_passwd: Results from getpw*() routines. + + This object may be accessed either as a tuple of + (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell) + or via the object attributes as named in the above tuple. + """ + __metaclass__ = structseqtype + name = "pwd.struct_passwd" + + pw_name = structseqfield(0, "user name") + pw_passwd = structseqfield(1, "password") + pw_uid = structseqfield(2, "user id") + pw_gid = structseqfield(3, "group id") + pw_gecos = structseqfield(4, "real name") + pw_dir = structseqfield(5, "home directory") + pw_shell = structseqfield(6, "shell program") diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py new file mode 100644 --- /dev/null +++ b/pypy/module/pwd/interp_pwd.py @@ -0,0 +1,95 @@ +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.rpython.tool import rffi_platform +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.rlib.rarithmetic import intmask + +eci = ExternalCompilationInfo( + includes=['pwd.h'] + ) + +class CConfig: + _compilation_info_ = eci + + uid_t = rffi_platform.SimpleType("uid_t") + + passwd = rffi_platform.Struct( + 'struct passwd', + [('pw_name', rffi.CCHARP), + ('pw_passwd', rffi.CCHARP), + ('pw_uid', rffi.INT), + ('pw_gid', rffi.INT), + ('pw_gecos', rffi.CCHARP), + ('pw_dir', rffi.CCHARP), + ('pw_shell', rffi.CCHARP), + ]) 
+ +config = rffi_platform.configure(CConfig) +passwd_p = lltype.Ptr(config['passwd']) +uid_t = config['uid_t'] + +def external(name, args, result, **kwargs): + return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs) + +c_getpwuid = external("getpwuid", [uid_t], passwd_p) +c_getpwnam = external("getpwnam", [rffi.CCHARP], passwd_p) +c_setpwent = external("setpwent", [], lltype.Void) +c_getpwent = external("getpwent", [], passwd_p) +c_endpwent = external("endpwent", [], lltype.Void) + +def make_struct_passwd(space, pw): + w_passwd_struct = space.getattr(space.getbuiltinmodule('pwd'), + space.wrap('struct_passwd')) + w_tuple = space.newtuple([ + space.wrap(rffi.charp2str(pw.c_pw_name)), + space.wrap(rffi.charp2str(pw.c_pw_passwd)), + space.wrap(intmask(pw.c_pw_uid)), + space.wrap(intmask(pw.c_pw_gid)), + space.wrap(rffi.charp2str(pw.c_pw_gecos)), + space.wrap(rffi.charp2str(pw.c_pw_dir)), + space.wrap(rffi.charp2str(pw.c_pw_shell)), + ]) + return space.call_function(w_passwd_struct, w_tuple) + + at unwrap_spec(uid=int) +def getpwuid(space, uid): + """ + getpwuid(uid) -> (pw_name,pw_passwd,pw_uid, + pw_gid,pw_gecos,pw_dir,pw_shell) + Return the password database entry for the given numeric user ID. + See pwd.__doc__ for more on password database entries. + """ + pw = c_getpwuid(uid) + if not pw: + raise operationerrfmt(space.w_KeyError, + "getpwuid(): uid not found: %d", uid) + return make_struct_passwd(space, pw) + + at unwrap_spec(name=str) +def getpwnam(space, name): + """ + getpwnam(name) -> (pw_name,pw_passwd,pw_uid, + pw_gid,pw_gecos,pw_dir,pw_shell) + Return the password database entry for the given user name. + See pwd.__doc__ for more on password database entries. 
+ """ + pw = c_getpwnam(name) + if not pw: + raise operationerrfmt(space.w_KeyError, + "getpwnam(): name not found: %s", name) + return make_struct_passwd(space, pw) + +def getpwall(space): + users_w = [] + c_setpwent() + try: + while True: + pw = c_getpwent() + if not pw: + break + users_w.append(make_struct_passwd(space, pw)) + finally: + c_endpwent() + return space.newlist(users_w) + diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py new file mode 100644 --- /dev/null +++ b/pypy/module/pwd/test/test_pwd.py @@ -0,0 +1,31 @@ +from pypy.conftest import gettestobjspace + +class AppTestPwd: + def setup_class(cls): + cls.space = gettestobjspace(usemodules=['pwd']) + + def test_getpwuid(self): + import pwd, sys + raises(KeyError, pwd.getpwuid, -1) + pw = pwd.getpwuid(0) + assert pw.pw_name == 'root' + assert isinstance(pw.pw_passwd, str) + assert pw.pw_uid == 0 + assert pw.pw_gid == 0 + if sys.platform.startswith('linux'): + assert pw.pw_dir == '/root' + else: + assert pw.pw_dir.startswith('/') + assert pw.pw_shell.startswith('/') + # + assert type(pw.pw_uid) is int + assert type(pw.pw_gid) is int + + def test_getpwnam(self): + import pwd + raises(KeyError, pwd.getpwnam, '~invalid~') + assert pwd.getpwnam('root').pw_name == 'root' + + def test_getpwall(self): + import pwd + assert pwd.getpwnam('root') in pwd.getpwall() diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -8,7 +8,8 @@ modname == '__builtin__.interp_classobj' or modname == '__builtin__.functional' or modname == '__builtin__.descriptor' or - modname == 'thread.os_local'): + modname == 'thread.os_local' or + modname == 'thread.os_thread'): return True if '.' 
in modname: modname, _ = modname.split('.', 1) diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -34,7 +34,9 @@ def test_thread_local(): from pypy.module.thread.os_local import Local + from pypy.module.thread.os_thread import get_ident assert pypypolicy.look_inside_function(Local.getdict.im_func) + assert pypypolicy.look_inside_function(get_ident) def test_pypy_module(): from pypy.module._collections.interp_deque import W_Deque diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -2,7 +2,10 @@ import sys import re import os.path -from _pytest.assertion import newinterpret +try: + from _pytest.assertion import newinterpret +except ImportError: # e.g. Python 2.5 + newinterpret = None from pypy.tool.jitlogparser.parser import SimpleParser, Function, TraceForOpcode from pypy.tool.jitlogparser.storage import LoopStorage @@ -196,7 +199,7 @@ source = str(source.deindent()).strip() except py.error.ENOENT: source = None - if source and source.startswith('self._assert('): + if source and source.startswith('self._assert(') and newinterpret: # transform self._assert(x, 'foo') into assert x, 'foo' source = source.replace('self._assert(', 'assert ') source = source[:-1] # remove the trailing ')' diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -1,4 +1,5 @@ -import sys +from __future__ import with_statement +import sys, os import types import subprocess import py diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py +++ 
b/pypy/module/pypyjit/test_pypy_c/test__ffi.py @@ -29,11 +29,13 @@ pow_addr, res = log.result assert res == 8.0 * 300 loop, = log.loops_by_filename(self.filepath) + if 'ConstClass(pow)' in repr(loop): # e.g. OS/X + pow_addr = 'ConstClass(pow)' assert loop.match_by_id('fficall', """ guard_not_invalidated(descr=...) i17 = force_token() setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>) - f21 = call_release_gil(%d, 2.000000, 3.000000, descr=) + f21 = call_release_gil(%s, 2.000000, 3.000000, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) """ % pow_addr) @@ -129,4 +131,5 @@ assert opnames.count('call_release_gil') == 1 idx = opnames.index('call_release_gil') call = ops[idx] - assert int(call.args[0]) == fabs_addr + assert (call.args[0] == 'ConstClass(fabs)' or # e.g. OS/X + int(call.args[0]) == fabs_addr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -415,26 +415,26 @@ guard_nonnull_class(p8, ConstClass(W_IntObject), descr=...) guard_value(i4, 0, descr=...) guard_value(p3, ConstPtr(ptr14), descr=...) - i15 = getfield_gc_pure(p8, descr=) + i15 = getfield_gc_pure(p8, descr=) i17 = int_lt(i15, 5000) guard_true(i17, descr=...) - p18 = getfield_gc(p0, descr=) + p18 = getfield_gc(p0, descr=) guard_value(p18, ConstPtr(ptr19), descr=...) - p20 = getfield_gc(p18, descr=) + p20 = getfield_gc(p18, descr=) guard_value(p20, ConstPtr(ptr21), descr=...) guard_not_invalidated(descr=...) # most importantly, there is no getarrayitem_gc here p23 = call(ConstClass(getexecutioncontext), descr=) - p24 = getfield_gc(p23, descr=) + p24 = getfield_gc(p23, descr=) i25 = force_token() - p26 = getfield_gc(p23, descr=) + p26 = getfield_gc(p23, descr=) guard_isnull(p26, descr=...) - i27 = getfield_gc(p23, descr=) + i27 = getfield_gc(p23, descr=) i28 = int_is_zero(i27) guard_true(i28, descr=...) 
- p30 = getfield_gc(ConstPtr(ptr29), descr=) + p30 = getfield_gc(ConstPtr(ptr29), descr=) guard_nonnull_class(p30, ConstClass(W_IntObject), descr=...) - i32 = getfield_gc_pure(p30, descr=) + i32 = getfield_gc_pure(p30, descr=) i33 = int_add_ovf(i15, i32) guard_no_overflow(descr=...) --TICK-- @@ -452,14 +452,14 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- p22 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p22, i13, descr=) - setfield_gc(p4, p22, descr=) + setfield_gc(p22, i13, descr=) + setfield_gc(p4, p22, descr=) jump(p0, p1, p2, p3, p4, p7, p22, p7, descr=) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -23,6 +23,4 @@ guard_not_invalidated(descr=...) p19 = getfield_gc(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) - p22 = getfield_gc(ConstPtr(ptr21), descr=) - guard_nonnull(p22, descr=...) - """) + """) \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -142,6 +142,7 @@ i = 0 b = B(1) while i < 100: + b.x v = b.x # ID: loadattr i += v return i @@ -150,8 +151,6 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('loadattr', ''' - guard_not_invalidated(descr=...) - i16 = arraylen_gc(p10, descr=) i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) guard_no_exception(descr=...) i21 = int_and(i19, _) @@ -181,8 +180,7 @@ assert loop.match_by_id("contains", """ guard_not_invalidated(descr=...) 
i11 = force_token() - i12 = int_add_ovf(i5, i7) - guard_no_overflow(descr=...) + i12 = int_add(i5, 1) """) def test_id_compare_optimization(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -92,6 +92,43 @@ """) + def test_cached_pure_func_of_equal_fields(self): + def main(n): + class A(object): + def __init__(self, val): + self.val1 = self.val2 = val + a = A(1) + b = A(1) + sa = 0 + while n: + sa += 2*a.val1 + sa += 2*b.val2 + b.val2 = a.val1 + n -= 1 + return sa + # + log = self.run(main, [1000]) + assert log.result == 4000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i12 = int_is_true(i4) + guard_true(i12, descr=...) + guard_not_invalidated(descr=...) + i13 = int_add_ovf(i8, i9) + guard_no_overflow(descr=...) + i10p = getfield_gc_pure(p10, descr=...) + i10 = int_mul_ovf(2, i10p) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i13, i10) + guard_no_overflow(descr=...) + setfield_gc(p7, p11, descr=...) + i17 = int_sub_ovf(i4, 1) + guard_no_overflow(descr=...) + --TICK-- + jump(..., descr=...) + """) + + def test_range_iter(self): def main(n): def g(n): @@ -115,7 +152,6 @@ i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated(descr=...) - i26 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) 
i25 = int_ge(i18, i9) diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -47,6 +47,7 @@ 'pypy_initial_path' : 'state.pypy_initial_path', '_getframe' : 'vm._getframe', + '_current_frames' : 'vm._current_frames', 'setrecursionlimit' : 'vm.setrecursionlimit', 'getrecursionlimit' : 'vm.getrecursionlimit', 'setcheckinterval' : 'vm.setcheckinterval', diff --git a/pypy/module/sys/test/test_encoding.py b/pypy/module/sys/test/test_encoding.py new file mode 100644 --- /dev/null +++ b/pypy/module/sys/test/test_encoding.py @@ -0,0 +1,30 @@ +import os, py +from pypy.rlib import rlocale +from pypy.module.sys.interp_encoding import _getfilesystemencoding +from pypy.module.sys.interp_encoding import base_encoding + + +def test__getfilesystemencoding(space): + if not (rlocale.HAVE_LANGINFO and rlocale.CODESET): + py.test.skip("requires HAVE_LANGINFO and CODESET") + + def clear(): + for key in os.environ.keys(): + if key == 'LANG' or key.startswith('LC_'): + del os.environ[key] + + def get(**env): + original_env = os.environ.copy() + try: + clear() + os.environ.update(env) + return _getfilesystemencoding(space) + finally: + clear() + os.environ.update(original_env) + + assert get() in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='foobar') in (base_encoding, 'ANSI_X3.4-1968') + assert get(LANG='en_US.UTF-8') == 'UTF-8' + assert get(LC_ALL='en_US.UTF-8') == 'UTF-8' + assert get(LC_CTYPE='en_US.UTF-8') == 'UTF-8' diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -1,16 +1,16 @@ # -*- coding: iso-8859-1 -*- import autopath -from pypy.conftest import option +from pypy.conftest import option, gettestobjspace from py.test import raises from pypy.interpreter.gateway import app2interp_temp import sys def test_stdin_exists(space): - space.sys.get('stdin') + 
space.sys.get('stdin') space.sys.get('__stdin__') def test_stdout_exists(space): - space.sys.get('stdout') + space.sys.get('stdout') space.sys.get('__stdout__') class AppTestAppSysTests: @@ -25,7 +25,7 @@ assert 'sys' in modules, ( "An entry for sys " "is not in sys.modules.") sys2 = sys.modules['sys'] - assert sys is sys2, "import sys is not sys.modules[sys]." + assert sys is sys2, "import sys is not sys.modules[sys]." def test_builtin_in_modules(self): import sys modules = sys.modules @@ -89,12 +89,12 @@ else: raise AssertionError, "ZeroDivisionError not caught" - def test_io(self): + def test_io(self): import sys assert isinstance(sys.__stdout__, file) assert isinstance(sys.__stderr__, file) assert isinstance(sys.__stdin__, file) - + if self.appdirect and not isinstance(sys.stdin, file): return @@ -324,7 +324,7 @@ import sys if self.appdirect: skip("not worth running appdirect") - + encoding = sys.getdefaultencoding() try: sys.setdefaultencoding("ascii") @@ -334,11 +334,11 @@ sys.setdefaultencoding("latin-1") assert sys.getdefaultencoding() == 'latin-1' assert unicode('\x80') == u'\u0080' - + finally: sys.setdefaultencoding(encoding) - + # testing sys.settrace() is done in test_trace.py # testing sys.setprofile() is done in test_profile.py @@ -372,6 +372,21 @@ assert isinstance(v[3], int) assert isinstance(v[4], str) + assert v[0] == v.major + assert v[1] == v.minor + assert v[2] == v.build + assert v[3] == v.platform + assert v[4] == v.service_pack + + assert isinstance(v.service_pack_minor, int) + assert isinstance(v.service_pack_major, int) + assert isinstance(v.suite_mask, int) + assert isinstance(v.product_type, int) + + # This is how platform.py calls it. Make sure tuple still has 5 + # elements + maj, min, buildno, plat, csd = sys.getwindowsversion() + def test_winver(self): import sys if hasattr(sys, "winver"): @@ -524,3 +539,51 @@ # If this ever actually becomes a compilation option this test should # be changed. 
assert sys.float_repr_style == "short" + +class AppTestCurrentFrames: + + def test_current_frames(self): + try: + import thread + except ImportError: + pass + else: + skip('This test requires an intepreter without threads') + import sys + + def f(): + return sys._current_frames() + frames = f() + assert frames.keys() == [0] + assert frames[0].f_code.co_name == 'f' + +class AppTestCurrentFramesWithThread(AppTestCurrentFrames): + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('thread',)) + + def test_current_frames(self): + import sys + import time + import thread + + thread_id = thread.get_ident() + self.ready = False + def other_thread(): + self.ready = True + print "thread started" + time.sleep(5) + thread.start_new_thread(other_thread, ()) + + def f(): + for i in range(100): + if self.ready: break + time.sleep(0.1) + return sys._current_frames() + + frames = f() + thisframe = frames.pop(thread_id) + assert thisframe.f_code.co_name == 'f' + + assert len(frames) == 1 + _, other_frame = frames.popitem() + assert other_frame.f_code.co_name == 'other_thread' diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,7 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc.startswith('gcc'): +elif platform.cc is not None and platform.cc.startswith('gcc'): out = platform.execute(platform.cc, '--version').out match = re.search(' (\d+\.\d+(\.\d+)*)', out) if match: diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -1,11 +1,13 @@ """ Implementation of interpreter-level 'sys' routines. 
""" +import sys + +from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec, NoneNotWrapped +from pypy.rlib import jit from pypy.rlib.runicode import MAXUNICODE -from pypy.rlib import jit -import sys # ____________________________________________________________ @@ -43,6 +45,25 @@ f.mark_as_escaped() return space.wrap(f) +def _current_frames(space): + """_current_frames() -> dictionary + + Return a dictionary mapping each current thread T's thread id to T's + current stack frame. + + This function should be used for specialized purposes only.""" + raise OperationError(space.w_NotImplementedError, + space.wrap("XXX sys._current_frames() incompatible with the JIT")) + w_result = space.newdict() + ecs = space.threadlocals.getallvalues() + for thread_ident, ec in ecs.items(): + f = ec.gettopframe_nohidden() + f.mark_as_escaped() + space.setitem(w_result, + space.wrap(thread_ident), + space.wrap(f)) + return w_result + def setrecursionlimit(space, w_new_limit): """setrecursionlimit() sets the maximum number of nested calls that can occur before a RuntimeError is raised. On PyPy the limit is @@ -107,7 +128,7 @@ """Set the global debug tracing function. It will be called on each function call. See the debugger chapter in the library manual.""" space.getexecutioncontext().settrace(w_func) - + def setprofile(space, w_func): """Set the profiling function. It will be called on each function call and return. 
See the profiler chapter in the library manual.""" @@ -128,14 +149,47 @@ a debugger from a checkpoint, to recursively debug some other code.""" return space.getexecutioncontext().call_tracing(w_func, w_args) + +app = gateway.applevel(''' +"NOT_RPYTHON" +from _structseq import structseqtype, structseqfield + +class windows_version_info: + __metaclass__ = structseqtype + + name = "sys.getwindowsversion" + + major = structseqfield(0, "Major version number") + minor = structseqfield(1, "Minor version number") + build = structseqfield(2, "Build number") + platform = structseqfield(3, "Operating system platform") + service_pack = structseqfield(4, "Latest Service Pack installed on the system") + + # Because the indices aren't consecutive, they aren't included when + # unpacking and other such operations. + service_pack_major = structseqfield(10, "Service Pack major version number") + service_pack_minor = structseqfield(11, "Service Pack minor version number") + suite_mask = structseqfield(12, "Bit mask identifying available product suites") + product_type = structseqfield(13, "System product type") +''') + + def getwindowsversion(space): from pypy.rlib import rwin32 info = rwin32.GetVersionEx() - return space.newtuple([space.wrap(info[0]), - space.wrap(info[1]), - space.wrap(info[2]), - space.wrap(info[3]), - space.wrap(info[4])]) + w_windows_version_info = app.wget(space, "windows_version_info") + raw_version = space.newtuple([ + space.wrap(info[0]), + space.wrap(info[1]), + space.wrap(info[2]), + space.wrap(info[3]), + space.wrap(info[4]), + space.wrap(info[5]), + space.wrap(info[6]), + space.wrap(info[7]), + space.wrap(info[8]), + ]) + return space.call_function(w_windows_version_info, raw_version) @jit.dont_look_inside def get_dllhandle(space): diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py +++ 
b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py @@ -1,3 +1,4 @@ +import py; py.test.skip("xxx remove") """ Controllers tests """ @@ -8,7 +9,7 @@ class AppTestDistributed(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) def test_init(self): import distributed @@ -90,14 +91,12 @@ class AppTestDistributedTasklets(object): spaceconfig = {"objspace.std.withtproxy": True, - "objspace.usemodules._stackless": True} + "objspace.usemodules._continuation": True} reclimit = sys.getrecursionlimit() def setup_class(cls): import py.test py.test.importorskip('greenlet') - #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - # "usemodules":("_stackless",)}) cls.w_test_env_ = cls.space.appexec([], """(): from distributed import test_env return (test_env,) diff --git a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py --- a/pypy/module/test_lib_pypy/test_distributed/test_greensock.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_greensock.py @@ -1,5 +1,4 @@ - -import py +import py; py.test.skip("xxx remove") from pypy.conftest import gettestobjspace, option def setup_module(mod): @@ -10,7 +9,7 @@ if not option.runappdirect: py.test.skip("Cannot run this on top of py.py because of PopenGateway") cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless",)}) + "usemodules":("_continuation",)}) cls.w_remote_side_code = cls.space.appexec([], """(): import sys sys.path.insert(0, '%s') diff --git a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py --- a/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_socklayer.py @@ -1,4 +1,4 @@ -import py +import py; py.test.skip("xxx remove") from 
pypy.conftest import gettestobjspace def setup_module(mod): @@ -9,7 +9,8 @@ class AppTestSocklayer: def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, - "usemodules":("_stackless","_socket", "select")}) + "usemodules":("_continuation", + "_socket", "select")}) def test_socklayer(self): class X(object): diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -231,3 +231,30 @@ assert res == "next step" res = g2.switch("goes to f1 instead") assert res == "all ok" + + def test_throw_in_not_started_yet(self): + from greenlet import greenlet + # + def f1(): + never_reached + # + g1 = greenlet(f1) + raises(ValueError, g1.throw, ValueError) + assert g1.dead + + def test_exc_info_save_restore(self): + # sys.exc_info save/restore behaviour is wrong on CPython's greenlet + from greenlet import greenlet + import sys + def f(): + try: + raise ValueError('fun') + except: + exc_info = sys.exc_info() + greenlet(h).switch() + assert exc_info == sys.exc_info() + + def h(): + assert sys.exc_info() == (None, None, None) + + greenlet(f).switch() diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py rename from pypy/module/test_lib_pypy/test_stackless.py rename to pypy/module/test_lib_pypy/test_stackless_pickle.py --- a/pypy/module/test_lib_pypy/test_stackless.py +++ b/pypy/module/test_lib_pypy/test_stackless_pickle.py @@ -1,3 +1,4 @@ +import py; py.test.skip("XXX port me") from pypy.conftest import gettestobjspace, option class AppTest_Stackless: diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -43,6 +43,9 @@ ident = self._mainthreadident return self._valuedict.get(ident, None) + def getallvalues(self): + return self._valuedict + def 
enter_thread(self, space): "Notification that the current thread is just starting." ec = space.getexecutioncontext() diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -65,6 +65,10 @@ if isinstance(cell, ModuleCell): cell.w_value = w_value return + # If the new value and the current value are the same, don't create a + # level of indirection, or mutate are version. + if self.space.is_w(w_value, cell): + return if cell is not None: w_value = ModuleCell(w_value) self.mutated() diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -355,9 +355,13 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) - if (mod and ((y < 0.0) != (mod < 0.0))): - mod += y + try: + mod = math.fmod(x, y) + except ValueError: + mod = rfloat.NAN + else: + if (mod and ((y < 0.0) != (mod < 0.0))): + mod += y return W_FloatObject(mod) @@ -366,7 +370,10 @@ y = w_float2.floatval if y == 0.0: raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - mod = math.fmod(x, y) + try: + mod = math.fmod(x, y) + except ValueError: + return [W_FloatObject(rfloat.NAN), W_FloatObject(rfloat.NAN)] # fmod is typically exact, so vx-mod is *mathematically* an # exact multiple of wx. 
But this is fp arithmetic, and fp # vx - mod is an approximation; the result is that div may diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -72,10 +72,6 @@ w_seqiter.index += 1 return w_item -# XXX __length_hint__() -##def len__SeqIter(space, w_seqiter): -## return w_seqiter.getlength(space) - def iter__FastTupleIter(space, w_seqiter): return w_seqiter @@ -93,10 +89,6 @@ w_seqiter.index = index + 1 return w_item -# XXX __length_hint__() -##def len__FastTupleIter(space, w_seqiter): -## return w_seqiter.getlength(space) - def iter__FastListIter(space, w_seqiter): return w_seqiter @@ -114,10 +106,6 @@ w_seqiter.index = index + 1 return w_item -# XXX __length_hint__() -##def len__FastListIter(space, w_seqiter): -## return w_seqiter.getlength(space) - def iter__ReverseSeqIter(space, w_seqiter): return w_seqiter @@ -135,20 +123,5 @@ raise OperationError(space.w_StopIteration, space.w_None) return w_item -# XXX __length_hint__() -##def len__ReverseSeqIter(space, w_seqiter): -## if w_seqiter.w_seq is None: -## return space.wrap(0) -## index = w_seqiter.index+1 -## w_length = space.len(w_seqiter.w_seq) -## # if length of sequence is less than index :exhaust iterator -## if space.is_true(space.gt(space.wrap(w_seqiter.index), w_length)): -## w_len = space.wrap(0) -## w_seqiter.w_seq = None -## else: -## w_len =space.wrap(index) -## if space.is_true(space.lt(w_len,space.wrap(0))): -## w_len = space.wrap(0) -## return w_len register_all(vars()) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -8,7 +8,7 @@ from pypy.objspace.std import slicetype from pypy.interpreter import gateway, baseobjspace -from pypy.rlib.listsort import TimSort +from pypy.rlib.listsort import make_timsort_class from pypy.interpreter.argument import Signature class W_ListObject(W_Object): 
@@ -44,7 +44,7 @@ if w_iterable is not None: # unfortunately this is duplicating space.unpackiterable to avoid # assigning a new RPython list to 'wrappeditems', which defeats the - # W_FastSeqIterObject optimization. + # W_FastListIterObject optimization. if isinstance(w_iterable, W_ListObject): items_w.extend(w_iterable.wrappeditems) elif isinstance(w_iterable, W_TupleObject): @@ -445,6 +445,7 @@ self.w_key = w_key self.w_item = w_item +TimSort = make_timsort_class() # NOTE: all the subclasses of TimSort should inherit from a common subclass, # so make sure that only SimpleSort inherits directly from TimSort. # This is necessary to hide the parent method TimSort.lt() from the diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -325,10 +325,10 @@ # of building a list of tuples. w_dic = space.newdict() while 1: - w_key = u.get_w_obj(True) + w_key = u.get_w_obj(allow_null=True) if w_key is None: break - w_value = u.get_w_obj(False) + w_value = u.get_w_obj() space.setitem(w_dic, w_key, w_value) return w_dic register(TYPE_DICT, unmarshal_DictMulti) @@ -364,7 +364,7 @@ # so we no longer can handle it in interp_marshal.atom_strlist def unmarshal_str(u): - w_obj = u.get_w_obj(False) + w_obj = u.get_w_obj() try: return u.space.str_w(w_obj) except OperationError, e: diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ b/pypy/objspace/std/objecttype.py @@ -24,7 +24,12 @@ return w_obj.getrepr(space, '%s object' % (classname,)) def descr__str__(space, w_obj): - return space.repr(w_obj) + w_type = space.type(w_obj) + w_impl = w_type.lookup("__repr__") + if w_impl is None: + raise OperationError(space.w_TypeError, # can it really occur? 
+ space.wrap("operand does not support unary str")) + return space.get_and_call_function(w_impl, w_obj) def descr__class__(space, w_obj): return space.type(w_obj) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -39,6 +39,20 @@ assert d.getitem("a") is None assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + def test_same_key_set_twice(self): + strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + d = W_DictMultiObject(space, strategy, storage) + + v1 = strategy.version + x = object() + d.setitem("a", x) + v2 = strategy.version + assert v1 is not v2 + d.setitem("a", x) + v3 = strategy.version + assert v2 is v3 + class AppTestModuleDict(object): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withcelldict": True}) diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -767,3 +767,19 @@ def test_invalid(self): raises(ValueError, float.fromhex, "0P") + + def test_division_edgecases(self): + import math + + # inf + inf = float("inf") + assert math.isnan(inf % 3) + assert math.isnan(inf // 3) + x, y = divmod(inf, 3) + assert math.isnan(x) + assert math.isnan(y) + + # divide by 0 + raises(ZeroDivisionError, lambda: inf % 0) + raises(ZeroDivisionError, lambda: inf // 0) + raises(ZeroDivisionError, divmod, inf, 0) \ No newline at end of file diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -88,30 +88,37 @@ def test_many_names(self): import __pypy__ - class A(object): - foo = 5 - bar = 6 - baz = 7 - xyz = 8 - stuff = 9 - a = 10 - foobar = 11 + for j in range(20): + class A(object): + 
foo = 5 + bar = 6 + baz = 7 + xyz = 8 + stuff = 9 + a = 10 + foobar = 11 - a = A() - names = [name for name in A.__dict__.keys() - if not name.startswith('_')] - names.sort() - names_repeated = names * 10 - result = [] - __pypy__.reset_method_cache_counter() - for name in names_repeated: - result.append(getattr(a, name)) - append_counter = __pypy__.method_cache_counter("append") - names_counters = [__pypy__.method_cache_counter(name) - for name in names] - assert append_counter[0] >= 5 * len(names) - for name, count in zip(names, names_counters): - assert count[0] >= 5, str((name, count)) + a = A() + names = [name for name in A.__dict__.keys() + if not name.startswith('_')] + names.sort() + names_repeated = names * 10 + result = [] + __pypy__.reset_method_cache_counter() + for name in names_repeated: + result.append(getattr(a, name)) + append_counter = __pypy__.method_cache_counter("append") + names_counters = [__pypy__.method_cache_counter(name) + for name in names] + try: + assert append_counter[0] >= 10 * len(names) - 1 + for name, count in zip(names, names_counters): + assert count == (9, 1), str((name, count)) + break + except AssertionError: + pass + else: + raise def test_mutating_bases(self): class C(object): @@ -134,20 +141,24 @@ def test_custom_metaclass(self): import __pypy__ - class MetaA(type): - def __getattribute__(self, x): - return 1 - def f(self): - return 42 - A = type.__new__(MetaA, "A", (), {"f": f}) - l = [type.__getattribute__(A, "__new__")(A)] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 5 - assert cache_counter[1] >= 1 # should be (27, 3) - assert sum(cache_counter) == 10 + for j in range(20): + class MetaA(type): + def __getattribute__(self, x): + return 1 + def f(self): + return 42 + A = type.__new__(MetaA, "A", (), {"f": f}) + l = [type.__getattribute__(A, "__new__")(A)] * 10 + 
__pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + cache_counter = __pypy__.method_cache_counter("f") + assert sum(cache_counter) == 10 + if cache_counter == (9, 1): + break + #else the moon is misaligned, try again + else: + raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): import __pypy__ diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -94,3 +94,11 @@ #assert len(log) == 1 #assert log[0].message.args == ("object.__init__() takes no parameters",) #assert type(log[0].message) is DeprecationWarning + + def test_object_str(self): + # obscure case: __str__() must delegate to __repr__() without adding + # type checking on its own + class A(object): + def __repr__(self): + return 123456 + assert A().__str__() == 123456 diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -168,7 +168,7 @@ def test_incomplete_format(self): raises(ValueError, '%'.__mod__, ((23,),)) - raises(ValueError, '%('.__mod__, ({},)) + raises((ValueError, TypeError), '%('.__mod__, ({},)) def test_format_char(self): import sys diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -780,8 +780,22 @@ assert type(s) is unicode assert s == u'\u1234' + # now the same with a new-style class... 
+ class A(object): + def __init__(self, num): + self.num = num + def __str__(self): + return unichr(self.num) + + s = '%s' % A(111) # this is ASCII + assert type(s) is unicode + assert s == chr(111) + + s = '%s' % A(0x1234) # this is not ASCII + assert type(s) is unicode + assert s == u'\u1234' + def test_formatting_unicode__str__2(self): - skip("this is completely insane") class A: def __str__(self): return u'baz' @@ -798,9 +812,22 @@ s = '%s %s' % (a, b) assert s == u'baz bar' + skip("but this case here is completely insane") s = '%s %s' % (b, a) assert s == u'foo baz' + def test_formatting_unicode__str__3(self): + # "bah" is all I can say + class X(object): + def __repr__(self): + return u'\u1234' + '%s' % X() + # + class X(object): + def __str__(self): + return u'\u1234' + '%s' % X() + def test_str_subclass(self): class Foo9(str): def __unicode__(self): diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -46,6 +46,7 @@ def specialize_call(self, hop): r_generic_object = getinstancerepr(hop.rtyper, None) [v] = hop.inputargs(r_generic_object) # might generate a cast_pointer + hop.exception_cannot_occur() return v def rtype_simple_call(self, hop): diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py --- a/pypy/rlib/libffi.py +++ b/pypy/rlib/libffi.py @@ -206,6 +206,7 @@ _immutable_fields_ = ['funcsym'] argtypes = [] restype = lltype.nullptr(clibffi.FFI_TYPE_P.TO) + flags = 0 funcsym = lltype.nullptr(rffi.VOIDP.TO) def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL, diff --git a/pypy/rlib/listsort.py b/pypy/rlib/listsort.py --- a/pypy/rlib/listsort.py +++ b/pypy/rlib/listsort.py @@ -7,588 +7,589 @@ ## ------------------------------------------------------------------------ ## Adapted from CPython, original code and algorithms by Tim Peters -## CAREFUL: -## this class has to be used carefully, because all the lists that are -## sorted will be unified +def 
make_timsort_class(): -class TimSort: - """TimSort(list).sort() + class TimSort: + """TimSort(list).sort() - Sorts the list in-place, using the overridable method lt() for comparison. - """ + Sorts the list in-place, using the overridable method lt() for comparison. + """ - def __init__(self, list, listlength=None): - self.list = list - if listlength is None: - listlength = len(list) - self.listlength = listlength + def __init__(self, list, listlength=None): + self.list = list + if listlength is None: + listlength = len(list) + self.listlength = listlength - def lt(self, a, b): - return a < b + def lt(self, a, b): + return a < b - def le(self, a, b): - return not self.lt(b, a) # always use self.lt() as the primitive + def le(self, a, b): + return not self.lt(b, a) # always use self.lt() as the primitive - # binarysort is the best method for sorting small arrays: it does - # few compares, but can do data movement quadratic in the number of - # elements. - # "a" is a contiguous slice of a list, and is sorted via binary insertion. - # This sort is stable. - # On entry, the first "sorted" elements are already sorted. - # Even in case of error, the output slice will be some permutation of - # the input (nothing is lost or duplicated). + # binarysort is the best method for sorting small arrays: it does + # few compares, but can do data movement quadratic in the number of + # elements. + # "a" is a contiguous slice of a list, and is sorted via binary insertion. + # This sort is stable. + # On entry, the first "sorted" elements are already sorted. + # Even in case of error, the output slice will be some permutation of + # the input (nothing is lost or duplicated). - def binarysort(self, a, sorted=1): - for start in xrange(a.base + sorted, a.base + a.len): - # set l to where list[start] belongs - l = a.base - r = start - pivot = a.list[r] - # Invariants: - # pivot >= all in [base, l). - # pivot < all in [r, start). - # The second is vacuously true at the start. 
- while l < r: - p = l + ((r - l) >> 1) - if self.lt(pivot, a.list[p]): - r = p + def binarysort(self, a, sorted=1): + for start in xrange(a.base + sorted, a.base + a.len): + # set l to where list[start] belongs + l = a.base + r = start + pivot = a.list[r] + # Invariants: + # pivot >= all in [base, l). + # pivot < all in [r, start). + # The second is vacuously true at the start. + while l < r: + p = l + ((r - l) >> 1) + if self.lt(pivot, a.list[p]): + r = p + else: + l = p+1 + assert l == r + # The invariants still hold, so pivot >= all in [base, l) and + # pivot < all in [l, start), so pivot belongs at l. Note + # that if there are elements equal to pivot, l points to the + # first slot after them -- that's why this sort is stable. + # Slide over to make room. + for p in xrange(start, l, -1): + a.list[p] = a.list[p-1] + a.list[l] = pivot + + # Compute the length of the run in the slice "a". + # "A run" is the longest ascending sequence, with + # + # a[0] <= a[1] <= a[2] <= ... + # + # or the longest descending sequence, with + # + # a[0] > a[1] > a[2] > ... + # + # Return (run, descending) where descending is False in the former case, + # or True in the latter. + # For its intended use in a stable mergesort, the strictness of the defn of + # "descending" is needed so that the caller can safely reverse a descending + # sequence without violating stability (strict > ensures there are no equal + # elements to get out of order). + + def count_run(self, a): + if a.len <= 1: + n = a.len + descending = False + else: + n = 2 + if self.lt(a.list[a.base + 1], a.list[a.base]): + descending = True + for p in xrange(a.base + 2, a.base + a.len): + if self.lt(a.list[p], a.list[p-1]): + n += 1 + else: + break else: - l = p+1 - assert l == r - # The invariants still hold, so pivot >= all in [base, l) and - # pivot < all in [l, start), so pivot belongs at l. 
Note - # that if there are elements equal to pivot, l points to the - # first slot after them -- that's why this sort is stable. - # Slide over to make room. - for p in xrange(start, l, -1): - a.list[p] = a.list[p-1] - a.list[l] = pivot + descending = False + for p in xrange(a.base + 2, a.base + a.len): + if self.lt(a.list[p], a.list[p-1]): + break + else: + n += 1 + return ListSlice(a.list, a.base, n), descending - # Compute the length of the run in the slice "a". - # "A run" is the longest ascending sequence, with - # - # a[0] <= a[1] <= a[2] <= ... - # - # or the longest descending sequence, with - # - # a[0] > a[1] > a[2] > ... - # - # Return (run, descending) where descending is False in the former case, - # or True in the latter. - # For its intended use in a stable mergesort, the strictness of the defn of - # "descending" is needed so that the caller can safely reverse a descending - # sequence without violating stability (strict > ensures there are no equal - # elements to get out of order). + # Locate the proper position of key in a sorted vector; if the vector + # contains an element equal to key, return the position immediately to the + # left of the leftmost equal element -- or to the right of the rightmost + # equal element if the flag "rightmost" is set. + # + # "hint" is an index at which to begin the search, 0 <= hint < a.len. + # The closer hint is to the final result, the faster this runs. + # + # The return value is the index 0 <= k <= a.len such that + # + # a[k-1] < key <= a[k] (if rightmost is False) + # a[k-1] <= key < a[k] (if rightmost is True) + # + # as long as the indices are in bound. IOW, key belongs at index k; + # or, IOW, the first k elements of a should precede key, and the last + # n-k should follow key. 
- def count_run(self, a): - if a.len <= 1: - n = a.len - descending = False - else: - n = 2 - if self.lt(a.list[a.base + 1], a.list[a.base]): - descending = True - for p in xrange(a.base + 2, a.base + a.len): - if self.lt(a.list[p], a.list[p-1]): - n += 1 - else: + def gallop(self, key, a, hint, rightmost): + assert 0 <= hint < a.len + if rightmost: + lower = self.le # search for the largest k for which a[k] <= key + else: + lower = self.lt # search for the largest k for which a[k] < key + + p = a.base + hint + lastofs = 0 + ofs = 1 + if lower(a.list[p], key): + # a[hint] < key -- gallop right, until + # a[hint + lastofs] < key <= a[hint + ofs] + + maxofs = a.len - hint # a[a.len-1] is highest + while ofs < maxofs: + if lower(a.list[p + ofs], key): + lastofs = ofs + try: + ofs = ovfcheck_lshift(ofs, 1) + except OverflowError: + ofs = maxofs + else: + ofs = ofs + 1 + else: # key <= a[hint + ofs] break + + if ofs > maxofs: + ofs = maxofs + # Translate back to offsets relative to a. + lastofs += hint + ofs += hint + else: - descending = False - for p in xrange(a.base + 2, a.base + a.len): - if self.lt(a.list[p], a.list[p-1]): + # key <= a[hint] -- gallop left, until + # a[hint - ofs] < key <= a[hint - lastofs] + maxofs = hint + 1 # a[0] is lowest + while ofs < maxofs: + if lower(a.list[p - ofs], key): break else: - n += 1 - return ListSlice(a.list, a.base, n), descending + # key <= a[hint - ofs] + lastofs = ofs + try: + ofs = ovfcheck_lshift(ofs, 1) + except OverflowError: + ofs = maxofs + else: + ofs = ofs + 1 + if ofs > maxofs: + ofs = maxofs + # Translate back to positive offsets relative to a. + lastofs, ofs = hint-ofs, hint-lastofs - # Locate the proper position of key in a sorted vector; if the vector - # contains an element equal to key, return the position immediately to the - # left of the leftmost equal element -- or to the right of the rightmost - # equal element if the flag "rightmost" is set. 
- # - # "hint" is an index at which to begin the search, 0 <= hint < a.len. - # The closer hint is to the final result, the faster this runs. - # - # The return value is the index 0 <= k <= a.len such that - # - # a[k-1] < key <= a[k] (if rightmost is False) - # a[k-1] <= key < a[k] (if rightmost is True) - # - # as long as the indices are in bound. IOW, key belongs at index k; - # or, IOW, the first k elements of a should precede key, and the last - # n-k should follow key. + assert -1 <= lastofs < ofs <= a.len - def gallop(self, key, a, hint, rightmost): - assert 0 <= hint < a.len - if rightmost: - lower = self.le # search for the largest k for which a[k] <= key - else: - lower = self.lt # search for the largest k for which a[k] < key + # Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the + # right of lastofs but no farther right than ofs. Do a binary + # search, with invariant a[lastofs-1] < key <= a[ofs]. - p = a.base + hint - lastofs = 0 - ofs = 1 - if lower(a.list[p], key): - # a[hint] < key -- gallop right, until - # a[hint + lastofs] < key <= a[hint + ofs] + lastofs += 1 + while lastofs < ofs: + m = lastofs + ((ofs - lastofs) >> 1) + if lower(a.list[a.base + m], key): + lastofs = m+1 # a[m] < key + else: + ofs = m # key <= a[m] - maxofs = a.len - hint # a[a.len-1] is highest - while ofs < maxofs: - if lower(a.list[p + ofs], key): - lastofs = ofs - try: - ofs = ovfcheck_lshift(ofs, 1) - except OverflowError: - ofs = maxofs - else: - ofs = ofs + 1 - else: # key <= a[hint + ofs] - break + assert lastofs == ofs # so a[ofs-1] < key <= a[ofs] + return ofs - if ofs > maxofs: - ofs = maxofs - # Translate back to offsets relative to a. - lastofs += hint - ofs += hint + # hint for the annotator: the argument 'rightmost' is always passed in as + # a constant (either True or False), so we can specialize the function for + # the two cases. 
(This is actually needed for technical reasons: the + # variable 'lower' must contain a known method, which is the case in each + # specialized version but not in the unspecialized one.) + gallop._annspecialcase_ = "specialize:arg(4)" - else: - # key <= a[hint] -- gallop left, until - # a[hint - ofs] < key <= a[hint - lastofs] - maxofs = hint + 1 # a[0] is lowest - while ofs < maxofs: - if lower(a.list[p - ofs], key): - break - else: - # key <= a[hint - ofs] - lastofs = ofs - try: - ofs = ovfcheck_lshift(ofs, 1) - except OverflowError: - ofs = maxofs - else: - ofs = ofs + 1 - if ofs > maxofs: - ofs = maxofs - # Translate back to positive offsets relative to a. - lastofs, ofs = hint-ofs, hint-lastofs + # ____________________________________________________________ - assert -1 <= lastofs < ofs <= a.len + # When we get into galloping mode, we stay there until both runs win less + # often than MIN_GALLOP consecutive times. See listsort.txt for more info. + MIN_GALLOP = 7 - # Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the - # right of lastofs but no farther right than ofs. Do a binary - # search, with invariant a[lastofs-1] < key <= a[ofs]. - - lastofs += 1 - while lastofs < ofs: - m = lastofs + ((ofs - lastofs) >> 1) - if lower(a.list[a.base + m], key): - lastofs = m+1 # a[m] < key - else: - ofs = m # key <= a[m] + def merge_init(self): + # This controls when we get *into* galloping mode. It's initialized + # to MIN_GALLOP. merge_lo and merge_hi tend to nudge it higher for + # random data, and lower for highly structured data. + self.min_gallop = self.MIN_GALLOP - assert lastofs == ofs # so a[ofs-1] < key <= a[ofs] - return ofs + # A stack of n pending runs yet to be merged. Run #i starts at + # address pending[i].base and extends for pending[i].len elements. 
+ # It's always true (so long as the indices are in bounds) that + # + # pending[i].base + pending[i].len == pending[i+1].base + # + # so we could cut the storage for this, but it's a minor amount, + # and keeping all the info explicit simplifies the code. + self.pending = [] - # hint for the annotator: the argument 'rightmost' is always passed in as - # a constant (either True or False), so we can specialize the function for - # the two cases. (This is actually needed for technical reasons: the - # variable 'lower' must contain a known method, which is the case in each - # specialized version but not in the unspecialized one.) - gallop._annspecialcase_ = "specialize:arg(4)" + # Merge the slice "a" with the slice "b" in a stable way, in-place. + # a.len and b.len must be > 0, and a.base + a.len == b.base. + # Must also have that b.list[b.base] < a.list[a.base], that + # a.list[a.base+a.len-1] belongs at the end of the merge, and should have + # a.len <= b.len. See listsort.txt for more info. - # ____________________________________________________________ + def merge_lo(self, a, b): + assert a.len > 0 and b.len > 0 and a.base + a.len == b.base + min_gallop = self.min_gallop + dest = a.base + a = a.copyitems() - # When we get into galloping mode, we stay there until both runs win less - # often than MIN_GALLOP consecutive times. See listsort.txt for more info. - MIN_GALLOP = 7 + # Invariant: elements in "a" are waiting to be reinserted into the list + # at "dest". They should be merged with the elements of "b". + # b.base == dest + a.len. + # We use a finally block to ensure that the elements remaining in + # the copy "a" are reinserted back into self.list in all cases. + try: + self.list[dest] = b.popleft() + dest += 1 + if a.len == 1 or b.len == 0: + return - def merge_init(self): - # This controls when we get *into* galloping mode. It's initialized - # to MIN_GALLOP. 
merge_lo and merge_hi tend to nudge it higher for - # random data, and lower for highly structured data. - self.min_gallop = self.MIN_GALLOP + while True: + acount = 0 # number of times A won in a row + bcount = 0 # number of times B won in a row - # A stack of n pending runs yet to be merged. Run #i starts at - # address pending[i].base and extends for pending[i].len elements. - # It's always true (so long as the indices are in bounds) that - # - # pending[i].base + pending[i].len == pending[i+1].base - # - # so we could cut the storage for this, but it's a minor amount, - # and keeping all the info explicit simplifies the code. - self.pending = [] + # Do the straightforward thing until (if ever) one run + # appears to win consistently. + while True: + if self.lt(b.list[b.base], a.list[a.base]): + self.list[dest] = b.popleft() + dest += 1 + if b.len == 0: + return + bcount += 1 + acount = 0 + if bcount >= min_gallop: + break + else: + self.list[dest] = a.popleft() + dest += 1 + if a.len == 1: + return + acount += 1 + bcount = 0 + if acount >= min_gallop: + break - # Merge the slice "a" with the slice "b" in a stable way, in-place. - # a.len and b.len must be > 0, and a.base + a.len == b.base. - # Must also have that b.list[b.base] < a.list[a.base], that - # a.list[a.base+a.len-1] belongs at the end of the merge, and should have - # a.len <= b.len. See listsort.txt for more info. + # One run is winning so consistently that galloping may + # be a huge win. So try that, and continue galloping until + # (if ever) neither run appears to be winning consistently + # anymore. + min_gallop += 1 - def merge_lo(self, a, b): - assert a.len > 0 and b.len > 0 and a.base + a.len == b.base - min_gallop = self.min_gallop - dest = a.base - a = a.copyitems() + while True: + min_gallop -= min_gallop > 1 + self.min_gallop = min_gallop - # Invariant: elements in "a" are waiting to be reinserted into the list - # at "dest". They should be merged with the elements of "b". 
- # b.base == dest + a.len. - # We use a finally block to ensure that the elements remaining in - # the copy "a" are reinserted back into self.list in all cases. - try: - self.list[dest] = b.popleft() - dest += 1 - if a.len == 1 or b.len == 0: - return + acount = self.gallop(b.list[b.base], a, hint=0, + rightmost=True) + for p in xrange(a.base, a.base + acount): + self.list[dest] = a.list[p] + dest += 1 + a.advance(acount) + # a.len==0 is impossible now if the comparison + # function is consistent, but we can't assume + # that it is. + if a.len <= 1: + return - while True: - acount = 0 # number of times A won in a row - bcount = 0 # number of times B won in a row - - # Do the straightforward thing until (if ever) one run - # appears to win consistently. - while True: - if self.lt(b.list[b.base], a.list[a.base]): self.list[dest] = b.popleft() dest += 1 if b.len == 0: return - bcount += 1 - acount = 0 - if bcount >= min_gallop: - break - else: + + bcount = self.gallop(a.list[a.base], b, hint=0, + rightmost=False) + for p in xrange(b.base, b.base + bcount): + self.list[dest] = b.list[p] + dest += 1 + b.advance(bcount) + if b.len == 0: + return + self.list[dest] = a.popleft() dest += 1 if a.len == 1: return - acount += 1 - bcount = 0 - if acount >= min_gallop: + + if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP: break - # One run is winning so consistently that galloping may - # be a huge win. So try that, and continue galloping until - # (if ever) neither run appears to be winning consistently - # anymore. - min_gallop += 1 + min_gallop += 1 # penalize it for leaving galloping mode + self.min_gallop = min_gallop + + finally: + # The last element of a belongs at the end of the merge, so we copy + # the remaining elements of b before the remaining elements of a. 
+ assert a.len >= 0 and b.len >= 0 + for p in xrange(b.base, b.base + b.len): + self.list[dest] = b.list[p] + dest += 1 + for p in xrange(a.base, a.base + a.len): + self.list[dest] = a.list[p] + dest += 1 + + # Same as merge_lo(), but should have a.len >= b.len. + + def merge_hi(self, a, b): + assert a.len > 0 and b.len > 0 and a.base + a.len == b.base + min_gallop = self.min_gallop + dest = b.base + b.len + b = b.copyitems() + + # Invariant: elements in "b" are waiting to be reinserted into the list + # before "dest". They should be merged with the elements of "a". + # a.base + a.len == dest - b.len. + # We use a finally block to ensure that the elements remaining in + # the copy "b" are reinserted back into self.list in all cases. + try: + dest -= 1 + self.list[dest] = a.popright() + if a.len == 0 or b.len == 1: + return while True: - min_gallop -= min_gallop > 1 + acount = 0 # number of times A won in a row + bcount = 0 # number of times B won in a row + + # Do the straightforward thing until (if ever) one run + # appears to win consistently. + while True: + nexta = a.list[a.base + a.len - 1] + nextb = b.list[b.base + b.len - 1] + if self.lt(nextb, nexta): + dest -= 1 + self.list[dest] = nexta + a.len -= 1 + if a.len == 0: + return + acount += 1 + bcount = 0 + if acount >= min_gallop: + break + else: + dest -= 1 + self.list[dest] = nextb + b.len -= 1 + if b.len == 1: + return + bcount += 1 + acount = 0 + if bcount >= min_gallop: + break + + # One run is winning so consistently that galloping may + # be a huge win. So try that, and continue galloping until + # (if ever) neither run appears to be winning consistently + # anymore. 
+ min_gallop += 1 + + while True: + min_gallop -= min_gallop > 1 + self.min_gallop = min_gallop + + nextb = b.list[b.base + b.len - 1] + k = self.gallop(nextb, a, hint=a.len-1, rightmost=True) + acount = a.len - k + for p in xrange(a.base + a.len - 1, a.base + k - 1, -1): + dest -= 1 + self.list[dest] = a.list[p] + a.len -= acount + if a.len == 0: + return + + dest -= 1 + self.list[dest] = b.popright() + if b.len == 1: + return + + nexta = a.list[a.base + a.len - 1] + k = self.gallop(nexta, b, hint=b.len-1, rightmost=False) + bcount = b.len - k + for p in xrange(b.base + b.len - 1, b.base + k - 1, -1): + dest -= 1 + self.list[dest] = b.list[p] + b.len -= bcount + # b.len==0 is impossible now if the comparison + # function is consistent, but we can't assume + # that it is. + if b.len <= 1: + return + + dest -= 1 + self.list[dest] = a.popright() + if a.len == 0: + return + + if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP: + break + + min_gallop += 1 # penalize it for leaving galloping mode self.min_gallop = min_gallop - acount = self.gallop(b.list[b.base], a, hint=0, - rightmost=True) - for p in xrange(a.base, a.base + acount): - self.list[dest] = a.list[p] - dest += 1 - a.advance(acount) - # a.len==0 is impossible now if the comparison - # function is consistent, but we can't assume - # that it is. - if a.len <= 1: - return + finally: + # The last element of a belongs at the end of the merge, so we copy + # the remaining elements of a and then the remaining elements of b. + assert a.len >= 0 and b.len >= 0 + for p in xrange(a.base + a.len - 1, a.base - 1, -1): + dest -= 1 + self.list[dest] = a.list[p] + for p in xrange(b.base + b.len - 1, b.base - 1, -1): + dest -= 1 + self.list[dest] = b.list[p] - self.list[dest] = b.popleft() - dest += 1 - if b.len == 0: - return + # Merge the two runs at stack indices i and i+1. 
- bcount = self.gallop(a.list[a.base], b, hint=0, - rightmost=False) - for p in xrange(b.base, b.base + bcount): - self.list[dest] = b.list[p] - dest += 1 - b.advance(bcount) - if b.len == 0: - return + def merge_at(self, i): + a = self.pending[i] + b = self.pending[i+1] + assert a.len > 0 and b.len > 0 + assert a.base + a.len == b.base - self.list[dest] = a.popleft() - dest += 1 - if a.len == 1: - return + # Record the length of the combined runs and remove the run b + self.pending[i] = ListSlice(self.list, a.base, a.len + b.len) + del self.pending[i+1] - if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP: - break - - min_gallop += 1 # penalize it for leaving galloping mode - self.min_gallop = min_gallop - - finally: - # The last element of a belongs at the end of the merge, so we copy - # the remaining elements of b before the remaining elements of a. - assert a.len >= 0 and b.len >= 0 - for p in xrange(b.base, b.base + b.len): - self.list[dest] = b.list[p] - dest += 1 - for p in xrange(a.base, a.base + a.len): - self.list[dest] = a.list[p] - dest += 1 - - # Same as merge_lo(), but should have a.len >= b.len. - - def merge_hi(self, a, b): - assert a.len > 0 and b.len > 0 and a.base + a.len == b.base - min_gallop = self.min_gallop - dest = b.base + b.len - b = b.copyitems() - - # Invariant: elements in "b" are waiting to be reinserted into the list - # before "dest". They should be merged with the elements of "a". - # a.base + a.len == dest - b.len. - # We use a finally block to ensure that the elements remaining in - # the copy "b" are reinserted back into self.list in all cases. - try: - dest -= 1 - self.list[dest] = a.popright() - if a.len == 0 or b.len == 1: + # Where does b start in a? Elements in a before that can be + # ignored (already in place). 
+ k = self.gallop(b.list[b.base], a, hint=0, rightmost=True) + a.advance(k) + if a.len == 0: return - while True: - acount = 0 # number of times A won in a row - bcount = 0 # number of times B won in a row + # Where does a end in b? Elements in b after that can be + # ignored (already in place). + b.len = self.gallop(a.list[a.base+a.len-1], b, hint=b.len-1, + rightmost=False) + if b.len == 0: + return - # Do the straightforward thing until (if ever) one run - # appears to win consistently. - while True: - nexta = a.list[a.base + a.len - 1] - nextb = b.list[b.base + b.len - 1] - if self.lt(nextb, nexta): - dest -= 1 - self.list[dest] = nexta - a.len -= 1 - if a.len == 0: - return - acount += 1 - bcount = 0 - if acount >= min_gallop: - break + # Merge what remains of the runs. The direction is chosen to + # minimize the temporary storage needed. + if a.len <= b.len: + self.merge_lo(a, b) + else: + self.merge_hi(a, b) + + # Examine the stack of runs waiting to be merged, merging adjacent runs + # until the stack invariants are re-established: + # + # 1. len[-3] > len[-2] + len[-1] + # 2. len[-2] > len[-1] + # + # See listsort.txt for more info. + + def merge_collapse(self): + p = self.pending + while len(p) > 1: + if len(p) >= 3 and p[-3].len <= p[-2].len + p[-1].len: + if p[-3].len < p[-1].len: + self.merge_at(-3) else: - dest -= 1 - self.list[dest] = nextb - b.len -= 1 - if b.len == 1: - return - bcount += 1 - acount = 0 - if bcount >= min_gallop: - break + self.merge_at(-2) + elif p[-2].len <= p[-1].len: + self.merge_at(-2) + else: + break - # One run is winning so consistently that galloping may - # be a huge win. So try that, and continue galloping until - # (if ever) neither run appears to be winning consistently - # anymore. - min_gallop += 1 + # Regardless of invariants, merge all runs on the stack until only one + # remains. This is used at the end of the mergesort. 
- while True: - min_gallop -= min_gallop > 1 - self.min_gallop = min_gallop - - nextb = b.list[b.base + b.len - 1] - k = self.gallop(nextb, a, hint=a.len-1, rightmost=True) - acount = a.len - k - for p in xrange(a.base + a.len - 1, a.base + k - 1, -1): - dest -= 1 - self.list[dest] = a.list[p] - a.len -= acount - if a.len == 0: - return - - dest -= 1 - self.list[dest] = b.popright() - if b.len == 1: - return - - nexta = a.list[a.base + a.len - 1] - k = self.gallop(nexta, b, hint=b.len-1, rightmost=False) - bcount = b.len - k - for p in xrange(b.base + b.len - 1, b.base + k - 1, -1): - dest -= 1 - self.list[dest] = b.list[p] - b.len -= bcount - # b.len==0 is impossible now if the comparison - # function is consistent, but we can't assume - # that it is. - if b.len <= 1: - return - - dest -= 1 - self.list[dest] = a.popright() - if a.len == 0: - return - - if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP: - break - - min_gallop += 1 # penalize it for leaving galloping mode - self.min_gallop = min_gallop - - finally: - # The last element of a belongs at the end of the merge, so we copy - # the remaining elements of a and then the remaining elements of b. - assert a.len >= 0 and b.len >= 0 - for p in xrange(a.base + a.len - 1, a.base - 1, -1): - dest -= 1 - self.list[dest] = a.list[p] - for p in xrange(b.base + b.len - 1, b.base - 1, -1): - dest -= 1 - self.list[dest] = b.list[p] - - # Merge the two runs at stack indices i and i+1. - - def merge_at(self, i): - a = self.pending[i] - b = self.pending[i+1] - assert a.len > 0 and b.len > 0 - assert a.base + a.len == b.base - - # Record the length of the combined runs and remove the run b - self.pending[i] = ListSlice(self.list, a.base, a.len + b.len) - del self.pending[i+1] - - # Where does b start in a? Elements in a before that can be - # ignored (already in place). - k = self.gallop(b.list[b.base], a, hint=0, rightmost=True) - a.advance(k) - if a.len == 0: - return - - # Where does a end in b? 
Elements in b after that can be - # ignored (already in place). - b.len = self.gallop(a.list[a.base+a.len-1], b, hint=b.len-1, - rightmost=False) - if b.len == 0: - return - - # Merge what remains of the runs. The direction is chosen to - # minimize the temporary storage needed. - if a.len <= b.len: - self.merge_lo(a, b) - else: - self.merge_hi(a, b) - - # Examine the stack of runs waiting to be merged, merging adjacent runs - # until the stack invariants are re-established: - # - # 1. len[-3] > len[-2] + len[-1] - # 2. len[-2] > len[-1] - # - # See listsort.txt for more info. - - def merge_collapse(self): - p = self.pending - while len(p) > 1: - if len(p) >= 3 and p[-3].len <= p[-2].len + p[-1].len: - if p[-3].len < p[-1].len: + def merge_force_collapse(self): + p = self.pending + while len(p) > 1: + if len(p) >= 3 and p[-3].len < p[-1].len: self.merge_at(-3) else: self.merge_at(-2) - elif p[-2].len <= p[-1].len: - self.merge_at(-2) - else: - break - # Regardless of invariants, merge all runs on the stack until only one - # remains. This is used at the end of the mergesort. + # Compute a good value for the minimum run length; natural runs shorter + # than this are boosted artificially via binary insertion. + # + # If n < 64, return n (it's too small to bother with fancy stuff). + # Else if n is an exact power of 2, return 32. + # Else return an int k, 32 <= k <= 64, such that n/k is close to, but + # strictly less than, an exact power of 2. + # + # See listsort.txt for more info. - def merge_force_collapse(self): - p = self.pending - while len(p) > 1: - if len(p) >= 3 and p[-3].len < p[-1].len: - self.merge_at(-3) - else: - self.merge_at(-2) + def merge_compute_minrun(self, n): + r = 0 # becomes 1 if any 1 bits are shifted off + while n >= 64: + r |= n & 1 + n >>= 1 + return n + r - # Compute a good value for the minimum run length; natural runs shorter - # than this are boosted artificially via binary insertion. 
- # - # If n < 64, return n (it's too small to bother with fancy stuff). - # Else if n is an exact power of 2, return 32. - # Else return an int k, 32 <= k <= 64, such that n/k is close to, but - # strictly less than, an exact power of 2. - # - # See listsort.txt for more info. + # ____________________________________________________________ + # Entry point. - def merge_compute_minrun(self, n): - r = 0 # becomes 1 if any 1 bits are shifted off - while n >= 64: - r |= n & 1 - n >>= 1 - return n + r + def sort(self): + remaining = ListSlice(self.list, 0, self.listlength) + if remaining.len < 2: + return - # ____________________________________________________________ - # Entry point. + # March over the array once, left to right, finding natural runs, + # and extending short natural runs to minrun elements. + self.merge_init() + minrun = self.merge_compute_minrun(remaining.len) - def sort(self): - remaining = ListSlice(self.list, 0, self.listlength) - if remaining.len < 2: - return + while remaining.len > 0: + # Identify next run. + run, descending = self.count_run(remaining) + if descending: + run.reverse() + # If short, extend to min(minrun, nremaining). + if run.len < minrun: + sorted = run.len + run.len = min(minrun, remaining.len) + self.binarysort(run, sorted) + # Advance remaining past this run. + remaining.advance(run.len) + # Push run onto pending-runs stack, and maybe merge. + self.pending.append(run) + self.merge_collapse() - # March over the array once, left to right, finding natural runs, - # and extending short natural runs to minrun elements. - self.merge_init() - minrun = self.merge_compute_minrun(remaining.len) + assert remaining.base == self.listlength - while remaining.len > 0: - # Identify next run. - run, descending = self.count_run(remaining) - if descending: - run.reverse() - # If short, extend to min(minrun, nremaining). 
- if run.len < minrun: - sorted = run.len - run.len = min(minrun, remaining.len) - self.binarysort(run, sorted) - # Advance remaining past this run. - remaining.advance(run.len) - # Push run onto pending-runs stack, and maybe merge. - self.pending.append(run) - self.merge_collapse() + self.merge_force_collapse() + assert len(self.pending) == 1 + assert self.pending[0].base == 0 + assert self.pending[0].len == self.listlength - assert remaining.base == self.listlength - self.merge_force_collapse() - assert len(self.pending) == 1 - assert self.pending[0].base == 0 - assert self.pending[0].len == self.listlength + class ListSlice: + "A sublist of a list." + def __init__(self, list, base, len): + self.list = list + self.base = base + self.len = len -class ListSlice: - "A sublist of a list." + def copyitems(self): + "Make a copy of the slice of the original list." + start = self.base + stop = self.base + self.len + assert 0 <= start <= stop # annotator hint + return ListSlice(self.list[start:stop], 0, self.len) - def __init__(self, list, base, len): - self.list = list - self.base = base - self.len = len + def advance(self, n): + self.base += n + self.len -= n - def copyitems(self): - "Make a copy of the slice of the original list." - start = self.base - stop = self.base + self.len - assert 0 <= start <= stop # annotator hint - return ListSlice(self.list[start:stop], 0, self.len) + def popleft(self): + result = self.list[self.base] + self.base += 1 + self.len -= 1 + return result - def advance(self, n): - self.base += n - self.len -= n + def popright(self): + self.len -= 1 + return self.list[self.base + self.len] - def popleft(self): - result = self.list[self.base] - self.base += 1 - self.len -= 1 - return result + def reverse(self): + "Reverse the slice in-place." 
+ list = self.list + lo = self.base + hi = lo + self.len - 1 + while lo < hi: + list[lo], list[hi] = list[hi], list[lo] + lo += 1 + hi -= 1 + return TimSort - def popright(self): - self.len -= 1 - return self.list[self.base + self.len] - - def reverse(self): - "Reverse the slice in-place." - list = self.list - lo = self.base - hi = lo + self.len - 1 - while lo < hi: - list[lo], list[hi] = list[hi], list[lo] - lo += 1 - hi -= 1 +TimSort = make_timsort_class() #backward compatible interface diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -19,6 +19,8 @@ # def f(... # +from pypy.rpython.extregistry import ExtRegistryEntry + class _Specialize(object): def memo(self): """ Specialize functions based on argument values. All arguments has @@ -177,6 +179,34 @@ obj.__class__ = FREED_OBJECT # ____________________________________________________________ + +def newlist(sizehint=0): + """ Create a new list, but pass a hint how big the size should be + preallocated + """ + return [] + +class Entry(ExtRegistryEntry): + _about_ = newlist + + def compute_result_annotation(self, s_sizehint): + from pypy.annotation.model import SomeInteger + + assert isinstance(s_sizehint, SomeInteger) + return self.bookkeeper.newlist() + + def specialize_call(self, orig_hop, i_sizehint=None): + from pypy.rpython.rlist import rtype_newlist + # fish a bit hop + hop = orig_hop.copy() + v = hop.args_v[0] + r, s = hop.r_s_popfirstarg() + if s.is_constant(): + v = hop.inputconst(r, s.const) + hop.exception_is_here() + return rtype_newlist(hop, v_sizehint=v) + +# ____________________________________________________________ # # id-like functions. The idea is that calling hash() or id() is not # allowed in RPython. 
You have to call one of the following more @@ -301,8 +331,6 @@ # ---------- -from pypy.rpython.extregistry import ExtRegistryEntry - class Entry(ExtRegistryEntry): _about_ = compute_hash diff --git a/pypy/rlib/rcoroutine.py b/pypy/rlib/rcoroutine.py deleted file mode 100644 --- a/pypy/rlib/rcoroutine.py +++ /dev/null @@ -1,357 +0,0 @@ -""" -Basic Concept: --------------- - -All concurrency is expressed by some means of coroutines. -This is the lowest possible exposable interface. - -A coroutine is a structure that controls a sequence -of continuations in time. It contains a frame object -that is a restartable stack chain. This frame object -is updated on every switch. - -The frame can be None. Either the coroutine is not yet -bound, or it is the current coroutine of some costate. -See below. XXX rewrite a definition of these terms. - -There is always a notation of a "current" and a "last" -coroutine. Current has no frame and represents the -running program. last is needed to keep track of the -coroutine that receives a new frame chain after a switch. - -A costate object holds last and current. -There are different coroutine concepts existing in -parallel, like plain interp-level coroutines and -app-level structures like coroutines, greenlets and -tasklets. -Every concept is associated with its own costate object. -This allows for peaceful co-existence of many concepts. -The type of a switch is determined by the target's costate. 
-""" - -import py; py.test.skip("fixme: rewrite using rlib.rstacklet") -# XXX ^^^ the reason it is not done is that pypy.rlib.rcoroutine -# plus pypy/module/_stackless look like faaaaaar too much code -# to me :-( - -from pypy.rlib.rstack import yield_current_frame_to_caller -from pypy.rlib.objectmodel import we_are_translated - -from pypy.interpreter.error import OperationError - -try: - from greenlet import greenlet - main_greenlet = greenlet.getcurrent() -except (ImportError, ValueError): - def greenlet(*args, **kwargs): - raise NotImplementedError("need either greenlets or a translated version of pypy") - -class FrameChain(object): - """Greenlet-based emulation of the primitive rstack 'frames' of RPython""" - - def __init__(self, thunk=None): - if thunk: - self.greenlet = greenlet(thunk) - else: - self.greenlet = greenlet.getcurrent() - - def switch(self): - last = FrameChain() - return self.greenlet.switch(last) - -import sys, os - -def make_coroutine_classes(baseclass): - class BaseCoState(object): - def __init__(self): - self.current = self.main = None - - def __repr__(self): - "NOT_RPYTHON" - # for debugging only - return '<%s current=%r>' % (self.__class__.__name__, self.current) - - def update(self, new): - syncstate.leaving = self.current - syncstate.entering = new - self.current = new - frame, new.frame = new.frame, None - return frame - - - class CoState(BaseCoState): - def __init__(self): - BaseCoState.__init__(self) - self.current = self.main = Coroutine(self) - - class CoroutineDamage(SystemError): - pass - - - class SyncState(object): - def __init__(self): - self.reset() - - def reset(self): - self.default_costate = None - self.leaving = None - self.entering = None - self.things_to_do = False - self.temp_exc = None - self.to_delete = [] - - def switched(self, incoming_frame): - left = syncstate.leaving - entered = syncstate.entering - syncstate.leaving = syncstate.entering = None - if left is not None: # mostly to work around an annotation problem; 
- # should not really be None - left.frame = incoming_frame - left.goodbye() - if entered is not None: - entered.hello() - if self.things_to_do: - self._do_things_to_do() - - def push_exception(self, exc): - self.things_to_do = True - self.temp_exc = exc - - def check_for_zombie(self, obj): - return obj in self.to_delete - - def postpone_deletion(self, obj): - self.to_delete.append(obj) - self.things_to_do = True - - def _do_things_to_do(self): - if self.temp_exc is not None: - # somebody left an unhandled exception and switched to us. - # this both provides default exception handling and the - # way to inject an exception, like CoroutineExit. - e, self.temp_exc = self.temp_exc, None - self.things_to_do = bool(self.to_delete) - raise e - while self.to_delete: - delete, self.to_delete = self.to_delete, [] - for obj in delete: - obj.parent = obj.costate.current - obj._kill_finally() - else: - self.things_to_do = False - - def _freeze_(self): - self.reset() - return False - - syncstate = SyncState() - - - class CoroutineExit(SystemExit): - # XXX SystemExit's __init__ creates problems in bookkeeper. 
- def __init__(self): - pass - - class AbstractThunk(object): - def call(self): - raise NotImplementedError("abstract base class") - - - class Coroutine(baseclass): - def __init__(self, state=None): - self.frame = None - if state is None: - state = self._get_default_costate() - self.costate = state - self.parent = None - self.thunk = None - self.coroutine_exit = False - - def __repr__(self): - 'NOT_RPYTHON' - # just for debugging - if hasattr(self, '__name__'): - return '' % (self.__name__, self.frame, self.thunk is not None) - else: - return '' % (self.frame, self.thunk is not None) - - def _get_default_costate(): - state = syncstate.default_costate - if state is None: - state = syncstate.default_costate = CoState() - return state - _get_default_costate = staticmethod(_get_default_costate) - - def _get_default_parent(self): - return self.costate.current - - def bind(self, thunk): - assert isinstance(thunk, AbstractThunk) - if self.frame is not None: - raise CoroutineDamage - if self.parent is None: - self.parent = self._get_default_parent() - assert self.parent is not None - self.thunk = thunk - if we_are_translated(): - self.frame = self._bind() - else: - self.frame = self._greenlet_bind() - - def _greenlet_bind(self): - weak = [self] - def _greenlet_execute(incoming_frame): - try: - chain2go2next = weak[0]._execute(incoming_frame) - except: - # no exception is supposed to get out of _execute() - # better report it directly into the main greenlet then, - # and hidden to prevent catching - main_greenlet.throw(AssertionError( - "unexpected exception out of Coroutine._execute()", - *sys.exc_info())) - assert 0 - del weak[0] - greenlet.getcurrent().parent = chain2go2next.greenlet - return None # as the result of the FrameChain.switch() - chain = FrameChain(_greenlet_execute) - return chain - - def _bind(self): - state = self.costate - incoming_frame = yield_current_frame_to_caller() - self = state.current - return self._execute(incoming_frame) - - def _execute(self, 
incoming_frame): - state = self.costate - try: - try: - try: - exc = None - thunk = self.thunk - self.thunk = None - syncstate.switched(incoming_frame) - thunk.call() - except Exception, e: - exc = e - raise - finally: - # warning! we must reload the 'self' from the costate, - # because after a clone() the 'self' of both copies - # point to the original! - self = state.current - self.finish(exc) - except CoroutineExit: - pass - except Exception, e: - if self.coroutine_exit is False: - # redirect all unhandled exceptions to the parent - syncstate.push_exception(e) - - while self.parent is not None and self.parent.frame is None: - # greenlet behavior is fine - self.parent = self.parent.parent - return state.update(self.parent) - - def switch(self): - if self.frame is None: - # considered a programming error. - # greenlets and tasklets have different ideas about this. - raise CoroutineDamage - state = self.costate - incoming_frame = state.update(self).switch() - syncstate.switched(incoming_frame) - - def kill(self): - self._kill(CoroutineExit()) - - def _kill(self, exc): - if self.frame is None: - return - state = self.costate - syncstate.push_exception(exc) - # careful here - if setting self.parent to state.current would - # create a loop, break it. The assumption is that 'self' - # will die, so that state.current's chain of parents can be - # modified to skip 'self' without too many people noticing. - p = state.current - if p is self or self.parent is None: - pass # killing the current of the main - don't change any parent - else: - while p.parent is not None: - if p.parent is self: - p.parent = self.parent - break - p = p.parent - self.parent = state.current - self.switch() - - def _kill_finally(self): - try: - self._userdel() - except Exception: - pass # maybe print a warning? 
- self.kill() - - __already_postponed = False - - def __del__(self): - # provide the necessary clean-up - # note that AppCoroutine has to take care about this - # as well, including a check for user-supplied __del__. - # Additionally note that in the context of __del__, we are - # not in the position to issue a switch. - # we defer it completely. - - # it is necessary to check whether syncstate is None because CPython - # sets it to None when it cleans up the modules, which will lead to - # very strange effects - - if not we_are_translated(): - # we need to make sure that we postpone each coroutine only once on - # top of CPython, because this resurrects the coroutine and CPython - # calls __del__ again, thus postponing and resurrecting the - # coroutine once more :-( - if self.__already_postponed: - return - self.__already_postponed = True - if syncstate is not None: - syncstate.postpone_deletion(self) - - # coroutines need complete control over their __del__ behaviour. In - # particular they need to care about calling space.userdel themselves - handle_del_manually = True - - def _userdel(self): - # override this for exposed coros - pass - - def is_alive(self): - return self.frame is not None or self is self.costate.current - - def is_zombie(self): - return self.frame is not None and syncstate.check_for_zombie(self) - - def getcurrent(): - costate = Coroutine._get_default_costate() - return costate.current - getcurrent = staticmethod(getcurrent) - - def getmain(): - costate = Coroutine._get_default_costate() - return costate.main - getmain = staticmethod(getmain) - - def hello(self): - "Called when execution is transferred into this coroutine." - - def goodbye(self): - "Called just after execution is transferred away from this coroutine." 
- - def finish(self, exc=None): - "stephan forgot me" - - return locals() - -# _________________________________________________ diff --git a/pypy/rlib/rstring.py b/pypy/rlib/rstring.py --- a/pypy/rlib/rstring.py +++ b/pypy/rlib/rstring.py @@ -2,7 +2,8 @@ """ from pypy.annotation.model import (SomeObject, SomeString, s_None, SomeChar, - SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr) + SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) +from pypy.tool.pairtype import pair, pairtype from pypy.rpython.extregistry import ExtRegistryEntry @@ -170,3 +171,24 @@ class UnicodeBuilderEntry(BaseEntry, ExtRegistryEntry): _about_ = UnicodeBuilder use_unicode = True + +class __extend__(pairtype(SomeStringBuilder, SomePBC)): + def union((sb, p)): + assert p.const is None + return SomeStringBuilder(can_be_None=True) + +class __extend__(pairtype(SomePBC, SomeStringBuilder)): + def union((p, sb)): + assert p.const is None + return SomeStringBuilder(can_be_None=True) + +class __extend__(pairtype(SomeUnicodeBuilder, SomePBC)): + def union((sb, p)): + assert p.const is None + return SomeUnicodeBuilder(can_be_None=True) + +class __extend__(pairtype(SomePBC, SomeUnicodeBuilder)): + def union((p, sb)): + assert p.const is None + return SomeUnicodeBuilder(can_be_None=True) + diff --git a/pypy/rlib/rwin32.py b/pypy/rlib/rwin32.py --- a/pypy/rlib/rwin32.py +++ b/pypy/rlib/rwin32.py @@ -55,14 +55,19 @@ SYSTEMTIME = rffi_platform.Struct('SYSTEMTIME', []) - OSVERSIONINFO = rffi_platform.Struct( - 'OSVERSIONINFO', + OSVERSIONINFOEX = rffi_platform.Struct( + 'OSVERSIONINFOEX', [('dwOSVersionInfoSize', rffi.UINT), ('dwMajorVersion', rffi.UINT), ('dwMinorVersion', rffi.UINT), ('dwBuildNumber', rffi.UINT), ('dwPlatformId', rffi.UINT), - ('szCSDVersion', rffi.CFixedArray(lltype.Char, 1))]) + ('szCSDVersion', rffi.CFixedArray(lltype.Char, 1)), + ('wServicePackMajor', rffi.USHORT), + ('wServicePackMinor', rffi.USHORT), + ('wSuiteMask', rffi.USHORT), + 
('wProductType', rffi.UCHAR), + ]) LPSECURITY_ATTRIBUTES = rffi_platform.SimpleType( "LPSECURITY_ATTRIBUTES", rffi.CCHARP) @@ -225,14 +230,14 @@ lltype.free(buf, flavor='raw') _GetVersionEx = winexternal('GetVersionExA', - [lltype.Ptr(OSVERSIONINFO)], + [lltype.Ptr(OSVERSIONINFOEX)], DWORD) @jit.dont_look_inside def GetVersionEx(): - info = lltype.malloc(OSVERSIONINFO, flavor='raw') + info = lltype.malloc(OSVERSIONINFOEX, flavor='raw') rffi.setintfield(info, 'c_dwOSVersionInfoSize', - rffi.sizeof(OSVERSIONINFO)) + rffi.sizeof(OSVERSIONINFOEX)) try: if not _GetVersionEx(info): raise lastWindowsError() @@ -241,7 +246,11 @@ rffi.cast(lltype.Signed, info.c_dwBuildNumber), rffi.cast(lltype.Signed, info.c_dwPlatformId), rffi.charp2str(rffi.cast(rffi.CCHARP, - info.c_szCSDVersion))) + info.c_szCSDVersion)), + rffi.cast(lltype.Signed, info.c_wServicePackMajor), + rffi.cast(lltype.Signed, info.c_wServicePackMinor), + rffi.cast(lltype.Signed, info.c_wSuiteMask), + rffi.cast(lltype.Signed, info.c_wProductType)) finally: lltype.free(info, flavor='raw') diff --git a/pypy/rlib/rzlib.py b/pypy/rlib/rzlib.py --- a/pypy/rlib/rzlib.py +++ b/pypy/rlib/rzlib.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import sys from pypy.rlib.rstring import StringBuilder diff --git a/pypy/rlib/test/test_objectmodel.py b/pypy/rlib/test/test_objectmodel.py --- a/pypy/rlib/test/test_objectmodel.py +++ b/pypy/rlib/test/test_objectmodel.py @@ -424,3 +424,32 @@ if option.view: graph.show() return graph + + +def test_newlist(): + from pypy.annotation.model import SomeInteger + def f(z): + x = newlist(sizehint=38) + if z < 0: + x.append(1) + return len(x) + + graph = getgraph(f, [SomeInteger()]) + for llop in graph.startblock.operations: + if llop.opname == 'malloc_varsize': + break + assert llop.args[2].value == 38 + +def test_newlist_nonconst(): + from pypy.annotation.model import SomeInteger + def f(z): + x = newlist(sizehint=z) + return len(x) + + graph = getgraph(f, [SomeInteger()]) + for 
llop in graph.startblock.operations: + if llop.opname == 'malloc_varsize': + break + assert llop.args[2] is graph.startblock.inputargs[0] + + diff --git a/pypy/rlib/test/test_rcoroutine.py b/pypy/rlib/test/test_rcoroutine.py deleted file mode 100644 --- a/pypy/rlib/test/test_rcoroutine.py +++ /dev/null @@ -1,348 +0,0 @@ -""" -testing coroutines at interprepter level -""" -import py -import os -from pypy import conftest; conftest.translation_test_so_skip_if_appdirect() -from pypy.rlib.rcoroutine import make_coroutine_classes -from pypy.translator.c.test.test_stackless import StacklessTest -from pypy.translator.c import gc - -def setup_module(mod): - py.test.importorskip('greenlet') - -d = make_coroutine_classes(object) -syncstate = d['syncstate'] -Coroutine = d['Coroutine'] -AbstractThunk = d['AbstractThunk'] - -def output(stuff): - os.write(2, stuff + '\n') - -class _TestCoroutine(StacklessTest): - backendopt = True - Coroutine = Coroutine - - def setup_method(self, method): - syncstate.reset() - - def _freeze_(self): # for 'self.Coroutine' - return True - - def test_coroutine1(self): - - def g(lst, coros): - coro_f, coro_g, coro_h = coros - lst.append(2) - output('g appended 2') - coro_h.switch() - lst.append(5) - output('g appended 5') - - def h(lst, coros): - coro_f, coro_g, coro_h = coros - lst.append(3) - output('h appended 3') - coro_f.switch() - lst.append(7) - output('h appended 7') - - class T(AbstractThunk): - def __init__(self, func, arg1, arg2): - self.func = func - self.arg1 = arg1 - self.arg2 = arg2 - def call(self): - self.func(self.arg1, self.arg2) - - def f(): - lst = [1] - coro_f = Coroutine.getcurrent() - coro_g = self.Coroutine() - coro_h = self.Coroutine() - coros = [coro_f, coro_g, coro_h] - thunk_g = T(g, lst, coros) - output('binding g after f set 1') - coro_g.bind(thunk_g) - thunk_h = T(h, lst, coros) - output('binding h after f set 1') - coro_h.bind(thunk_h) - output('switching to g') - coro_g.switch() - lst.append(4) - output('f appended 
4') - coro_g.switch() - lst.append(6) - output('f appended 6') - coro_h.switch() - lst.append(8) - output('f appended 8') - n = 0 - for i in lst: - n = n*10 + i - return n - - data = self.wrap_stackless_function(f) - assert data == 12345678 - - def test_coroutine2(self): - - class TBase(AbstractThunk): - def call(self): - pass - - class T(TBase): - def __init__(self, func, arg1, arg2): - self.func = func - self.arg1 = arg1 - self.arg2 = arg2 - def call(self): - self.res = self.func(self.arg1, self.arg2) - - class T1(TBase): - def __init__(self, func, arg1): - self.func = func - self.arg1 = arg1 - def call(self): - self.res = self.func(self.arg1) - - def g(lst, coros): - coro_f1, coro_g, coro_h = coros - lst.append(2) - output('g appended 2') - coro_h.switch() - lst.append(5) - output('g appended 5') - output('exiting g') - - def h(lst, coros): - coro_f1, coro_g, coro_h = coros - lst.append(3) - output('h appended 3') - coro_f1.switch() - lst.append(7) - output('h appended 7') - output('exiting h') - - def f1(coro_f1): - lst = [1] - coro_g = self.Coroutine() - coro_g.__name__ = 'coro_g' - coro_h = self.Coroutine() - coro_h.__name__ = 'coro_h' - coros = [coro_f1, coro_g, coro_h] - thunk_g = T(g, lst, coros) - output('binding g after f1 set 1') - coro_g.bind(thunk_g) - thunk_h = T(h, lst, coros) - output('binding h after f1 set 1') - coro_h.bind(thunk_h) - output('switching to g') - coro_g.switch() - lst.append(4) - output('f1 appended 4') - coro_g.switch() - lst.append(6) - output('f1 appended 6') - coro_h.switch() - lst.append(8) - output('f1 appended 8') - n = 0 - for i in lst: - n = n*10 + i - output('exiting f1') - return n - - def f(): - coro_f = Coroutine.getcurrent() - coro_f.__name__ = 'coro_f' - coro_f1 = self.Coroutine() - coro_f1.__name__ = 'coro_f1' - thunk_f1 = T1(f1, coro_f1) - output('binding f1 after f set 1') - coro_f1.bind(thunk_f1) - coro_f1.switch() - output('return to main :-(') - return thunk_f1.res - - data = self.wrap_stackless_function(f) - 
assert data == 12345678 - - def test_kill_raise_del_coro(self): - class T(AbstractThunk): - def __init__(self, func, arg): - self.func = func - self.arg = arg - def call(self): - self.func(self.arg, self) - - def g(nrec, t, count=0): - t.count = count - if nrec < 0: - raise ValueError - if nrec: - g(nrec-1, t, count+1) - Coroutine.getmain().switch() - - def f(): - assert Coroutine.getmain().frame is None - coro_g = self.Coroutine() - coro_g.__name__ = 'coro_g' - thunk_g = T(g, 42) - coro_g.bind(thunk_g) - coro_g.switch() - res = thunk_g.count - res *= 10 - res |= coro_g.frame is not None - # testing kill - coro_g.kill() - res *= 10 - res |= coro_g.frame is None - coro_g = self.Coroutine() - # see what happens if we __del__ - thunk_g = T(g, -42) - coro_g.bind(thunk_g) - try: - coro_g.switch() - except ValueError: - res += 500 - return res - - data = self.wrap_stackless_function(f) - assert data == 4711 - - def test_tree_compare(self): - class Node: - def __init__(self, value, left=None, right=None): - self.value = value - self.left = left - self.right = right - def __repr__(self): - return 'Node(%r, %r, %r)'%(self.value, self.left, self.right) - - tree1 = Node(1, Node(2, Node(3))) - tree2 = Node(1, Node(3, Node(2))) - tree3 = Node(1, Node(2), Node(3)) - - class Producer(AbstractThunk): - def __init__(self, tree, objects, consumer): - self.tree = tree - self.objects = objects - self.consumer = consumer - def produce(self, t): - if t is None: - return - self.objects.append(t.value) - self.consumer.switch() - self.produce(t.left) - self.produce(t.right) - def call(self): - self.produce(self.tree) - while 1: - self.consumer.switch() - class Consumer(AbstractThunk): - def __init__(self, tree, objects, producer): - self.tree = tree - self.objects = objects - self.producer = producer - def consume(self, t): - if t is None: - return True - self.producer.switch() - if not self.objects: - return False - if self.objects.pop(0) != t.value: - return False - if not 
self.consume(t.left): - return False - return self.consume(t.right) - - def call(self): - self.result = self.consume(self.tree) From noreply at buildbot.pypy.org Tue Sep 13 06:01:28 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Tue, 13 Sep 2011 06:01:28 +0200 (CEST) Subject: [pypy-commit] pypy numpy-singledim: close old branch Message-ID: <20110913040128.9257482041@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: numpy-singledim Changeset: r47237:ec0e6a5ca3db Date: 2011-09-12 21:58 -0600 http://bitbucket.org/pypy/pypy/changeset/ec0e6a5ca3db/ Log: close old branch From noreply at buildbot.pypy.org Tue Sep 13 06:01:30 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Tue, 13 Sep 2011 06:01:30 +0200 (CEST) Subject: [pypy-commit] pypy unpack-ints-fast: close failed branch. unroll-if-alt branch should make rstruct faster instead. Message-ID: <20110913040130.2B41F82213@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unpack-ints-fast Changeset: r47238:3453088df88a Date: 2011-09-12 22:01 -0600 http://bitbucket.org/pypy/pypy/changeset/3453088df88a/ Log: close failed branch. unroll-if-alt branch should make rstruct faster instead. 
From noreply at buildbot.pypy.org Tue Sep 13 10:27:27 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 Sep 2011 10:27:27 +0200 (CEST) Subject: [pypy-commit] pypy default: a test for 2868163b3dc4 Message-ID: <20110913082727.92DC082041@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47239:e19cad73e3c7 Date: 2011-09-13 10:26 +0200 http://bitbucket.org/pypy/pypy/changeset/e19cad73e3c7/ Log: a test for 2868163b3dc4 diff --git a/pypy/rpython/test/test_rbuilder.py b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -101,6 +101,23 @@ res = self.interpret(func, [1]) assert res + def test_unicode_builder_or_none(self): + def g(s): + if s: + s.append(u"3") + return bool(s) + + def func(i): + if i: + s = UnicodeBuilder() + else: + s = None + return g(s) + res = self.interpret(func, [0]) + assert not res + res = self.interpret(func, [1]) + assert res + class TestLLtype(BaseTestStringBuilder, LLRtypeMixin): pass From noreply at buildbot.pypy.org Tue Sep 13 10:27:28 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 Sep 2011 10:27:28 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20110913082728.CA89982213@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r47240:100c7084f643 Date: 2011-09-13 10:26 +0200 http://bitbucket.org/pypy/pypy/changeset/100c7084f643/ Log: merge diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -44,7 +44,7 @@ if w_iterable is not None: # unfortunately this is duplicating space.unpackiterable to avoid # assigning a new RPython list to 'wrappeditems', which defeats the - # W_FastSeqIterObject optimization. + # W_FastListIterObject optimization. 
if isinstance(w_iterable, W_ListObject): items_w.extend(w_iterable.wrappeditems) elif isinstance(w_iterable, W_TupleObject): From noreply at buildbot.pypy.org Tue Sep 13 10:58:00 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Sep 2011 10:58:00 +0200 (CEST) Subject: [pypy-commit] pypy default: Tighten the checks. Message-ID: <20110913085800.EEA9182041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47241:d462a721df1b Date: 2011-09-12 22:22 +0200 http://bitbucket.org/pypy/pypy/changeset/d462a721df1b/ Log: Tighten the checks. diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -43,16 +43,27 @@ raise getmemoryerror(self.space) def switch(self, w_to): + sthread = self.sthread + if sthread is None: + start_state.clear() + raise geterror(self.space, "continulet not initialized yet") + if sthread.is_empty_handle(self.h): + start_state.clear() + raise geterror(self.space, "continulet already finished") to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None: - if to.sthread is None: + if to.sthread is not sthread: start_state.clear() - raise geterror(self.space, "continulet not initialized yet") + if to.sthread is None: + msg = "continulet not initialized yet" + else: + msg = "cross-thread double switch" + raise geterror(self.space, msg) if self is to: # double-switch to myself: no-op return get_result() - if self.sthread is None: - start_state.clear() - raise geterror(self.space, "continulet not initialized yet") + if sthread.is_empty_handle(to.h): + start_state.clear() + raise geterror(self.space, "continulet already finished") ec = self.check_sthread() # start_state.origin = self @@ -63,14 +74,8 @@ # double switch: the final destination is to.h start_state.destination = to # - h = start_state.destination.h - sthread = self.sthread - if 
sthread.is_empty_handle(h): - start_state.clear() - raise geterror(self.space, "continulet already finished") - # try: - do_switch(sthread, h) + do_switch(sthread, start_state.destination.h) except MemoryError: start_state.clear() raise getmemoryerror(self.space) diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -643,6 +643,16 @@ assert res == "done" main() + def test_bug_finish_with_already_finished_stacklet(self): + from _continuation import continulet, error + # make an already-finished continulet + c1 = continulet(lambda x: x) + c1.switch() + # make another continulet + c2 = continulet(lambda x: x) + # this switch is forbidden, because it causes a crash when c2 finishes + raises(error, c1.switch, to=c2) + def test_various_depths(self): skip("may fail on top of CPython") # run it from test_translated, but not while being actually translated From noreply at buildbot.pypy.org Tue Sep 13 10:58:02 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Sep 2011 10:58:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Performance issue: reading the f_back attribute at app-level would now Message-ID: <20110913085802.33DAD82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47242:5809fd22dee2 Date: 2011-09-13 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/5809fd22dee2/ Log: Performance issue: reading the f_back attribute at app-level would now force its result. Thanks fijal for spotting it! 
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -48,6 +48,7 @@ return frame @staticmethod + @jit.unroll_safe # should usually loop 0 times, very rarely more than once def getnextframe_nohidden(frame): frame = frame.f_backref() while frame and frame.hide(): diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -614,8 +614,8 @@ return self.get_builtin().getdict(space) def fget_f_back(self, space): - f_backref = ExecutionContext.getnextframe_nohidden(self) - return self.space.wrap(f_backref) + f_back = ExecutionContext.getnextframe_nohidden(self) + return self.space.wrap(f_back) def fget_f_lasti(self, space): return self.space.wrap(self.last_instr) From noreply at buildbot.pypy.org Tue Sep 13 10:58:03 2011 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Sep 2011 10:58:03 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110913085803.68A0F82041@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47243:179987792c13 Date: 2011-09-13 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/179987792c13/ Log: merge heads diff --git a/pypy/rpython/test/test_rbuilder.py b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -101,6 +101,23 @@ res = self.interpret(func, [1]) assert res + def test_unicode_builder_or_none(self): + def g(s): + if s: + s.append(u"3") + return bool(s) + + def func(i): + if i: + s = UnicodeBuilder() + else: + s = None + return g(s) + res = self.interpret(func, [0]) + assert not res + res = self.interpret(func, [1]) + assert res + class TestLLtype(BaseTestStringBuilder, LLRtypeMixin): pass From notifications-noreply at bitbucket.org Tue Sep 13 21:05:35 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: 
Tue, 13 Sep 2011 19:05:35 -0000 Subject: [pypy-commit] Notification: pypy3k Message-ID: <20110913190535.20101.27250@bitbucket01.managed.contegix.com> You have received a notification from Benjamin Peterson. Hi, I forked pypy. My fork is at https://bitbucket.org/gutworth/pypy3k. -- Disable notifications at https://bitbucket.org/account/notifications/ From noreply at buildbot.pypy.org Wed Sep 14 16:22:32 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 14 Sep 2011 16:22:32 +0200 (CEST) Subject: [pypy-commit] pypy default: remove outdated comment Message-ID: <20110914142232.DFE2A82298@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r47264:0721688d084e Date: 2011-09-14 16:21 +0200 http://bitbucket.org/pypy/pypy/changeset/0721688d084e/ Log: remove outdated comment diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -347,7 +347,7 @@ SLOTS_STARTING_FROM = 3 -class BaseMapdictObject: # slightly evil to make it inherit from W_Root +class BaseMapdictObject: _mixin_ = True def _init_empty(self, map): From noreply at buildbot.pypy.org Wed Sep 14 18:24:10 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 18:24:10 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: hg merge default Message-ID: <20110914162410.412AD82298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47265:b848465e6cae Date: 2011-09-14 14:18 +0200 http://bitbucket.org/pypy/pypy/changeset/b848465e6cae/ Log: hg merge default diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -62,7 +62,6 @@ from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION from _ssl import SSLError from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED -from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 from _ssl import 
RAND_status, RAND_egd, RAND_add from _ssl import \ SSL_ERROR_ZERO_RETURN, \ @@ -74,6 +73,18 @@ SSL_ERROR_WANT_CONNECT, \ SSL_ERROR_EOF, \ SSL_ERROR_INVALID_ERROR_CODE +from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 +_PROTOCOL_NAMES = { + PROTOCOL_TLSv1: "TLSv1", + PROTOCOL_SSLv23: "SSLv23", + PROTOCOL_SSLv3: "SSLv3", +} +try: + from _ssl import PROTOCOL_SSLv2 +except ImportError: + pass +else: + _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2" from socket import socket, _fileobject, _delegate_methods, error as socket_error from socket import getnameinfo as _getnameinfo @@ -408,16 +419,7 @@ return DER_cert_to_PEM_cert(dercert) def get_protocol_name(protocol_code): - if protocol_code == PROTOCOL_TLSv1: - return "TLSv1" - elif protocol_code == PROTOCOL_SSLv23: - return "SSLv23" - elif protocol_code == PROTOCOL_SSLv2: - return "SSLv2" - elif protocol_code == PROTOCOL_SSLv3: - return "SSLv3" - else: - return "" + return _PROTOCOL_NAMES.get(protocol_code, '') # a replacement for the old socket.ssl function diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -58,32 +58,35 @@ # Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2 def skip_if_broken_ubuntu_ssl(func): - # We need to access the lower-level wrapper in order to create an - # implicit SSL context without trying to connect or listen. - try: - import _ssl - except ImportError: - # The returned function won't get executed, just ignore the error - pass - @functools.wraps(func) - def f(*args, **kwargs): + if hasattr(ssl, 'PROTOCOL_SSLv2'): + # We need to access the lower-level wrapper in order to create an + # implicit SSL context without trying to connect or listen. 
try: - s = socket.socket(socket.AF_INET) - _ssl.sslwrap(s._sock, 0, None, None, - ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) - except ssl.SSLError as e: - if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and - platform.linux_distribution() == ('debian', 'squeeze/sid', '') - and 'Invalid SSL protocol variant specified' in str(e)): - raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") - return func(*args, **kwargs) - return f + import _ssl + except ImportError: + # The returned function won't get executed, just ignore the error + pass + @functools.wraps(func) + def f(*args, **kwargs): + try: + s = socket.socket(socket.AF_INET) + _ssl.sslwrap(s._sock, 0, None, None, + ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) + except ssl.SSLError as e: + if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and + platform.linux_distribution() == ('debian', 'squeeze/sid', '') + and 'Invalid SSL protocol variant specified' in str(e)): + raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") + return func(*args, **kwargs) + return f + else: + return func class BasicSocketTests(unittest.TestCase): def test_constants(self): - ssl.PROTOCOL_SSLv2 + #ssl.PROTOCOL_SSLv2 ssl.PROTOCOL_SSLv23 ssl.PROTOCOL_SSLv3 ssl.PROTOCOL_TLSv1 @@ -964,7 +967,8 @@ try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) @@ -976,7 +980,8 @@ try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) 
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False) diff --git a/lib-python/modified-2.7/ssl.py b/lib-python/modified-2.7/ssl.py --- a/lib-python/modified-2.7/ssl.py +++ b/lib-python/modified-2.7/ssl.py @@ -62,7 +62,6 @@ from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION from _ssl import SSLError from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED -from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 from _ssl import RAND_status, RAND_egd, RAND_add from _ssl import \ SSL_ERROR_ZERO_RETURN, \ @@ -74,6 +73,18 @@ SSL_ERROR_WANT_CONNECT, \ SSL_ERROR_EOF, \ SSL_ERROR_INVALID_ERROR_CODE +from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 +_PROTOCOL_NAMES = { + PROTOCOL_TLSv1: "TLSv1", + PROTOCOL_SSLv23: "SSLv23", + PROTOCOL_SSLv3: "SSLv3", +} +try: + from _ssl import PROTOCOL_SSLv2 +except ImportError: + pass +else: + _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2" from socket import socket, _fileobject, error as socket_error from socket import getnameinfo as _getnameinfo @@ -400,16 +411,7 @@ return DER_cert_to_PEM_cert(dercert) def get_protocol_name(protocol_code): - if protocol_code == PROTOCOL_TLSv1: - return "TLSv1" - elif protocol_code == PROTOCOL_SSLv23: - return "SSLv23" - elif protocol_code == PROTOCOL_SSLv2: - return "SSLv2" - elif protocol_code == PROTOCOL_SSLv3: - return "SSLv3" - else: - return "" + return _PROTOCOL_NAMES.get(protocol_code, '') # a replacement for the old socket.ssl function diff --git a/lib-python/modified-2.7/test/test_ssl.py b/lib-python/modified-2.7/test/test_ssl.py --- a/lib-python/modified-2.7/test/test_ssl.py +++ 
b/lib-python/modified-2.7/test/test_ssl.py @@ -58,32 +58,35 @@ # Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2 def skip_if_broken_ubuntu_ssl(func): - # We need to access the lower-level wrapper in order to create an - # implicit SSL context without trying to connect or listen. - try: - import _ssl - except ImportError: - # The returned function won't get executed, just ignore the error - pass - @functools.wraps(func) - def f(*args, **kwargs): + if hasattr(ssl, 'PROTOCOL_SSLv2'): + # We need to access the lower-level wrapper in order to create an + # implicit SSL context without trying to connect or listen. try: - s = socket.socket(socket.AF_INET) - _ssl.sslwrap(s._sock, 0, None, None, - ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) - except ssl.SSLError as e: - if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and - platform.linux_distribution() == ('debian', 'squeeze/sid', '') - and 'Invalid SSL protocol variant specified' in str(e)): - raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") - return func(*args, **kwargs) - return f + import _ssl + except ImportError: + # The returned function won't get executed, just ignore the error + pass + @functools.wraps(func) + def f(*args, **kwargs): + try: + s = socket.socket(socket.AF_INET) + _ssl.sslwrap(s._sock, 0, None, None, + ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) + except ssl.SSLError as e: + if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and + platform.linux_distribution() == ('debian', 'squeeze/sid', '') + and 'Invalid SSL protocol variant specified' in str(e)): + raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") + return func(*args, **kwargs) + return f + else: + return func class BasicSocketTests(unittest.TestCase): def test_constants(self): - ssl.PROTOCOL_SSLv2 + #ssl.PROTOCOL_SSLv2 ssl.PROTOCOL_SSLv23 ssl.PROTOCOL_SSLv3 ssl.PROTOCOL_TLSv1 @@ -966,7 +969,8 @@ try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) 
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) @@ -978,7 +982,8 @@ try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -48,23 +48,23 @@ def switch(self, *args): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." 
- return self.__switch(_continulet.switch, args) + return self.__switch('switch', args) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" - return self.__switch(_continulet.throw, typ, val, tb) + return self.__switch('throw', typ, val, tb) - def __switch(target, unbound_method, *args): + def __switch(target, methodname, *args): current = getcurrent() # while not target: if not target.__started: - if unbound_method != _continulet.throw: + if methodname == 'switch': greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw _continulet.__init__(target, greenlet_func, *args) - unbound_method = _continulet.switch + methodname = 'switch' args = () target.__started = True break @@ -75,22 +75,8 @@ target = target.parent # try: - if current.__main: - if target.__main: - # switch from main to main - if unbound_method == _continulet.throw: - raise args[0], args[1], args[2] - (args,) = args - else: - # enter from main to target - args = unbound_method(target, *args) - else: - if target.__main: - # leave to go to target=main - args = unbound_method(current, *args) - else: - # switch from non-main to non-main - args = unbound_method(current, *args, to=target) + unbound_method = getattr(_continulet, methodname) + args = unbound_method(current, *args, to=target) except GreenletExit, e: args = (e,) finally: diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -7,19 +7,12 @@ import traceback import _continuation -from functools import partial class TaskletExit(Exception): pass CoroutineExit = TaskletExit -class GWrap(_continuation.continulet): - """This is just a wrapper around continulet to allow - to stick additional attributes to a continulet. 
- To be more concrete, we need a backreference to - the coroutine object""" - class coroutine(object): "we can't have continulet as a base, because continulets can't be rebound" @@ -42,12 +35,10 @@ arguments *argl, **argd """ if self._frame is None or not self._frame.is_pending(): - - def _func(c, *args, **kwargs): - return func(*args, **kwargs) - - run = partial(_func, *argl, **argd) - self._frame = frame = GWrap(run) + def run(c): + _tls.current_coroutine = self + return func(*argl, **argd) + self._frame = frame = _continuation.continulet(run) else: raise ValueError("cannot bind a bound coroutine") @@ -58,16 +49,18 @@ None is returned """ current = _getcurrent() - current._jump_to(self) - - def _jump_to(self, coroutine): - _tls.current_coroutine = coroutine - self._frame.switch(to=coroutine._frame) + try: + current._frame.switch(to=self._frame) + finally: + _tls.current_coroutine = current def kill(self): """coro.kill() : kill coroutine coro""" - _tls.current_coroutine = self - self._frame.throw(CoroutineExit) + current = _getcurrent() + try: + current._frame.throw(CoroutineExit, to=self._frame) + finally: + _tls.current_coroutine = current def _is_alive(self): if self._frame is None: @@ -78,10 +71,7 @@ def getcurrent(): """coroutine.getcurrent() -> the currently running coroutine""" - try: - return _getcurrent() - except AttributeError: - return _maincoro + return _getcurrent() getcurrent = staticmethod(getcurrent) def __reduce__(self): @@ -109,13 +99,10 @@ # create the main coroutine for this thread _tls.current_coroutine = None main_coroutine = coroutine() - main_coroutine.bind(lambda x:x) + typ = _continuation.continulet + main_coroutine._frame = typ.__new__(typ) _tls.main_coroutine = main_coroutine _tls.current_coroutine = main_coroutine - return main_coroutine - - -_maincoro = _coroutine_create_main() from collections import deque @@ -161,10 +148,10 @@ _last_task = next assert not next.blocked if next is not current: - try: + #try: next.switch() - except 
CoroutineExit: - raise TaskletExit + #except CoroutineExit: --- they are the same anyway + # raise TaskletExit return current def set_schedule_callback(callback): @@ -459,6 +446,7 @@ def _func(): try: try: + coroutine.switch(back) func(*argl, **argd) except TaskletExit: pass @@ -468,6 +456,8 @@ self.func = None coroutine.bind(self, _func) + back = _getcurrent() + coroutine.switch(self) self.alive = True _scheduler_append(self) return self diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -66,7 +66,7 @@ In practice, in PyPy, you cannot change the ``f_back`` of an abitrary frame, but only of frames stored in ``continulets``. -Continulets are internally implemented using stacklets. Stacklets are a +Continulets are internally implemented using stacklets_. Stacklets are a bit more primitive (they are really one-shot continuations), but that idea only works in C, not in Python. The basic idea of continulets is to have at any point in time a complete valid stack; this is important @@ -280,6 +280,24 @@ to use other interfaces like genlets and greenlets.) +Stacklets ++++++++++ + +Continulets are internally implemented using stacklets, which is the +generic RPython-level building block for "one-shot continuations". For +more information about them please see the documentation in the C source +at `pypy/translator/c/src/stacklet/stacklet.h`_. + +The module ``pypy.rlib.rstacklet`` is a thin wrapper around the above +functions. The key point is that new() and switch() always return a +fresh stacklet handle (or an empty one), and switch() additionally +consumes one. It makes no sense to have code in which the returned +handle is ignored, or used more than once. Note that ``stacklet.c`` is +written assuming that the user knows that, and so no additional checking +occurs; this can easily lead to obscure crashes if you don't use a +wrapper like PyPy's '_continuation' module. 
+ + Theory of composability +++++++++++++++++++++++ diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3276,7 +3276,27 @@ return n self.meta_interp(f, [10], repeat=3) - + + def test_jit_merge_point_with_pbc(self): + driver = JitDriver(greens = [], reds = ['x']) + + class A(object): + def __init__(self, x): + self.x = x + def _freeze_(self): + return True + pbc = A(1) + + def main(x): + return f(x, pbc) + + def f(x, pbc): + while x > 0: + driver.jit_merge_point(x = x) + x -= pbc.x + return x + + self.meta_interp(main, [10]) class TestLLtype(BaseLLtypeTests, LLJitMixin): pass diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -44,21 +44,23 @@ def switch(self, w_to): sthread = self.sthread - if sthread is None: - global_state.clear() - raise geterror(self.space, "continulet not initialized yet") - if sthread.is_empty_handle(self.h): + if sthread is not None and sthread.is_empty_handle(self.h): global_state.clear() raise geterror(self.space, "continulet already finished") to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) + if to is not None and to.sthread is None: + to = None + if sthread is None: # if self is non-initialized: + if to is not None: # if we are given a 'to' + self = to # then just use it and ignore 'self' + sthread = self.sthread + to = None + else: + return get_result() # else: no-op if to is not None: if to.sthread is not sthread: global_state.clear() - if to.sthread is None: - msg = "continulet not initialized yet" - else: - msg = "cross-thread double switch" - raise geterror(self.space, msg) + raise geterror(self.space, "cross-thread double switch") if self is to: # double-switch to myself: no-op return get_result() if 
sthread.is_empty_handle(to.h): diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -135,12 +135,6 @@ e = raises(error, c.switch) assert str(e.value) == "continulet already finished" - def test_not_initialized_yet(self): - from _continuation import continulet, error - c = continulet.__new__(continulet) - e = raises(error, c.switch) - assert str(e.value) == "continulet not initialized yet" - def test_go_depth2(self): from _continuation import continulet # @@ -254,6 +248,15 @@ res = c_upper.switch('D') assert res == 'E' + def test_switch_not_initialized(self): + from _continuation import continulet + c0 = continulet.__new__(continulet) + res = c0.switch() + assert res is None + res = c0.switch(123) + assert res == 123 + raises(ValueError, c0.throw, ValueError) + def test_exception_with_switch_depth2(self): from _continuation import continulet # @@ -499,16 +502,31 @@ assert res == 'z' raises(TypeError, c1.switch, to=c2) # "can't send non-None value" - def test_switch2_not_initialized_yet(self): - from _continuation import continulet, error + def test_switch2_not_initialized(self): + from _continuation import continulet + c0 = continulet.__new__(continulet) + c0bis = continulet.__new__(continulet) + res = c0.switch(123, to=c0) + assert res == 123 + res = c0.switch(123, to=c0bis) + assert res == 123 + raises(ValueError, c0.throw, ValueError, to=c0) + raises(ValueError, c0.throw, ValueError, to=c0bis) # def f1(c1): - not_reachable - # + c1.switch('a') + raises(ValueError, c1.switch, 'b') + raises(KeyError, c1.switch, 'c') + return 'd' c1 = continulet(f1) - c2 = continulet.__new__(continulet) - e = raises(error, c1.switch, to=c2) - assert str(e.value) == "continulet not initialized yet" + res = c0.switch(to=c1) + assert res == 'a' + res = c1.switch(to=c0) + assert res == 'b' + res = c1.throw(ValueError, 
to=c0) + assert res == 'c' + res = c0.throw(KeyError, to=c1) + assert res == 'd' def test_switch2_already_finished(self): from _continuation import continulet, error diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -52,7 +52,8 @@ constants["CERT_OPTIONAL"] = PY_SSL_CERT_OPTIONAL constants["CERT_REQUIRED"] = PY_SSL_CERT_REQUIRED -constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 +if not OPENSSL_NO_SSL2: + constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23 constants["PROTOCOL_TLSv1"] = PY_SSL_VERSION_TLS1 @@ -673,7 +674,7 @@ method = libssl_TLSv1_method() elif protocol == PY_SSL_VERSION_SSL3: method = libssl_SSLv3_method() - elif protocol == PY_SSL_VERSION_SSL2: + elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: method = libssl_SSLv2_method() elif protocol == PY_SSL_VERSION_SSL23: method = libssl_SSLv23_method() diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -508,7 +508,7 @@ return space._type_issubtype(w_sub, w_type) def isinstance(space, w_inst, w_type): - return space._type_isinstance(w_inst, w_type) + return space.wrap(space._type_isinstance(w_inst, w_type)) def issubtype_allow_override(space, w_sub, w_type): w_check = space.lookup(w_type, "__subclasscheck__") diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -369,8 +369,8 @@ newdata = [] for i in range(len(list_w)): w_s = list_w[i] - if not (space.is_true(space.isinstance(w_s, space.w_str)) or - space.is_true(space.isinstance(w_s, space.w_bytearray))): + if not (space.isinstance_w(w_s, space.w_str) or + space.isinstance_w(w_s, space.w_bytearray)): raise 
operationerrfmt( space.w_TypeError, "sequence item %d: expected string, %s " diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -127,8 +127,8 @@ and space.is_w(space.type(w_real), space.w_complex)): return w_real - if space.is_true(space.isinstance(w_real, space.w_str)) or \ - space.is_true(space.isinstance(w_real, space.w_unicode)): + if space.isinstance_w(w_real, space.w_str) or \ + space.isinstance_w(w_real, space.w_unicode): # a string argument if not noarg2: raise OperationError(space.w_TypeError, @@ -203,8 +203,8 @@ return (w_complex.realval, w_complex.imagval) # # Check that it is not a string (on which space.float() would succeed). - if (space.is_true(space.isinstance(w_complex, space.w_str)) or - space.is_true(space.isinstance(w_complex, space.w_unicode))): + if (space.isinstance_w(w_complex, space.w_str) or + space.isinstance_w(w_complex, space.w_unicode)): raise operationerrfmt(space.w_TypeError, "complex number expected, got '%s'", space.type(w_complex).getname(space)) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -32,14 +32,14 @@ if space.is_w(w_floattype, space.w_float): return w_obj value = space.float_w(w_obj) - elif space.is_true(space.isinstance(w_value, space.w_str)): + elif space.isinstance_w(w_value, space.w_str): strvalue = space.str_w(w_value) try: value = string_to_float(strvalue) except ParseStringError, e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ 
b/pypy/objspace/std/formatting.py @@ -415,15 +415,15 @@ space.wrap("operand does not support " "unary str")) w_result = space.get_and_call_function(w_impl, w_value) - if space.is_true(space.isinstance(w_result, - space.w_unicode)): + if space.isinstance_w(w_result, + space.w_unicode): raise NeedUnicodeFormattingError return space.str_w(w_result) def fmt_s(self, w_value): space = self.space - got_unicode = space.is_true(space.isinstance(w_value, - space.w_unicode)) + got_unicode = space.isinstance_w(w_value, + space.w_unicode) if not do_unicode: if got_unicode: raise NeedUnicodeFormattingError @@ -442,13 +442,13 @@ def fmt_c(self, w_value): self.prec = -1 # just because space = self.space - if space.is_true(space.isinstance(w_value, space.w_str)): + if space.isinstance_w(w_value, space.w_str): s = space.str_w(w_value) if len(s) != 1: raise OperationError(space.w_TypeError, space.wrap("%c requires int or char")) self.std_wp(s) - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if not do_unicode: raise NeedUnicodeFormattingError ustr = space.unicode_w(w_value) @@ -510,15 +510,15 @@ return space.wrap(result) def mod_format(space, w_format, w_values, do_unicode=False): - if space.is_true(space.isinstance(w_values, space.w_tuple)): + if space.isinstance_w(w_values, space.w_tuple): values_w = space.fixedview(w_values) return format(space, w_format, values_w, None, do_unicode) else: # we check directly for dict to avoid obscure checking # in simplest case - if space.is_true(space.isinstance(w_values, space.w_dict)) or \ + if space.isinstance_w(w_values, space.w_dict) or \ (space.lookup(w_values, '__getitem__') and - not space.is_true(space.isinstance(w_values, space.w_basestring))): + not space.isinstance_w(w_values, space.w_basestring)): return format(space, w_format, [w_values], w_values, do_unicode) else: return format(space, w_format, [w_values], None, do_unicode) diff --git 
a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -99,10 +99,10 @@ if type(w_value) is W_IntObject: value = w_value.intval ok = True - elif space.is_true(space.isinstance(w_value, space.w_str)): + elif space.isinstance_w(w_value, space.w_str): value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) ok = True - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: @@ -145,7 +145,7 @@ else: base = space.int_w(w_base) - if space.is_true(space.isinstance(w_value, space.w_unicode)): + if space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -72,6 +72,10 @@ w_seqiter.index += 1 return w_item +# XXX __length_hint__() +##def len__SeqIter(space, w_seqiter): +## return w_seqiter.getlength(space) + def iter__FastTupleIter(space, w_seqiter): return w_seqiter @@ -89,6 +93,10 @@ w_seqiter.index = index + 1 return w_item +# XXX __length_hint__() +##def len__FastTupleIter(space, w_seqiter): +## return w_seqiter.getlength(space) + def iter__FastListIter(space, w_seqiter): return w_seqiter @@ -106,6 +114,10 @@ w_seqiter.index = index + 1 return w_item +# XXX __length_hint__() +##def len__FastListIter(space, w_seqiter): +## return w_seqiter.getlength(space) + def iter__ReverseSeqIter(space, w_seqiter): return w_seqiter @@ -123,5 +135,20 @@ raise OperationError(space.w_StopIteration, space.w_None) return w_item +# XXX __length_hint__() +##def len__ReverseSeqIter(space, w_seqiter): +## if w_seqiter.w_seq is None: +## return space.wrap(0) +## 
index = w_seqiter.index+1 +## w_length = space.len(w_seqiter.w_seq) +## # if length of sequence is less than index :exhaust iterator +## if space.is_true(space.gt(space.wrap(w_seqiter.index), w_length)): +## w_len = space.wrap(0) +## w_seqiter.w_seq = None +## else: +## w_len =space.wrap(index) +## if space.is_true(space.lt(w_len,space.wrap(0))): +## w_len = space.wrap(0) +## return w_len register_all(vars()) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -24,9 +24,9 @@ return w_value elif type(w_value) is W_LongObject: return newbigint(space, w_longtype, w_value.num) - elif space.is_true(space.isinstance(w_value, space.w_str)): + elif space.isinstance_w(w_value, space.w_str): return string_to_w_long(space, w_longtype, space.str_w(w_value)) - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: @@ -51,7 +51,7 @@ else: base = space.int_w(w_base) - if space.is_true(space.isinstance(w_value, space.w_unicode)): + if space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w s = unicode_to_decimal_w(space, w_value) else: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -571,5 +571,8 @@ def _type_isinstance(self, w_inst, w_type): if isinstance(w_type, W_TypeObject): - return self.wrap(self.type(w_inst).issubtype(w_type)) + return self.type(w_inst).issubtype(w_type) raise OperationError(self.w_TypeError, self.wrap("need type object")) + + def isinstance_w(space, w_inst, w_type): + return space._type_isinstance(w_inst, w_type) diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- 
a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -292,8 +292,8 @@ l = [] for i in range(size): w_s = list_w[i] - if not space.is_true(space.isinstance(w_s, space.w_str)): - if space.is_true(space.isinstance(w_s, space.w_unicode)): + if not space.isinstance_w(w_s, space.w_str): + if space.isinstance_w(w_s, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "join", space.newlist(list_w)) raise operationerrfmt( @@ -556,7 +556,7 @@ W_RopeObject.EMPTY, w_start, w_end, True) for w_suffix in space.fixedview(w_suffixes): - if space.is_true(space.isinstance(w_suffix, space.w_unicode)): + if space.isinstance_w(w_suffix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "endswith", w_suffixes, w_start, w_end) @@ -576,7 +576,7 @@ (self, _, start, end) = _convert_idx_params(space, w_self, W_RopeObject.EMPTY, w_start, w_end, True) for w_prefix in space.fixedview(w_prefixes): - if space.is_true(space.isinstance(w_prefix, space.w_unicode)): + if space.isinstance_w(w_prefix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "startswith", w_prefixes, w_start, w_end) diff --git a/pypy/objspace/std/ropeunicodeobject.py b/pypy/objspace/std/ropeunicodeobject.py --- a/pypy/objspace/std/ropeunicodeobject.py +++ b/pypy/objspace/std/ropeunicodeobject.py @@ -29,7 +29,7 @@ assert isinstance(w_str, W_RopeObject) encoding = getdefaultencoding(space) w_retval = decode_string(space, w_str, encoding, "strict") - if not space.is_true(space.isinstance(w_retval, space.w_unicode)): + if not space.isinstance_w(w_retval, space.w_unicode): raise operationerrfmt( space.w_TypeError, "decoder did not return an unicode object (type '%s')", @@ -254,7 +254,7 @@ if isinstance(w_item, W_RopeUnicodeObject): # shortcut for performane item = w_item._node - elif space.is_true(space.isinstance(w_item, space.w_str)): + elif 
space.isinstance_w(w_item, space.w_str): item = unicode_from_string(space, w_item)._node else: msg = 'sequence item %d: expected string or Unicode' @@ -828,14 +828,14 @@ else: if space.is_w(w_newval, space.w_None): continue - elif space.is_true(space.isinstance(w_newval, space.w_int)): + elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: raise OperationError( space.w_TypeError, space.wrap("character mapping must be in range(0x%x)" % (maxunicode + 1,))) result.append(rope.rope_from_unichar(unichr(newval))) - elif space.is_true(space.isinstance(w_newval, space.w_unicode)): + elif space.isinstance_w(w_newval, space.w_unicode): result.append(ropeunicode_w(space, w_newval)) else: raise OperationError( diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -132,7 +132,7 @@ w_obj.setdata = make_setdata_from_w_iterable(space, w_iterable) def _convert_set_to_frozenset(space, w_obj): - if space.is_true(space.isinstance(w_obj, space.w_set)): + if space.isinstance_w(w_obj, space.w_set): return W_FrozensetObject(space, make_setdata_from_w_iterable(space, w_obj)) else: diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -364,8 +364,8 @@ reslen = len(self) * (size - 1) for i in range(size): w_s = list_w[i] - if not space.is_true(space.isinstance(w_s, space.w_str)): - if space.is_true(space.isinstance(w_s, space.w_unicode)): + if not space.isinstance_w(w_s, space.w_str): + if space.isinstance_w(w_s, space.w_unicode): # we need to rebuild w_list here, because the original # w_list might be an iterable which we already consumed w_list = space.newlist(list_w) @@ -646,7 +646,7 @@ space.wrap(''), w_start, w_end, True) for w_suffix in space.fixedview(w_suffixes): - if space.is_true(space.isinstance(w_suffix, 
space.w_unicode)): + if space.isinstance_w(w_suffix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "endswith", w_suffixes, w_start, w_end) @@ -665,7 +665,7 @@ (u_self, _, start, end) = _convert_idx_params(space, w_self, space.wrap(''), w_start, w_end, True) for w_prefix in space.fixedview(w_prefixes): - if space.is_true(space.isinstance(w_prefix, space.w_unicode)): + if space.isinstance_w(w_prefix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "startswith", w_prefixes, w_start, w_end) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -508,15 +508,15 @@ # type name. That's a hack, so we're allowed to use a different # hack... if ('__module__' in w_self.dict_w and - space.is_true(space.isinstance(w_self.getdictvalue(space, '__module__'), - space.w_str))): + space.isinstance_w(w_self.getdictvalue(space, '__module__'), + space.w_str)): return w_self.getdictvalue(space, '__module__') return space.wrap('__builtin__') def get_module_type_name(w_self): space = w_self.space w_mod = w_self.get_module() - if not space.is_true(space.isinstance(w_mod, space.w_str)): + if not space.isinstance_w(w_mod, space.w_str): mod = '__builtin__' else: mod = space.str_w(w_mod) @@ -850,7 +850,7 @@ not space.is_w(w_newtype, space.w_type)): w_type.w_bltin_new = w_newfunc w_newobject = space.call_obj_args(w_newfunc, w_type, __args__) - call_init = space.is_true(space.isinstance(w_newobject, w_type)) + call_init = space.isinstance_w(w_newobject, w_type) # maybe invoke the __init__ of the type if call_init: @@ -876,7 +876,7 @@ def repr__Type(space, w_obj): w_mod = w_obj.get_module() - if not space.is_true(space.isinstance(w_mod, space.w_str)): + if not space.isinstance_w(w_mod, space.w_str): mod = None else: mod = space.str_w(w_mod) diff --git a/pypy/objspace/std/typetype.py 
b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ b/pypy/objspace/std/typetype.py @@ -110,7 +110,7 @@ if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, "can't set %s.__bases__", w_type.name) - if not space.is_true(space.isinstance(w_value, space.w_tuple)): + if not space.isinstance_w(w_value, space.w_tuple): raise operationerrfmt(space.w_TypeError, "can only assign tuple to %s.__bases__, not %s", w_type.name, diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -900,14 +900,14 @@ else: if space.is_w(w_newval, space.w_None): continue - elif space.is_true(space.isinstance(w_newval, space.w_int)): + elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: raise OperationError( space.w_TypeError, space.wrap("character mapping must be in range(0x%x)" % (maxunicode + 1,))) result.append(unichr(newval)) - elif space.is_true(space.isinstance(w_newval, space.w_unicode)): + elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) else: raise OperationError( diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -251,7 +251,7 @@ w_errors = space.wrap(errors) w_restuple = space.call_function(w_encoder, w_object, w_errors) w_retval = space.getitem(w_restuple, space.wrap(0)) - if not space.is_true(space.isinstance(w_retval, space.w_str)): + if not space.isinstance_w(w_retval, space.w_str): raise operationerrfmt(space.w_TypeError, "encoder did not return an string object (type '%s')", space.type(w_retval).getname(space)) @@ -286,7 +286,7 @@ def unicode_from_encoded_object(space, w_obj, encoding, errors): w_retval = decode_object(space, w_obj, encoding, errors) - if not space.is_true(space.isinstance(w_retval, 
space.w_unicode)): + if not space.isinstance_w(w_retval, space.w_unicode): raise operationerrfmt(space.w_TypeError, "decoder did not return an unicode object (type '%s')", space.type(w_retval).getname(space)) @@ -309,7 +309,7 @@ w_res = space.get_and_call_function(w_unicode_method, w_obj) else: w_res = space.str(w_obj) - if space.is_true(space.isinstance(w_res, space.w_unicode)): + if space.isinstance_w(w_res, space.w_unicode): return w_res return unicode_from_encoded_object(space, w_res, None, "strict") @@ -346,7 +346,7 @@ # convoluted logic for the case when unicode subclass has a __unicode__ # method, we need to call this method if (space.is_w(space.type(w_obj), space.w_unicode) or - (space.is_true(space.isinstance(w_obj, space.w_unicode)) and + (space.isinstance_w(w_obj, space.w_unicode) and space.findattr(w_obj, space.wrap('__unicode__')) is None)): if encoding is not None or errors is not None: raise OperationError(space.w_TypeError, diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -62,6 +62,8 @@ "OPENSSL_VERSION_NUMBER") SSLEAY_VERSION = rffi_platform.DefinedConstantString( "SSLEAY_VERSION", "SSLeay_version(SSLEAY_VERSION)") + OPENSSL_NO_SSL2 = rffi_platform.DefinedConstantInteger( + "OPENSSL_NO_SSL2") SSL_FILETYPE_PEM = rffi_platform.ConstantInteger("SSL_FILETYPE_PEM") SSL_OP_ALL = rffi_platform.ConstantInteger("SSL_OP_ALL") SSL_VERIFY_NONE = rffi_platform.ConstantInteger("SSL_VERIFY_NONE") diff --git a/pypy/rlib/rstacklet.py b/pypy/rlib/rstacklet.py --- a/pypy/rlib/rstacklet.py +++ b/pypy/rlib/rstacklet.py @@ -1,6 +1,8 @@ from pypy.rlib import _rffi_stacklet as _c from pypy.rpython.lltypesystem import lltype, llmemory +DEBUG = False + class StackletThread(object): @@ -10,15 +12,30 @@ if not self._thrd: raise MemoryError self._thrd_deleter = StackletThreadDeleter(self._thrd) + if DEBUG: + assert debug.sthread is None, "multithread debug support missing" + debug.sthread = self def new(self, 
callback, arg=llmemory.NULL): - return self._gcrootfinder.new(self, callback, arg) + if DEBUG: + callback = _debug_wrapper(callback) + h = self._gcrootfinder.new(self, callback, arg) + if DEBUG: + debug.add(h) + return h new._annspecialcase_ = 'specialize:arg(1)' def switch(self, stacklet): - return self._gcrootfinder.switch(self, stacklet) + if DEBUG: + debug.remove(stacklet) + h = self._gcrootfinder.switch(self, stacklet) + if DEBUG: + debug.add(h) + return h def destroy(self, stacklet): + if DEBUG: + debug.remove(stacklet) self._gcrootfinder.destroy(self, stacklet) def is_empty_handle(self, stacklet): @@ -56,3 +73,34 @@ None, None, ['__doc__']) return module.gcrootfinder _getgcrootfinder._annspecialcase_ = 'specialize:memo' + + +class StackletDebugError(Exception): + pass + +class Debug(object): + def __init__(self): + self.sthread = None + self.active = [] + def _freeze_(self): + self.__init__() + return False + def add(self, h): + if not self.sthread.is_empty_handle(h): + self.active.append(h) + def remove(self, h): + try: + i = self.active.index(h) + except ValueError: + raise StackletDebugError + del self.active[i] +debug = Debug() + +def _debug_wrapper(callback): + def wrapper(h, arg): + debug.add(h) + h = callback(h, arg) + debug.remove(h) + return h + return wrapper +_debug_wrapper._annspecialcase_ = 'specialize:memo' diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -636,7 +636,8 @@ ASM_FRAMEDATA_HEAD_PTR], lltype.Signed, sandboxsafe=True, - _nowrapper=True) + _nowrapper=True, + random_effects_on_gcobjs=True) c_asm_stackwalk = Constant(pypy_asm_stackwalk, lltype.typeOf(pypy_asm_stackwalk)) diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ 
-41,7 +41,7 @@ seen) def analyze_external_call(self, op, seen=None): funcobj = op.args[0].value._obj - if funcobj._name == 'pypy_asm_stackwalk': + if getattr(funcobj, 'random_effects_on_gcobjs', False): return True return graphanalyze.GraphAnalyzer.analyze_external_call(self, op, seen) diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -97,6 +97,23 @@ if _forcelink is not None: assert index == 0 linkargs = list(_forcelink) + for v in varmap: + if v not in linkargs: + # 'v' was not specified by _forcelink, but we found out that + # we need it! Hack: if it is 'concretetype is lltype.Void' + # then it's ok to recreate its value in the target block. + # If not, then we have a problem :-) + from pypy.rpython.lltypesystem import lltype + assert v.concretetype is lltype.Void + c = Constant(None, lltype.Void) + w = varmap[v] + newop = SpaceOperation('same_as', [c], w) + i = 0 + while i < len(moved_operations): + if w in moved_operations[i].args: + break + i += 1 + moved_operations.insert(i, newop) else: linkargs = varmap.keys() newblock = Block([get_new_name(v) for v in linkargs]) From noreply at buildbot.pypy.org Wed Sep 14 18:24:11 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 18:24:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Rename start_state -> global_state Message-ID: <20110914162411.7358982298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47266:41f4e43f9c4f Date: 2011-09-14 14:20 +0200 http://bitbucket.org/pypy/pypy/changeset/41f4e43f9c4f/ Log: Rename start_state -> global_state diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -21,16 +21,16 @@ def check_sthread(self): ec = self.space.getexecutioncontext() if ec.stacklet_thread is not 
self.sthread: - start_state.clear() + global_state.clear() raise geterror(self.space, "inter-thread support is missing") return ec def descr_init(self, w_callable, __args__): if self.sthread is not None: raise geterror(self.space, "continulet already __init__ialized") - start_state.origin = self - start_state.w_callable = w_callable - start_state.args = __args__ + global_state.origin = self + global_state.w_callable = w_callable + global_state.args = __args__ self.bottomframe = make_fresh_frame(self.space) self.sthread = build_sthread(self.space) try: @@ -39,13 +39,13 @@ raise MemoryError except MemoryError: self.sthread = None - start_state.clear() + global_state.clear() raise getmemoryerror(self.space) def switch(self, w_to): sthread = self.sthread if sthread is not None and sthread.is_empty_handle(self.h): - start_state.clear() + global_state.clear() raise geterror(self.space, "continulet already finished") to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) if to is not None and to.sthread is None: @@ -59,33 +59,33 @@ return get_result() # else: no-op if to is not None: if to.sthread is not sthread: - start_state.clear() + global_state.clear() raise geterror(self.space, "cross-thread double switch") if self is to: # double-switch to myself: no-op return get_result() if sthread.is_empty_handle(to.h): - start_state.clear() + global_state.clear() raise geterror(self.space, "continulet already finished") ec = self.check_sthread() # - start_state.origin = self + global_state.origin = self if to is None: # simple switch: going to self.h - start_state.destination = self + global_state.destination = self else: # double switch: the final destination is to.h - start_state.destination = to + global_state.destination = to # try: - do_switch(sthread, start_state.destination.h) + do_switch(sthread, global_state.destination.h) except MemoryError: - start_state.clear() + global_state.clear() raise getmemoryerror(self.space) # return get_result() def 
descr_switch(self, w_value=None, w_to=None): - start_state.w_value = w_value + global_state.w_value = w_value return self.switch(w_to) def descr_throw(self, w_type, w_val=None, w_tb=None, w_to=None): @@ -100,8 +100,8 @@ # operr = OperationError(w_type, w_val, tb) operr.normalize_exception(space) - start_state.w_value = None - start_state.propagate_exception = operr + global_state.w_value = None + global_state.propagate_exception = operr return self.switch(w_to) def descr_is_pending(self): @@ -172,7 +172,7 @@ # ____________________________________________________________ -class StartState: # xxx a single global to pass around the function to start +class GlobalState: def clear(self): self.origin = None self.destination = None @@ -180,15 +180,15 @@ self.args = None self.w_value = None self.propagate_exception = None -start_state = StartState() -start_state.clear() +global_state = GlobalState() +global_state.clear() def new_stacklet_callback(h, arg): - self = start_state.origin - w_callable = start_state.w_callable - args = start_state.args - start_state.clear() + self = global_state.origin + w_callable = global_state.w_callable + args = global_state.args + global_state.clear() try: do_switch(self.sthread, h) except MemoryError: @@ -198,30 +198,30 @@ try: assert self.sthread.ec.topframeref() is None self.sthread.ec.topframeref = jit.non_virtual_ref(self.bottomframe) - if start_state.propagate_exception is not None: - raise start_state.propagate_exception # just propagate it further - if start_state.w_value is not space.w_None: + if global_state.propagate_exception is not None: + raise global_state.propagate_exception # just propagate it further + if global_state.w_value is not space.w_None: raise OperationError(space.w_TypeError, space.wrap( "can't send non-None value to a just-started continulet")) args = args.prepend(self.space.wrap(self)) w_result = space.call_args(w_callable, args) except Exception, e: - start_state.propagate_exception = e + 
global_state.propagate_exception = e else: - start_state.w_value = w_result + global_state.w_value = w_result self.sthread.ec.topframeref = jit.vref_None - start_state.origin = self - start_state.destination = self + global_state.origin = self + global_state.destination = self return self.h def do_switch(sthread, h): h = sthread.switch(h) - origin = start_state.origin - self = start_state.destination - start_state.origin = None - start_state.destination = None + origin = global_state.origin + self = global_state.destination + global_state.origin = None + global_state.destination = None self.h, origin.h = origin.h, h # current = sthread.ec.topframeref @@ -230,12 +230,12 @@ origin.bottomframe.f_backref = current def get_result(): - if start_state.propagate_exception: - e = start_state.propagate_exception - start_state.propagate_exception = None + if global_state.propagate_exception: + e = global_state.propagate_exception + global_state.propagate_exception = None raise e - w_value = start_state.w_value - start_state.w_value = None + w_value = global_state.w_value + global_state.w_value = None return w_value def build_sthread(space): From noreply at buildbot.pypy.org Wed Sep 14 18:24:12 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 18:24:12 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: hg merge default Message-ID: <20110914162412.9B06F82298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47267:b9c6f30776c0 Date: 2011-09-14 14:24 +0200 http://bitbucket.org/pypy/pypy/changeset/b9c6f30776c0/ Log: hg merge default diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -229,7 +229,7 @@ assert self.sthread.ec.topframeref() is None self.sthread.ec.topframeref = jit.non_virtual_ref(self.bottomframe) if 
global_state.propagate_exception is not None: - raise global_state.propagate_exception # just propagate it further + raise global_state.propagate_exception # just propagate it further if global_state.w_value is not space.w_None: raise OperationError(space.w_TypeError, space.wrap( "can't send non-None value to a just-started continulet")) From noreply at buildbot.pypy.org Wed Sep 14 18:24:13 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 18:24:13 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Refactoring: replace the dummy_pycode with a real one that contains Message-ID: <20110914162413.D8AE482298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47268:e85840b1268d Date: 2011-09-14 17:36 +0200 http://bitbucket.org/pypy/pypy/changeset/e85840b1268d/ Log: Refactoring: replace the dummy_pycode with a real one that contains the start-up logic. Should be easier to pickle. diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -28,19 +28,28 @@ def descr_init(self, w_callable, __args__): if self.sthread is not None: raise geterror(self.space, "continulet already __init__ialized") + # + # hackish: build the frame "by hand", passing it the correct arguments + space = self.space + w_args, w_kwds = __args__.topacked() + bottomframe = space.createframe(get_entrypoint_pycode(space), + get_w_module_dict(space), None) + bottomframe.locals_stack_w[0] = space.wrap(self) + bottomframe.locals_stack_w[1] = w_callable + bottomframe.locals_stack_w[2] = w_args + bottomframe.locals_stack_w[3] = w_kwds + self.bottomframe = bottomframe + # global_state.origin = self - global_state.w_callable = w_callable - global_state.args = __args__ - self.bottomframe = make_fresh_frame(self.space) - self.sthread = build_sthread(self.space) + sthread = 
build_sthread(self.space) + self.sthread = sthread try: - self.h = self.sthread.new(new_stacklet_callback) - if self.sthread.is_empty_handle(self.h): # early return - raise MemoryError + h = sthread.new(new_stacklet_callback) except MemoryError: - self.sthread = None global_state.clear() raise getmemoryerror(self.space) + # + post_switch(sthread, h) def switch(self, w_to): sthread = self.sthread @@ -77,12 +86,12 @@ global_state.destination = to # try: - do_switch(sthread, global_state.destination.h) + h = sthread.switch(global_state.destination.h) except MemoryError: global_state.clear() raise getmemoryerror(self.space) # - return get_result() + return post_switch(sthread, h) def descr_switch(self, w_value=None, w_to=None): global_state.w_value = w_value @@ -161,15 +170,29 @@ class State: def __init__(self, space): from pypy.interpreter.astcompiler.consts import CO_OPTIMIZED - self.space = space + self.space = space w_module = space.getbuiltinmodule('_continuation') self.w_error = space.getattr(w_module, space.wrap('error')) self.w_memoryerror = OperationError(space.w_MemoryError, space.w_None) - self.dummy_pycode = PyCode(space, 0, 0, 0, CO_OPTIMIZED, - '', [], [], [], '', - '', 0, '', [], [], - hidden_applevel=True) + # the following function switches away immediately, so that + # continulet.__init__() doesn't immediately run func(), but it + # also has the hidden purpose of making sure we have a single + # bottomframe for the whole duration of the continulet's run. + # Hackish: only the func_code is used, and used in the context + # of w_globals == this module, so we can access the name + # 'continulet' directly. 
+ w_code = space.appexec([], '''(): + def start(c, func, args, kwds): + if continulet.switch(c) is not None: + raise TypeError( + "can\'t send non-None value to a just-started continulet") + return func(c, *args, **kwds) + return start.func_code + ''') + self.entrypoint_pycode = space.interp_w(PyCode, w_code) + self.entrypoint_pycode.hidden_applevel = True self.w_unpickle = w_module.get('_p') + self.w_module_dict = w_module.getdict(space) def geterror(space, message): cs = space.fromcache(State) @@ -179,9 +202,13 @@ cs = space.fromcache(State) return cs.w_memoryerror -def make_fresh_frame(space): +def get_entrypoint_pycode(space): cs = space.fromcache(State) - return space.FrameClass(space, cs.dummy_pycode, None, None) + return cs.entrypoint_pycode + +def get_w_module_dict(space): + cs = space.fromcache(State) + return cs.w_module_dict def getunpickle(space): cs = space.fromcache(State) @@ -206,8 +233,6 @@ def clear(self): self.origin = None self.destination = None - self.w_callable = None - self.args = None self.w_value = None self.propagate_exception = None global_state = GlobalState() @@ -215,27 +240,13 @@ def new_stacklet_callback(h, arg): - self = global_state.origin - w_callable = global_state.w_callable - args = global_state.args + self = global_state.origin + self.h = h global_state.clear() - try: - do_switch(self.sthread, h) - except MemoryError: - return h # oups! 
do an early return in this case - # space = self.space try: - assert self.sthread.ec.topframeref() is None - self.sthread.ec.topframeref = jit.non_virtual_ref(self.bottomframe) - if global_state.propagate_exception is not None: - raise global_state.propagate_exception # just propagate it further - if global_state.w_value is not space.w_None: - raise OperationError(space.w_TypeError, space.wrap( - "can't send non-None value to a just-started continulet")) - - args = args.prepend(self.space.wrap(self)) - w_result = space.call_args(w_callable, args) + frame = self.bottomframe + w_result = frame.execute_frame() except Exception, e: global_state.propagate_exception = e else: @@ -246,8 +257,7 @@ return self.h -def do_switch(sthread, h): - h = sthread.switch(h) +def post_switch(sthread, h): origin = global_state.origin self = global_state.destination global_state.origin = None @@ -258,6 +268,8 @@ sthread.ec.topframeref = self.bottomframe.f_backref self.bottomframe.f_backref = origin.bottomframe.f_backref origin.bottomframe.f_backref = current + # + return get_result() def get_result(): if global_state.propagate_exception: diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -13,7 +13,7 @@ from _continuation import continulet # def empty_callback(c): - pass + never_called # c = continulet(empty_callback) assert type(c) is continulet From noreply at buildbot.pypy.org Wed Sep 14 18:24:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 18:24:15 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Unused import. Message-ID: <20110914162415.0C3FE82298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47269:13c72872925a Date: 2011-09-14 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/13c72872925a/ Log: Unused import. 
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -169,7 +169,6 @@ class State: def __init__(self, space): - from pypy.interpreter.astcompiler.consts import CO_OPTIMIZED self.space = space w_module = space.getbuiltinmodule('_continuation') self.w_error = space.getattr(w_module, space.wrap('error')) From noreply at buildbot.pypy.org Wed Sep 14 18:24:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 18:24:16 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110914162416.3733B82298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47270:dfae5033127e Date: 2011-09-14 18:23 +0200 http://bitbucket.org/pypy/pypy/changeset/dfae5033127e/ Log: merge heads diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -347,7 +347,7 @@ SLOTS_STARTING_FROM = 3 -class BaseMapdictObject: # slightly evil to make it inherit from W_Root +class BaseMapdictObject: _mixin_ = True def _init_empty(self, map): From noreply at buildbot.pypy.org Wed Sep 14 20:26:16 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Sep 2011 20:26:16 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: merged default. Message-ID: <20110914182616.E973682298@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47271:a16809d0db00 Date: 2011-09-14 11:17 -0400 http://bitbucket.org/pypy/pypy/changeset/a16809d0db00/ Log: merged default. 
diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -62,7 +62,6 @@ from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION from _ssl import SSLError from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED -from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 from _ssl import RAND_status, RAND_egd, RAND_add from _ssl import \ SSL_ERROR_ZERO_RETURN, \ @@ -74,6 +73,18 @@ SSL_ERROR_WANT_CONNECT, \ SSL_ERROR_EOF, \ SSL_ERROR_INVALID_ERROR_CODE +from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 +_PROTOCOL_NAMES = { + PROTOCOL_TLSv1: "TLSv1", + PROTOCOL_SSLv23: "SSLv23", + PROTOCOL_SSLv3: "SSLv3", +} +try: + from _ssl import PROTOCOL_SSLv2 +except ImportError: + pass +else: + _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2" from socket import socket, _fileobject, _delegate_methods, error as socket_error from socket import getnameinfo as _getnameinfo @@ -408,16 +419,7 @@ return DER_cert_to_PEM_cert(dercert) def get_protocol_name(protocol_code): - if protocol_code == PROTOCOL_TLSv1: - return "TLSv1" - elif protocol_code == PROTOCOL_SSLv23: - return "SSLv23" - elif protocol_code == PROTOCOL_SSLv2: - return "SSLv2" - elif protocol_code == PROTOCOL_SSLv3: - return "SSLv3" - else: - return "" + return _PROTOCOL_NAMES.get(protocol_code, '') # a replacement for the old socket.ssl function diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -58,32 +58,35 @@ # Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2 def skip_if_broken_ubuntu_ssl(func): - # We need to access the lower-level wrapper in order to create an - # implicit SSL context without trying to connect or listen. 
- try: - import _ssl - except ImportError: - # The returned function won't get executed, just ignore the error - pass - @functools.wraps(func) - def f(*args, **kwargs): + if hasattr(ssl, 'PROTOCOL_SSLv2'): + # We need to access the lower-level wrapper in order to create an + # implicit SSL context without trying to connect or listen. try: - s = socket.socket(socket.AF_INET) - _ssl.sslwrap(s._sock, 0, None, None, - ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) - except ssl.SSLError as e: - if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and - platform.linux_distribution() == ('debian', 'squeeze/sid', '') - and 'Invalid SSL protocol variant specified' in str(e)): - raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") - return func(*args, **kwargs) - return f + import _ssl + except ImportError: + # The returned function won't get executed, just ignore the error + pass + @functools.wraps(func) + def f(*args, **kwargs): + try: + s = socket.socket(socket.AF_INET) + _ssl.sslwrap(s._sock, 0, None, None, + ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) + except ssl.SSLError as e: + if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and + platform.linux_distribution() == ('debian', 'squeeze/sid', '') + and 'Invalid SSL protocol variant specified' in str(e)): + raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") + return func(*args, **kwargs) + return f + else: + return func class BasicSocketTests(unittest.TestCase): def test_constants(self): - ssl.PROTOCOL_SSLv2 + #ssl.PROTOCOL_SSLv2 ssl.PROTOCOL_SSLv23 ssl.PROTOCOL_SSLv3 ssl.PROTOCOL_TLSv1 @@ -964,7 +967,8 @@ try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_SSLv3, 
ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) @@ -976,7 +980,8 @@ try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False) diff --git a/lib-python/modified-2.7/ssl.py b/lib-python/modified-2.7/ssl.py --- a/lib-python/modified-2.7/ssl.py +++ b/lib-python/modified-2.7/ssl.py @@ -62,7 +62,6 @@ from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION from _ssl import SSLError from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED -from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 from _ssl import RAND_status, RAND_egd, RAND_add from _ssl import \ SSL_ERROR_ZERO_RETURN, \ @@ -74,6 +73,18 @@ SSL_ERROR_WANT_CONNECT, \ SSL_ERROR_EOF, \ SSL_ERROR_INVALID_ERROR_CODE +from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 +_PROTOCOL_NAMES = { + PROTOCOL_TLSv1: "TLSv1", + PROTOCOL_SSLv23: "SSLv23", + PROTOCOL_SSLv3: "SSLv3", +} +try: + from _ssl import PROTOCOL_SSLv2 +except ImportError: + pass +else: + _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2" from socket import socket, _fileobject, error as socket_error from socket import getnameinfo as _getnameinfo @@ -400,16 +411,7 @@ return DER_cert_to_PEM_cert(dercert) def get_protocol_name(protocol_code): - if protocol_code == PROTOCOL_TLSv1: - return "TLSv1" - elif protocol_code == PROTOCOL_SSLv23: - return "SSLv23" - elif protocol_code == PROTOCOL_SSLv2: - return "SSLv2" - elif 
protocol_code == PROTOCOL_SSLv3: - return "SSLv3" - else: - return "" + return _PROTOCOL_NAMES.get(protocol_code, '') # a replacement for the old socket.ssl function diff --git a/lib-python/modified-2.7/test/test_ssl.py b/lib-python/modified-2.7/test/test_ssl.py --- a/lib-python/modified-2.7/test/test_ssl.py +++ b/lib-python/modified-2.7/test/test_ssl.py @@ -58,32 +58,35 @@ # Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2 def skip_if_broken_ubuntu_ssl(func): - # We need to access the lower-level wrapper in order to create an - # implicit SSL context without trying to connect or listen. - try: - import _ssl - except ImportError: - # The returned function won't get executed, just ignore the error - pass - @functools.wraps(func) - def f(*args, **kwargs): + if hasattr(ssl, 'PROTOCOL_SSLv2'): + # We need to access the lower-level wrapper in order to create an + # implicit SSL context without trying to connect or listen. try: - s = socket.socket(socket.AF_INET) - _ssl.sslwrap(s._sock, 0, None, None, - ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) - except ssl.SSLError as e: - if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and - platform.linux_distribution() == ('debian', 'squeeze/sid', '') - and 'Invalid SSL protocol variant specified' in str(e)): - raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") - return func(*args, **kwargs) - return f + import _ssl + except ImportError: + # The returned function won't get executed, just ignore the error + pass + @functools.wraps(func) + def f(*args, **kwargs): + try: + s = socket.socket(socket.AF_INET) + _ssl.sslwrap(s._sock, 0, None, None, + ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) + except ssl.SSLError as e: + if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and + platform.linux_distribution() == ('debian', 'squeeze/sid', '') + and 'Invalid SSL protocol variant specified' in str(e)): + raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") + return func(*args, 
**kwargs) + return f + else: + return func class BasicSocketTests(unittest.TestCase): def test_constants(self): - ssl.PROTOCOL_SSLv2 + #ssl.PROTOCOL_SSLv2 ssl.PROTOCOL_SSLv23 ssl.PROTOCOL_SSLv3 ssl.PROTOCOL_TLSv1 @@ -966,7 +969,8 @@ try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) @@ -978,7 +982,8 @@ try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) + if hasattr(ssl, 'PROTOCOL_SSLv2'): + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -48,23 +48,23 @@ def switch(self, *args): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." 
- return self.__switch(_continulet.switch, args) + return self.__switch('switch', args) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" - return self.__switch(_continulet.throw, typ, val, tb) + return self.__switch('throw', typ, val, tb) - def __switch(target, unbound_method, *args): + def __switch(target, methodname, *args): current = getcurrent() # while not target: if not target.__started: - if unbound_method != _continulet.throw: + if methodname == 'switch': greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw _continulet.__init__(target, greenlet_func, *args) - unbound_method = _continulet.switch + methodname = 'switch' args = () target.__started = True break @@ -75,22 +75,8 @@ target = target.parent # try: - if current.__main: - if target.__main: - # switch from main to main - if unbound_method == _continulet.throw: - raise args[0], args[1], args[2] - (args,) = args - else: - # enter from main to target - args = unbound_method(target, *args) - else: - if target.__main: - # leave to go to target=main - args = unbound_method(current, *args) - else: - # switch from non-main to non-main - args = unbound_method(current, *args, to=target) + unbound_method = getattr(_continulet, methodname) + args = unbound_method(current, *args, to=target) except GreenletExit, e: args = (e,) finally: diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -7,19 +7,12 @@ import traceback import _continuation -from functools import partial class TaskletExit(Exception): pass CoroutineExit = TaskletExit -class GWrap(_continuation.continulet): - """This is just a wrapper around continulet to allow - to stick additional attributes to a continulet. 
- To be more concrete, we need a backreference to - the coroutine object""" - class coroutine(object): "we can't have continulet as a base, because continulets can't be rebound" @@ -42,12 +35,10 @@ arguments *argl, **argd """ if self._frame is None or not self._frame.is_pending(): - - def _func(c, *args, **kwargs): - return func(*args, **kwargs) - - run = partial(_func, *argl, **argd) - self._frame = frame = GWrap(run) + def run(c): + _tls.current_coroutine = self + return func(*argl, **argd) + self._frame = frame = _continuation.continulet(run) else: raise ValueError("cannot bind a bound coroutine") @@ -58,16 +49,18 @@ None is returned """ current = _getcurrent() - current._jump_to(self) - - def _jump_to(self, coroutine): - _tls.current_coroutine = coroutine - self._frame.switch(to=coroutine._frame) + try: + current._frame.switch(to=self._frame) + finally: + _tls.current_coroutine = current def kill(self): """coro.kill() : kill coroutine coro""" - _tls.current_coroutine = self - self._frame.throw(CoroutineExit) + current = _getcurrent() + try: + current._frame.throw(CoroutineExit, to=self._frame) + finally: + _tls.current_coroutine = current def _is_alive(self): if self._frame is None: @@ -78,10 +71,7 @@ def getcurrent(): """coroutine.getcurrent() -> the currently running coroutine""" - try: - return _getcurrent() - except AttributeError: - return _maincoro + return _getcurrent() getcurrent = staticmethod(getcurrent) def __reduce__(self): @@ -109,13 +99,10 @@ # create the main coroutine for this thread _tls.current_coroutine = None main_coroutine = coroutine() - main_coroutine.bind(lambda x:x) + typ = _continuation.continulet + main_coroutine._frame = typ.__new__(typ) _tls.main_coroutine = main_coroutine _tls.current_coroutine = main_coroutine - return main_coroutine - - -_maincoro = _coroutine_create_main() from collections import deque @@ -161,10 +148,10 @@ _last_task = next assert not next.blocked if next is not current: - try: + #try: next.switch() - except 
CoroutineExit: - raise TaskletExit + #except CoroutineExit: --- they are the same anyway + # raise TaskletExit return current def set_schedule_callback(callback): @@ -459,6 +446,7 @@ def _func(): try: try: + coroutine.switch(back) func(*argl, **argd) except TaskletExit: pass @@ -468,6 +456,8 @@ self.func = None coroutine.bind(self, _func) + back = _getcurrent() + coroutine.switch(self) self.alive = True _scheduler_append(self) return self diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -66,7 +66,7 @@ In practice, in PyPy, you cannot change the ``f_back`` of an abitrary frame, but only of frames stored in ``continulets``. -Continulets are internally implemented using stacklets. Stacklets are a +Continulets are internally implemented using stacklets_. Stacklets are a bit more primitive (they are really one-shot continuations), but that idea only works in C, not in Python. The basic idea of continulets is to have at any point in time a complete valid stack; this is important @@ -280,6 +280,24 @@ to use other interfaces like genlets and greenlets.) +Stacklets ++++++++++ + +Continulets are internally implemented using stacklets, which is the +generic RPython-level building block for "one-shot continuations". For +more information about them please see the documentation in the C source +at `pypy/translator/c/src/stacklet/stacklet.h`_. + +The module ``pypy.rlib.rstacklet`` is a thin wrapper around the above +functions. The key point is that new() and switch() always return a +fresh stacklet handle (or an empty one), and switch() additionally +consumes one. It makes no sense to have code in which the returned +handle is ignored, or used more than once. Note that ``stacklet.c`` is +written assuming that the user knows that, and so no additional checking +occurs; this can easily lead to obscure crashes if you don't use a +wrapper like PyPy's '_continuation' module. 
+ + Theory of composability +++++++++++++++++++++++ diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -48,6 +48,7 @@ return frame @staticmethod + @jit.unroll_safe # should usually loop 0 times, very rarely more than once def getnextframe_nohidden(frame): frame = frame.f_backref() while frame and frame.hide(): diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -614,8 +614,8 @@ return self.get_builtin().getdict(space) def fget_f_back(self, space): - f_backref = ExecutionContext.getnextframe_nohidden(self) - return self.space.wrap(f_backref) + f_back = ExecutionContext.getnextframe_nohidden(self) + return self.space.wrap(f_back) def fget_f_lasti(self, space): return self.space.wrap(self.last_instr) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -3277,7 +3277,27 @@ return n self.meta_interp(f, [10], repeat=3) - + + def test_jit_merge_point_with_pbc(self): + driver = JitDriver(greens = [], reds = ['x']) + + class A(object): + def __init__(self, x): + self.x = x + def _freeze_(self): + return True + pbc = A(1) + + def main(x): + return f(x, pbc) + + def f(x, pbc): + while x > 0: + driver.jit_merge_point(x = x) + x -= pbc.x + return x + + self.meta_interp(main, [10]) def test_look_inside_iff_const(self): @look_inside_iff(lambda arg: isconstant(arg)) diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -43,16 +43,29 @@ raise getmemoryerror(self.space) def switch(self, w_to): + sthread = self.sthread + if sthread is not None and 
sthread.is_empty_handle(self.h): + start_state.clear() + raise geterror(self.space, "continulet already finished") to = self.space.interp_w(W_Continulet, w_to, can_be_None=True) + if to is not None and to.sthread is None: + to = None + if sthread is None: # if self is non-initialized: + if to is not None: # if we are given a 'to' + self = to # then just use it and ignore 'self' + sthread = self.sthread + to = None + else: + return get_result() # else: no-op if to is not None: - if to.sthread is None: + if to.sthread is not sthread: start_state.clear() - raise geterror(self.space, "continulet not initialized yet") + raise geterror(self.space, "cross-thread double switch") if self is to: # double-switch to myself: no-op return get_result() - if self.sthread is None: - start_state.clear() - raise geterror(self.space, "continulet not initialized yet") + if sthread.is_empty_handle(to.h): + start_state.clear() + raise geterror(self.space, "continulet already finished") ec = self.check_sthread() # start_state.origin = self @@ -63,14 +76,8 @@ # double switch: the final destination is to.h start_state.destination = to # - h = start_state.destination.h - sthread = self.sthread - if sthread.is_empty_handle(h): - start_state.clear() - raise geterror(self.space, "continulet already finished") - # try: - do_switch(sthread, h) + do_switch(sthread, start_state.destination.h) except MemoryError: start_state.clear() raise getmemoryerror(self.space) diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -135,12 +135,6 @@ e = raises(error, c.switch) assert str(e.value) == "continulet already finished" - def test_not_initialized_yet(self): - from _continuation import continulet, error - c = continulet.__new__(continulet) - e = raises(error, c.switch) - assert str(e.value) == "continulet not initialized yet" - def 
test_go_depth2(self): from _continuation import continulet # @@ -254,6 +248,15 @@ res = c_upper.switch('D') assert res == 'E' + def test_switch_not_initialized(self): + from _continuation import continulet + c0 = continulet.__new__(continulet) + res = c0.switch() + assert res is None + res = c0.switch(123) + assert res == 123 + raises(ValueError, c0.throw, ValueError) + def test_exception_with_switch_depth2(self): from _continuation import continulet # @@ -499,16 +502,31 @@ assert res == 'z' raises(TypeError, c1.switch, to=c2) # "can't send non-None value" - def test_switch2_not_initialized_yet(self): - from _continuation import continulet, error + def test_switch2_not_initialized(self): + from _continuation import continulet + c0 = continulet.__new__(continulet) + c0bis = continulet.__new__(continulet) + res = c0.switch(123, to=c0) + assert res == 123 + res = c0.switch(123, to=c0bis) + assert res == 123 + raises(ValueError, c0.throw, ValueError, to=c0) + raises(ValueError, c0.throw, ValueError, to=c0bis) # def f1(c1): - not_reachable - # + c1.switch('a') + raises(ValueError, c1.switch, 'b') + raises(KeyError, c1.switch, 'c') + return 'd' c1 = continulet(f1) - c2 = continulet.__new__(continulet) - e = raises(error, c1.switch, to=c2) - assert str(e.value) == "continulet not initialized yet" + res = c0.switch(to=c1) + assert res == 'a' + res = c1.switch(to=c0) + assert res == 'b' + res = c1.throw(ValueError, to=c0) + assert res == 'c' + res = c0.throw(KeyError, to=c1) + assert res == 'd' def test_switch2_already_finished(self): from _continuation import continulet, error @@ -643,6 +661,16 @@ assert res == "done" main() + def test_bug_finish_with_already_finished_stacklet(self): + from _continuation import continulet, error + # make an already-finished continulet + c1 = continulet(lambda x: x) + c1.switch() + # make another continulet + c2 = continulet(lambda x: x) + # this switch is forbidden, because it causes a crash when c2 finishes + raises(error, c1.switch, 
to=c2) + def test_various_depths(self): skip("may fail on top of CPython") # run it from test_translated, but not while being actually translated diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -52,7 +52,8 @@ constants["CERT_OPTIONAL"] = PY_SSL_CERT_OPTIONAL constants["CERT_REQUIRED"] = PY_SSL_CERT_REQUIRED -constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 +if not OPENSSL_NO_SSL2: + constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23 constants["PROTOCOL_TLSv1"] = PY_SSL_VERSION_TLS1 @@ -673,7 +674,7 @@ method = libssl_TLSv1_method() elif protocol == PY_SSL_VERSION_SSL3: method = libssl_SSLv3_method() - elif protocol == PY_SSL_VERSION_SSL2: + elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: method = libssl_SSLv2_method() elif protocol == PY_SSL_VERSION_SSL23: method = libssl_SSLv23_method() diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -508,7 +508,7 @@ return space._type_issubtype(w_sub, w_type) def isinstance(space, w_inst, w_type): - return space._type_isinstance(w_inst, w_type) + return space.wrap(space._type_isinstance(w_inst, w_type)) def issubtype_allow_override(space, w_sub, w_type): w_check = space.lookup(w_type, "__subclasscheck__") diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -370,8 +370,8 @@ newdata = [] for i in range(len(list_w)): w_s = list_w[i] - if not (space.is_true(space.isinstance(w_s, space.w_str)) or - space.is_true(space.isinstance(w_s, space.w_bytearray))): + if not (space.isinstance_w(w_s, space.w_str) or + space.isinstance_w(w_s, space.w_bytearray)): raise operationerrfmt( 
space.w_TypeError, "sequence item %d: expected string, %s " diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -127,8 +127,8 @@ and space.is_w(space.type(w_real), space.w_complex)): return w_real - if space.is_true(space.isinstance(w_real, space.w_str)) or \ - space.is_true(space.isinstance(w_real, space.w_unicode)): + if space.isinstance_w(w_real, space.w_str) or \ + space.isinstance_w(w_real, space.w_unicode): # a string argument if not noarg2: raise OperationError(space.w_TypeError, @@ -203,8 +203,8 @@ return (w_complex.realval, w_complex.imagval) # # Check that it is not a string (on which space.float() would succeed). - if (space.is_true(space.isinstance(w_complex, space.w_str)) or - space.is_true(space.isinstance(w_complex, space.w_unicode))): + if (space.isinstance_w(w_complex, space.w_str) or + space.isinstance_w(w_complex, space.w_unicode)): raise operationerrfmt(space.w_TypeError, "complex number expected, got '%s'", space.type(w_complex).getname(space)) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -32,14 +32,14 @@ if space.is_w(w_floattype, space.w_float): return w_obj value = space.float_w(w_obj) - elif space.is_true(space.isinstance(w_value, space.w_str)): + elif space.isinstance_w(w_value, space.w_str): strvalue = space.str_w(w_value) try: value = string_to_float(strvalue) except ParseStringError, e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ 
b/pypy/objspace/std/formatting.py @@ -425,15 +425,15 @@ space.wrap("operand does not support " "unary str")) w_result = space.get_and_call_function(w_impl, w_value) - if space.is_true(space.isinstance(w_result, - space.w_unicode)): + if space.isinstance_w(w_result, + space.w_unicode): raise NeedUnicodeFormattingError return space.str_w(w_result) def fmt_s(self, w_value): space = self.space - got_unicode = space.is_true(space.isinstance(w_value, - space.w_unicode)) + got_unicode = space.isinstance_w(w_value, + space.w_unicode) if not do_unicode: if got_unicode: raise NeedUnicodeFormattingError @@ -452,13 +452,13 @@ def fmt_c(self, w_value): self.prec = -1 # just because space = self.space - if space.is_true(space.isinstance(w_value, space.w_str)): + if space.isinstance_w(w_value, space.w_str): s = space.str_w(w_value) if len(s) != 1: raise OperationError(space.w_TypeError, space.wrap("%c requires int or char")) self.std_wp(s) - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if not do_unicode: raise NeedUnicodeFormattingError ustr = space.unicode_w(w_value) @@ -520,15 +520,15 @@ return space.wrap(result) def mod_format(space, w_format, w_values, do_unicode=False): - if space.is_true(space.isinstance(w_values, space.w_tuple)): + if space.isinstance_w(w_values, space.w_tuple): values_w = space.fixedview(w_values) return format(space, w_format, values_w, None, do_unicode) else: # we check directly for dict to avoid obscure checking # in simplest case - if space.is_true(space.isinstance(w_values, space.w_dict)) or \ + if space.isinstance_w(w_values, space.w_dict) or \ (space.lookup(w_values, '__getitem__') and - not space.is_true(space.isinstance(w_values, space.w_basestring))): + not space.isinstance_w(w_values, space.w_basestring)): return format(space, w_format, [w_values], w_values, do_unicode) else: return format(space, w_format, [w_values], None, do_unicode) diff --git 
a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -99,10 +99,10 @@ if type(w_value) is W_IntObject: value = w_value.intval ok = True - elif space.is_true(space.isinstance(w_value, space.w_str)): + elif space.isinstance_w(w_value, space.w_str): value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) ok = True - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: @@ -145,7 +145,7 @@ else: base = space.int_w(w_base) - if space.is_true(space.isinstance(w_value, space.w_unicode)): + if space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -72,6 +72,10 @@ w_seqiter.index += 1 return w_item +# XXX __length_hint__() +##def len__SeqIter(space, w_seqiter): +## return w_seqiter.getlength(space) + def iter__FastTupleIter(space, w_seqiter): return w_seqiter @@ -89,6 +93,10 @@ w_seqiter.index = index + 1 return w_item +# XXX __length_hint__() +##def len__FastTupleIter(space, w_seqiter): +## return w_seqiter.getlength(space) + def iter__FastListIter(space, w_seqiter): return w_seqiter @@ -106,6 +114,10 @@ w_seqiter.index = index + 1 return w_item +# XXX __length_hint__() +##def len__FastListIter(space, w_seqiter): +## return w_seqiter.getlength(space) + def iter__ReverseSeqIter(space, w_seqiter): return w_seqiter @@ -123,5 +135,20 @@ raise OperationError(space.w_StopIteration, space.w_None) return w_item +# XXX __length_hint__() +##def len__ReverseSeqIter(space, w_seqiter): +## if w_seqiter.w_seq is None: +## return space.wrap(0) +## 
index = w_seqiter.index+1 +## w_length = space.len(w_seqiter.w_seq) +## # if length of sequence is less than index :exhaust iterator +## if space.is_true(space.gt(space.wrap(w_seqiter.index), w_length)): +## w_len = space.wrap(0) +## w_seqiter.w_seq = None +## else: +## w_len =space.wrap(index) +## if space.is_true(space.lt(w_len,space.wrap(0))): +## w_len = space.wrap(0) +## return w_len register_all(vars()) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -24,9 +24,9 @@ return w_value elif type(w_value) is W_LongObject: return newbigint(space, w_longtype, w_value.num) - elif space.is_true(space.isinstance(w_value, space.w_str)): + elif space.isinstance_w(w_value, space.w_str): return string_to_w_long(space, w_longtype, space.str_w(w_value)) - elif space.is_true(space.isinstance(w_value, space.w_unicode)): + elif space.isinstance_w(w_value, space.w_unicode): if space.config.objspace.std.withropeunicode: from pypy.objspace.std.ropeunicodeobject import unicode_to_decimal_w else: @@ -51,7 +51,7 @@ else: base = space.int_w(w_base) - if space.is_true(space.isinstance(w_value, space.w_unicode)): + if space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w s = unicode_to_decimal_w(space, w_value) else: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -350,7 +350,7 @@ SLOTS_STARTING_FROM = 3 -class BaseMapdictObject: # slightly evil to make it inherit from W_Root +class BaseMapdictObject: _mixin_ = True def _init_empty(self, map): diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -571,5 +571,8 @@ def _type_isinstance(self, w_inst, w_type): if isinstance(w_type, W_TypeObject): - return 
self.wrap(self.type(w_inst).issubtype(w_type)) + return self.type(w_inst).issubtype(w_type) raise OperationError(self.w_TypeError, self.wrap("need type object")) + + def isinstance_w(space, w_inst, w_type): + return space._type_isinstance(w_inst, w_type) diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -292,8 +292,8 @@ l = [] for i in range(size): w_s = list_w[i] - if not space.is_true(space.isinstance(w_s, space.w_str)): - if space.is_true(space.isinstance(w_s, space.w_unicode)): + if not space.isinstance_w(w_s, space.w_str): + if space.isinstance_w(w_s, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "join", space.newlist(list_w)) raise operationerrfmt( @@ -556,7 +556,7 @@ W_RopeObject.EMPTY, w_start, w_end, True) for w_suffix in space.fixedview(w_suffixes): - if space.is_true(space.isinstance(w_suffix, space.w_unicode)): + if space.isinstance_w(w_suffix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "endswith", w_suffixes, w_start, w_end) @@ -576,7 +576,7 @@ (self, _, start, end) = _convert_idx_params(space, w_self, W_RopeObject.EMPTY, w_start, w_end, True) for w_prefix in space.fixedview(w_prefixes): - if space.is_true(space.isinstance(w_prefix, space.w_unicode)): + if space.isinstance_w(w_prefix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "startswith", w_prefixes, w_start, w_end) diff --git a/pypy/objspace/std/ropeunicodeobject.py b/pypy/objspace/std/ropeunicodeobject.py --- a/pypy/objspace/std/ropeunicodeobject.py +++ b/pypy/objspace/std/ropeunicodeobject.py @@ -29,7 +29,7 @@ assert isinstance(w_str, W_RopeObject) encoding = getdefaultencoding(space) w_retval = decode_string(space, w_str, encoding, "strict") - if not space.is_true(space.isinstance(w_retval, space.w_unicode)): + if not 
space.isinstance_w(w_retval, space.w_unicode): raise operationerrfmt( space.w_TypeError, "decoder did not return an unicode object (type '%s')", @@ -254,7 +254,7 @@ if isinstance(w_item, W_RopeUnicodeObject): # shortcut for performane item = w_item._node - elif space.is_true(space.isinstance(w_item, space.w_str)): + elif space.isinstance_w(w_item, space.w_str): item = unicode_from_string(space, w_item)._node else: msg = 'sequence item %d: expected string or Unicode' @@ -828,14 +828,14 @@ else: if space.is_w(w_newval, space.w_None): continue - elif space.is_true(space.isinstance(w_newval, space.w_int)): + elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: raise OperationError( space.w_TypeError, space.wrap("character mapping must be in range(0x%x)" % (maxunicode + 1,))) result.append(rope.rope_from_unichar(unichr(newval))) - elif space.is_true(space.isinstance(w_newval, space.w_unicode)): + elif space.isinstance_w(w_newval, space.w_unicode): result.append(ropeunicode_w(space, w_newval)) else: raise OperationError( diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -132,7 +132,7 @@ w_obj.setdata = make_setdata_from_w_iterable(space, w_iterable) def _convert_set_to_frozenset(space, w_obj): - if space.is_true(space.isinstance(w_obj, space.w_set)): + if space.isinstance_w(w_obj, space.w_set): return W_FrozensetObject(space, make_setdata_from_w_iterable(space, w_obj)) else: diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -364,8 +364,8 @@ reslen = len(self) * (size - 1) for i in range(size): w_s = list_w[i] - if not space.is_true(space.isinstance(w_s, space.w_str)): - if space.is_true(space.isinstance(w_s, space.w_unicode)): + if not space.isinstance_w(w_s, space.w_str): + if 
space.isinstance_w(w_s, space.w_unicode): # we need to rebuild w_list here, because the original # w_list might be an iterable which we already consumed w_list = space.newlist(list_w) @@ -646,7 +646,7 @@ space.wrap(''), w_start, w_end, True) for w_suffix in space.fixedview(w_suffixes): - if space.is_true(space.isinstance(w_suffix, space.w_unicode)): + if space.isinstance_w(w_suffix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "endswith", w_suffixes, w_start, w_end) @@ -665,7 +665,7 @@ (u_self, _, start, end) = _convert_idx_params(space, w_self, space.wrap(''), w_start, w_end, True) for w_prefix in space.fixedview(w_prefixes): - if space.is_true(space.isinstance(w_prefix, space.w_unicode)): + if space.isinstance_w(w_prefix, space.w_unicode): w_u = space.call_function(space.w_unicode, w_self) return space.call_method(w_u, "startswith", w_prefixes, w_start, w_end) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -508,15 +508,15 @@ # type name. That's a hack, so we're allowed to use a different # hack... 
if ('__module__' in w_self.dict_w and - space.is_true(space.isinstance(w_self.getdictvalue(space, '__module__'), - space.w_str))): + space.isinstance_w(w_self.getdictvalue(space, '__module__'), + space.w_str)): return w_self.getdictvalue(space, '__module__') return space.wrap('__builtin__') def get_module_type_name(w_self): space = w_self.space w_mod = w_self.get_module() - if not space.is_true(space.isinstance(w_mod, space.w_str)): + if not space.isinstance_w(w_mod, space.w_str): mod = '__builtin__' else: mod = space.str_w(w_mod) @@ -850,7 +850,7 @@ not space.is_w(w_newtype, space.w_type)): w_type.w_bltin_new = w_newfunc w_newobject = space.call_obj_args(w_newfunc, w_type, __args__) - call_init = space.is_true(space.isinstance(w_newobject, w_type)) + call_init = space.isinstance_w(w_newobject, w_type) # maybe invoke the __init__ of the type if call_init: @@ -876,7 +876,7 @@ def repr__Type(space, w_obj): w_mod = w_obj.get_module() - if not space.is_true(space.isinstance(w_mod, space.w_str)): + if not space.isinstance_w(w_mod, space.w_str): mod = None else: mod = space.str_w(w_mod) diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ b/pypy/objspace/std/typetype.py @@ -110,7 +110,7 @@ if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, "can't set %s.__bases__", w_type.name) - if not space.is_true(space.isinstance(w_value, space.w_tuple)): + if not space.isinstance_w(w_value, space.w_tuple): raise operationerrfmt(space.w_TypeError, "can only assign tuple to %s.__bases__, not %s", w_type.name, diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -900,14 +900,14 @@ else: if space.is_w(w_newval, space.w_None): continue - elif space.is_true(space.isinstance(w_newval, space.w_int)): + elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or 
newval > maxunicode: raise OperationError( space.w_TypeError, space.wrap("character mapping must be in range(0x%x)" % (maxunicode + 1,))) result.append(unichr(newval)) - elif space.is_true(space.isinstance(w_newval, space.w_unicode)): + elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) else: raise OperationError( diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -251,7 +251,7 @@ w_errors = space.wrap(errors) w_restuple = space.call_function(w_encoder, w_object, w_errors) w_retval = space.getitem(w_restuple, space.wrap(0)) - if not space.is_true(space.isinstance(w_retval, space.w_str)): + if not space.isinstance_w(w_retval, space.w_str): raise operationerrfmt(space.w_TypeError, "encoder did not return an string object (type '%s')", space.type(w_retval).getname(space)) @@ -286,7 +286,7 @@ def unicode_from_encoded_object(space, w_obj, encoding, errors): w_retval = decode_object(space, w_obj, encoding, errors) - if not space.is_true(space.isinstance(w_retval, space.w_unicode)): + if not space.isinstance_w(w_retval, space.w_unicode): raise operationerrfmt(space.w_TypeError, "decoder did not return an unicode object (type '%s')", space.type(w_retval).getname(space)) @@ -309,7 +309,7 @@ w_res = space.get_and_call_function(w_unicode_method, w_obj) else: w_res = space.str(w_obj) - if space.is_true(space.isinstance(w_res, space.w_unicode)): + if space.isinstance_w(w_res, space.w_unicode): return w_res return unicode_from_encoded_object(space, w_res, None, "strict") @@ -346,7 +346,7 @@ # convoluted logic for the case when unicode subclass has a __unicode__ # method, we need to call this method if (space.is_w(space.type(w_obj), space.w_unicode) or - (space.is_true(space.isinstance(w_obj, space.w_unicode)) and + (space.isinstance_w(w_obj, space.w_unicode) and space.findattr(w_obj, space.wrap('__unicode__')) is None)): if 
encoding is not None or errors is not None: raise OperationError(space.w_TypeError, diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py --- a/pypy/rlib/ropenssl.py +++ b/pypy/rlib/ropenssl.py @@ -62,6 +62,8 @@ "OPENSSL_VERSION_NUMBER") SSLEAY_VERSION = rffi_platform.DefinedConstantString( "SSLEAY_VERSION", "SSLeay_version(SSLEAY_VERSION)") + OPENSSL_NO_SSL2 = rffi_platform.DefinedConstantInteger( + "OPENSSL_NO_SSL2") SSL_FILETYPE_PEM = rffi_platform.ConstantInteger("SSL_FILETYPE_PEM") SSL_OP_ALL = rffi_platform.ConstantInteger("SSL_OP_ALL") SSL_VERIFY_NONE = rffi_platform.ConstantInteger("SSL_VERIFY_NONE") diff --git a/pypy/rlib/rstacklet.py b/pypy/rlib/rstacklet.py --- a/pypy/rlib/rstacklet.py +++ b/pypy/rlib/rstacklet.py @@ -1,6 +1,8 @@ from pypy.rlib import _rffi_stacklet as _c from pypy.rpython.lltypesystem import lltype, llmemory +DEBUG = False + class StackletThread(object): @@ -10,15 +12,30 @@ if not self._thrd: raise MemoryError self._thrd_deleter = StackletThreadDeleter(self._thrd) + if DEBUG: + assert debug.sthread is None, "multithread debug support missing" + debug.sthread = self def new(self, callback, arg=llmemory.NULL): - return self._gcrootfinder.new(self, callback, arg) + if DEBUG: + callback = _debug_wrapper(callback) + h = self._gcrootfinder.new(self, callback, arg) + if DEBUG: + debug.add(h) + return h new._annspecialcase_ = 'specialize:arg(1)' def switch(self, stacklet): - return self._gcrootfinder.switch(self, stacklet) + if DEBUG: + debug.remove(stacklet) + h = self._gcrootfinder.switch(self, stacklet) + if DEBUG: + debug.add(h) + return h def destroy(self, stacklet): + if DEBUG: + debug.remove(stacklet) self._gcrootfinder.destroy(self, stacklet) def is_empty_handle(self, stacklet): @@ -56,3 +73,34 @@ None, None, ['__doc__']) return module.gcrootfinder _getgcrootfinder._annspecialcase_ = 'specialize:memo' + + +class StackletDebugError(Exception): + pass + +class Debug(object): + def __init__(self): + self.sthread = None + self.active = 
[] + def _freeze_(self): + self.__init__() + return False + def add(self, h): + if not self.sthread.is_empty_handle(h): + self.active.append(h) + def remove(self, h): + try: + i = self.active.index(h) + except ValueError: + raise StackletDebugError + del self.active[i] +debug = Debug() + +def _debug_wrapper(callback): + def wrapper(h, arg): + debug.add(h) + h = callback(h, arg) + debug.remove(h) + return h + return wrapper +_debug_wrapper._annspecialcase_ = 'specialize:memo' diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -636,7 +636,8 @@ ASM_FRAMEDATA_HEAD_PTR], lltype.Signed, sandboxsafe=True, - _nowrapper=True) + _nowrapper=True, + random_effects_on_gcobjs=True) c_asm_stackwalk = Constant(pypy_asm_stackwalk, lltype.typeOf(pypy_asm_stackwalk)) diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -41,7 +41,7 @@ seen) def analyze_external_call(self, op, seen=None): funcobj = op.args[0].value._obj - if funcobj._name == 'pypy_asm_stackwalk': + if getattr(funcobj, 'random_effects_on_gcobjs', False): return True return graphanalyze.GraphAnalyzer.analyze_external_call(self, op, seen) diff --git a/pypy/rpython/test/test_rbuilder.py b/pypy/rpython/test/test_rbuilder.py --- a/pypy/rpython/test/test_rbuilder.py +++ b/pypy/rpython/test/test_rbuilder.py @@ -101,6 +101,23 @@ res = self.interpret(func, [1]) assert res + def test_unicode_builder_or_none(self): + def g(s): + if s: + s.append(u"3") + return bool(s) + + def func(i): + if i: + s = UnicodeBuilder() + else: + s = None + return g(s) + res = self.interpret(func, [0]) + assert not res + res = self.interpret(func, [1]) + assert res + class TestLLtype(BaseTestStringBuilder, LLRtypeMixin): pass diff --git 
a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -97,6 +97,23 @@ if _forcelink is not None: assert index == 0 linkargs = list(_forcelink) + for v in varmap: + if v not in linkargs: + # 'v' was not specified by _forcelink, but we found out that + # we need it! Hack: if it is 'concretetype is lltype.Void' + # then it's ok to recreate its value in the target block. + # If not, then we have a problem :-) + from pypy.rpython.lltypesystem import lltype + assert v.concretetype is lltype.Void + c = Constant(None, lltype.Void) + w = varmap[v] + newop = SpaceOperation('same_as', [c], w) + i = 0 + while i < len(moved_operations): + if w in moved_operations[i].args: + break + i += 1 + moved_operations.insert(i, newop) else: linkargs = varmap.keys() newblock = Block([get_new_name(v) for v in linkargs]) From noreply at buildbot.pypy.org Wed Sep 14 20:26:18 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Sep 2011 20:26:18 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: merged improve-heap-cache-tracing in, so that I can make sure it works. Message-ID: <20110914182618.2D46D82298@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47272:238b94fd721c Date: 2011-09-14 11:18 -0400 http://bitbucket.org/pypy/pypy/changeset/238b94fd721c/ Log: merged improve-heap-cache-tracing in, so that I can make sure it works. 
diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/heapcache.py @@ -0,0 +1,172 @@ +from pypy.jit.metainterp.history import ConstInt +from pypy.jit.metainterp.resoperation import rop + + +class HeapCache(object): + def __init__(self): + self.reset() + + def reset(self): + # contains boxes where the class is already known + self.known_class_boxes = {} + # store the boxes that contain newly allocated objects: + self.new_boxes = {} + # contains frame boxes that are not virtualizables + self.nonstandard_virtualizables = {} + # heap cache + # maps descrs to {from_box, to_box} dicts + self.heap_cache = {} + # heap array cache + # maps descrs to {index: {from_box: to_box}} dicts + self.heap_array_cache = {} + # cache the length of arrays + self.length_cache = {} + + def invalidate_caches(self, opnum, descr, argboxes): + if opnum == rop.SETFIELD_GC: + return + if opnum == rop.SETARRAYITEM_GC: + return + if opnum == rop.SETFIELD_RAW: + return + if opnum == rop.SETARRAYITEM_RAW: + return + if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: + return + if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: + return + if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: + effectinfo = descr.get_extra_info() + ef = effectinfo.extraeffect + if ef == effectinfo.EF_LOOPINVARIANT or \ + ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ + ef == effectinfo.EF_ELIDABLE_CAN_RAISE: + return + # A special case for ll_arraycopy, because it is so common, and its + # effects are so well defined. 
+ elif effectinfo.oopspecindex == effectinfo.OS_ARRAYCOPY: + # The destination box + if argboxes[2] in self.new_boxes: + # XXX: no descr here so we invalidate any of them, not just + # of the correct type + # XXX: in theory the indices of the copy could be looked at + # as well + for descr, cache in self.heap_array_cache.iteritems(): + for idx, cache in cache.iteritems(): + for frombox in cache.keys(): + if frombox not in self.new_boxes: + del cache[frombox] + return + + self.heap_cache.clear() + self.heap_array_cache.clear() + + def is_class_known(self, box): + return box in self.known_class_boxes + + def class_now_known(self, box): + self.known_class_boxes[box] = None + + def is_nonstandard_virtualizable(self, box): + return box in self.nonstandard_virtualizables + + def nonstandard_virtualizables_now_known(self, box): + self.nonstandard_virtualizables[box] = None + + def new(self, box): + self.new_boxes[box] = None + + def new_array(self, box, lengthbox): + self.new(box) + self.arraylen_now_known(box, lengthbox) + + def getfield(self, box, descr): + d = self.heap_cache.get(descr, None) + if d: + tobox = d.get(box, None) + if tobox: + return tobox + return None + + def getfield_now_known(self, box, descr, fieldbox): + self.heap_cache.setdefault(descr, {})[box] = fieldbox + + def setfield(self, box, descr, fieldbox): + d = self.heap_cache.get(descr, None) + new_d = self._do_write_with_aliasing(d, box, fieldbox) + self.heap_cache[descr] = new_d + + def _do_write_with_aliasing(self, d, box, fieldbox): + # slightly subtle logic here + # a write to an arbitrary box, all other boxes can alias this one + if not d or box not in self.new_boxes: + # therefore we throw away the cache + return {box: fieldbox} + # the object we are writing to is freshly allocated + # only remove some boxes from the cache + new_d = {} + for frombox, tobox in d.iteritems(): + # the other box is *also* freshly allocated + # therefore frombox and box *must* contain different objects + # thus we 
can keep it in the cache + if frombox in self.new_boxes: + new_d[frombox] = tobox + new_d[box] = fieldbox + return new_d + + def getarrayitem(self, box, descr, indexbox): + if not isinstance(indexbox, ConstInt): + return + index = indexbox.getint() + cache = self.heap_array_cache.get(descr, None) + if cache: + indexcache = cache.get(index, None) + if indexcache is not None: + return indexcache.get(box, None) + + def getarrayitem_now_known(self, box, descr, indexbox, valuebox): + if not isinstance(indexbox, ConstInt): + return + index = indexbox.getint() + cache = self.heap_array_cache.setdefault(descr, {}) + indexcache = cache.get(index, None) + if indexcache is not None: + indexcache[box] = valuebox + else: + cache[index] = {box: valuebox} + + def setarrayitem(self, box, descr, indexbox, valuebox): + if not isinstance(indexbox, ConstInt): + cache = self.heap_array_cache.get(descr, None) + if cache is not None: + cache.clear() + return + index = indexbox.getint() + cache = self.heap_array_cache.setdefault(descr, {}) + indexcache = cache.get(index, None) + cache[index] = self._do_write_with_aliasing(indexcache, box, valuebox) + + + def arraylen(self, box): + return self.length_cache.get(box, None) + + def arraylen_now_known(self, box, lengthbox): + self.length_cache[box] = lengthbox + + def _replace_box(self, d, oldbox, newbox): + new_d = {} + for frombox, tobox in d.iteritems(): + if frombox is oldbox: + frombox = newbox + if tobox is oldbox: + tobox = newbox + new_d[frombox] = tobox + return new_d + + def replace_box(self, oldbox, newbox): + for descr, d in self.heap_cache.iteritems(): + self.heap_cache[descr] = self._replace_box(d, oldbox, newbox) + for descr, d in self.heap_array_cache.iteritems(): + for index, cache in d.iteritems(): + d[index] = self._replace_box(cache, oldbox, newbox) + self.length_cache = self._replace_box(self.length_cache, oldbox, newbox) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- 
a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -17,6 +17,7 @@ from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG, ABORT_BRIDGE, \ ABORT_FORCE_QUASIIMMUT, ABORT_BAD_LOOP from pypy.jit.metainterp.jitexc import JitException, get_llexception +from pypy.jit.metainterp.heapcache import HeapCache from pypy.rlib.objectmodel import specialize from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker @@ -321,7 +322,7 @@ def _establish_nullity(self, box, orgpc): value = box.nonnull() if value: - if box not in self.metainterp.known_class_boxes: + if not self.metainterp.heapcache.is_class_known(box): self.generate_guard(rop.GUARD_NONNULL, box, resumepc=orgpc) else: if not isinstance(box, Const): @@ -366,14 +367,17 @@ @arguments("descr") def opimpl_new(self, sizedescr): - return self.execute_with_descr(rop.NEW, sizedescr) + resbox = self.execute_with_descr(rop.NEW, sizedescr) + self.metainterp.heapcache.new(resbox) + return resbox @arguments("descr") def opimpl_new_with_vtable(self, sizedescr): cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.known_class_boxes[resbox] = None + self.metainterp.heapcache.new(resbox) + self.metainterp.heapcache.class_now_known(resbox) return resbox ## @FixME #arguments("box") @@ -392,23 +396,26 @@ ## self.execute(rop.SUBCLASSOF, box1, box2) @arguments("descr", "box") - def opimpl_new_array(self, itemsizedescr, countbox): - return self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, countbox) + def opimpl_new_array(self, itemsizedescr, lengthbox): + resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox) + self.metainterp.heapcache.new_array(resbox, lengthbox) + return resbox - @specialize.arg(1) - def _do_getarrayitem_gc_any(self, op, arraybox, arraydescr, indexbox): - cache = self.metainterp.heap_array_cache.get(arraydescr, None) - if cache and 
isinstance(indexbox, ConstInt): - index = indexbox.getint() - frombox, tobox = cache.get(index, (None, None)) - if frombox is arraybox: - return tobox - resbox = self.execute_with_descr(op, arraydescr, arraybox, indexbox) - if isinstance(indexbox, ConstInt): - if not cache: - cache = self.metainterp.heap_array_cache[arraydescr] = {} - index = indexbox.getint() - cache[index] = arraybox, resbox + @arguments("box", "descr", "box") + def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox): + tobox = self.metainterp.heapcache.getarrayitem( + arraybox, arraydescr, indexbox) + if tobox: + # sanity check: see whether the current array value + # corresponds to what the cache thinks the value is + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETARRAYITEM_GC, arraydescr, arraybox, indexbox) + assert resbox.constbox().same_constant(tobox.constbox()) + return tobox + resbox = self.execute_with_descr(rop.GETARRAYITEM_GC, + arraydescr, arraybox, indexbox) + self.metainterp.heapcache.getarrayitem_now_known( + arraybox, arraydescr, indexbox, resbox) return resbox @arguments("box", "descr", "box") @@ -440,13 +447,8 @@ indexbox, itembox): self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, indexbox, itembox) - if isinstance(indexbox, ConstInt): - cache = self.metainterp.heap_array_cache.setdefault(arraydescr, {}) - cache[indexbox.getint()] = arraybox, itembox - else: - cache = self.metainterp.heap_array_cache.get(arraydescr, None) - if cache: - cache.clear() + self.metainterp.heapcache.setarrayitem( + arraybox, arraydescr, indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -463,7 +465,12 @@ @arguments("box", "descr") def opimpl_arraylen_gc(self, arraybox, arraydescr): - return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox) + lengthbox = self.metainterp.heapcache.arraylen(arraybox) + if lengthbox is None: + lengthbox = 
self.execute_with_descr( + rop.ARRAYLEN_GC, arraydescr, arraybox) + self.metainterp.heapcache.arraylen_now_known(arraybox, lengthbox) + return lengthbox @arguments("orgpc", "box", "descr", "box") def opimpl_check_neg_index(self, orgpc, arraybox, arraydescr, indexbox): @@ -472,19 +479,17 @@ negbox = self.implement_guard_value(orgpc, negbox) if negbox.getint(): # the index is < 0; add the array length to it - lenbox = self.metainterp.execute_and_record( - rop.ARRAYLEN_GC, arraydescr, arraybox) + lengthbox = self.opimpl_arraylen_gc(arraybox, arraydescr) indexbox = self.metainterp.execute_and_record( - rop.INT_ADD, None, indexbox, lenbox) + rop.INT_ADD, None, indexbox, lengthbox) return indexbox @arguments("descr", "descr", "descr", "descr", "box") def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr, sizebox): - sbox = self.metainterp.execute_and_record(rop.NEW, structdescr) + sbox = self.opimpl_new(structdescr) self._opimpl_setfield_gc_any(sbox, lengthdescr, sizebox) - abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, - sizebox) + abox = self.opimpl_new_array(arraydescr, sizebox) self._opimpl_setfield_gc_any(sbox, itemsdescr, abox) return sbox @@ -541,11 +546,15 @@ @specialize.arg(1) def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr): - frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None)) - if frombox is box: + tobox = self.metainterp.heapcache.getfield(box, fielddescr) + if tobox is not None: + # sanity check: see whether the current struct value + # corresponds to what the cache thinks the value is + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC, fielddescr, box) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) - self.metainterp.heap_cache[fielddescr] = (box, resbox) + self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) return resbox @arguments("orgpc", "box", "descr") @@ -566,11 +575,11 @@ @arguments("box", 
"descr", "box") def _opimpl_setfield_gc_any(self, box, fielddescr, valuebox): - frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None)) - if frombox is box and tobox is valuebox: + tobox = self.metainterp.heapcache.getfield(box, fielddescr) + if tobox is valuebox: return self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heap_cache[fielddescr] = (box, valuebox) + self.metainterp.heapcache.setfield(box, fielddescr, valuebox) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @@ -634,7 +643,7 @@ standard_box = self.metainterp.virtualizable_boxes[-1] if standard_box is box: return False - if box in self.metainterp.nonstandard_virtualizables: + if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, box, standard_box) @@ -643,7 +652,7 @@ if isstandard: self.metainterp.replace_box(box, standard_box) else: - self.metainterp.nonstandard_virtualizables[box] = None + self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) return not isstandard def _get_virtualizable_field_index(self, fielddescr): @@ -728,7 +737,7 @@ def opimpl_arraylen_vable(self, pc, box, fdescr, adescr): if self._nonstandard_virtualizable(pc, box): arraybox = self._opimpl_getfield_gc_any(box, fdescr) - return self.execute_with_descr(rop.ARRAYLEN_GC, adescr, arraybox) + return self.opimpl_arraylen_gc(arraybox, adescr) vinfo = self.metainterp.jitdriver_sd.virtualizable_info virtualizable_box = self.metainterp.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) @@ -893,9 +902,9 @@ @arguments("orgpc", "box") def opimpl_guard_class(self, orgpc, box): clsbox = self.cls_of_box(box) - if box not in self.metainterp.known_class_boxes: + if not self.metainterp.heapcache.is_class_known(box): self.generate_guard(rop.GUARD_CLASS, box, [clsbox], 
resumepc=orgpc) - self.metainterp.known_class_boxes[box] = None + self.metainterp.heapcache.class_now_known(box) return clsbox @arguments("int", "orgpc") @@ -1507,16 +1516,7 @@ self.last_exc_value_box = None self.retracing_loop_from = None self.call_pure_results = args_dict_box() - # contains boxes where the class is already known - self.known_class_boxes = {} - # contains frame boxes that are not virtualizables - self.nonstandard_virtualizables = {} - # heap cache - # maps descrs to (from_box, to_box) tuples - self.heap_cache = {} - # heap array cache - # maps descrs to {index: (from_box, to_box)} dicts - self.heap_array_cache = {} + self.heapcache = HeapCache() def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1692,29 +1692,11 @@ # record the operation profiler = self.staticdata.profiler profiler.count_ops(opnum, RECORDED_OPS) - self._invalidate_caches(opnum, descr) + self.heapcache.invalidate_caches(opnum, descr, argboxes) op = self.history.record(opnum, argboxes, resbox, descr) self.attach_debug_info(op) return resbox - def _invalidate_caches(self, opnum, descr): - if opnum == rop.SETFIELD_GC: - return - if opnum == rop.SETARRAYITEM_GC: - return - if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: - return - if opnum == rop.CALL: - effectinfo = descr.get_extra_info() - ef = effectinfo.extraeffect - if ef == effectinfo.EF_LOOPINVARIANT or \ - ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ - ef == effectinfo.EF_ELIDABLE_CAN_RAISE: - return - if self.heap_cache: - self.heap_cache.clear() - if self.heap_array_cache: - self.heap_array_cache.clear() def attach_debug_info(self, op): if (not we_are_translated() and op is not None @@ -1877,10 +1859,7 @@ duplicates[box] = None def reached_loop_header(self, greenboxes, redboxes, resumedescr): - self.known_class_boxes = {} - self.nonstandard_virtualizables = {} # XXX maybe not needed? 
- self.heap_cache = {} - self.heap_array_cache = {} + self.heapcache.reset() duplicates = {} self.remove_consts_and_duplicates(redboxes, len(redboxes), @@ -2388,17 +2367,7 @@ for i in range(len(boxes)): if boxes[i] is oldbox: boxes[i] = newbox - for descr, (frombox, tobox) in self.heap_cache.iteritems(): - change = False - if frombox is oldbox: - change = True - frombox = newbox - if tobox is oldbox: - change = True - tobox = newbox - if change: - self.heap_cache[descr] = frombox, tobox - # XXX what about self.heap_array_cache? + self.heapcache.replace_box(oldbox, newbox) def find_biggest_function(self): start_stack = [] diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -0,0 +1,328 @@ +from pypy.jit.metainterp.heapcache import HeapCache +from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.history import ConstInt + +box1 = object() +box2 = object() +box3 = object() +box4 = object() +lengthbox1 = object() +lengthbox2 = object() +descr1 = object() +descr2 = object() +descr3 = object() + +index1 = ConstInt(0) +index2 = ConstInt(1) + + +class FakeEffektinfo(object): + EF_ELIDABLE_CANNOT_RAISE = 0 #elidable function (and cannot raise) + EF_LOOPINVARIANT = 1 #special: call it only once per loop + EF_CANNOT_RAISE = 2 #a function which cannot raise + EF_ELIDABLE_CAN_RAISE = 3 #elidable function (but can raise) + EF_CAN_RAISE = 4 #normal function (can raise) + EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables + EF_RANDOM_EFFECTS = 6 #can do whatever + + OS_ARRAYCOPY = 0 + + def __init__(self, extraeffect, oopspecindex): + self.extraeffect = extraeffect + self.oopspecindex = oopspecindex + +class FakeCallDescr(object): + def __init__(self, extraeffect, oopspecindex=None): + self.extraeffect = extraeffect + self.oopspecindex = oopspecindex + + def get_extra_info(self): + return 
FakeEffektinfo(self.extraeffect, self.oopspecindex) + +class TestHeapCache(object): + def test_known_class_box(self): + h = HeapCache() + assert not h.is_class_known(1) + assert not h.is_class_known(2) + h.class_now_known(1) + assert h.is_class_known(1) + assert not h.is_class_known(2) + + h.reset() + assert not h.is_class_known(1) + assert not h.is_class_known(2) + + def test_nonstandard_virtualizable(self): + h = HeapCache() + assert not h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + h.nonstandard_virtualizables_now_known(1) + assert h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + + h.reset() + assert not h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + + + def test_heapcache_fields(self): + h = HeapCache() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is None + h.setfield(box1, descr2, box3) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is box3 + h.setfield(box1, descr1, box3) + assert h.getfield(box1, descr1) is box3 + assert h.getfield(box1, descr2) is box3 + h.setfield(box3, descr1, box1) + assert h.getfield(box3, descr1) is box1 + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is box3 + + h.reset() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box3, descr1) is None + + def test_heapcache_read_fields_multiple(self): + h = HeapCache() + h.getfield_now_known(box1, descr1, box2) + h.getfield_now_known(box3, descr1, box4) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is None + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box3, descr2) is None + + h.reset() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert 
h.getfield(box3, descr1) is None + assert h.getfield(box3, descr2) is None + + def test_heapcache_write_fields_multiple(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.new(box3) + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is box2 # box1 and box3 cannot alias + h.setfield(box1, descr1, box3) + assert h.getfield(box1, descr1) is box3 + + + def test_heapcache_arrays(self): + h = HeapCache() + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + assert h.getarrayitem(box1, descr2, index2) is None + h.setarrayitem(box1, descr1, index2, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box1, descr2, index1, box3) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, 
descr2, index2) is None + + h.setarrayitem(box1, descr1, index1, box3) + assert h.getarrayitem(box1, descr1, index1) is box3 + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box3, descr1, index1, box1) + assert h.getarrayitem(box3, descr1, index1) is box1 + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.reset() + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box3, descr1, index1) is None + + def test_heapcache_array_nonconst_index(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr1, index2, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + h.setarrayitem(box1, descr1, box2, box3) + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + + def test_heapcache_read_fields_multiple_array(self): + h = HeapCache() + h.getarrayitem_now_known(box1, descr1, index1, box2) + h.getarrayitem_now_known(box3, descr1, index1, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box3, descr2, index1) is None + + h.reset() + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box3, descr1, index1) is None + assert h.getarrayitem(box3, descr2, index1) is None + + def test_heapcache_write_fields_multiple_array(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is 
box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.new(box3) + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is box2 # box1 and box3 cannot alias + h.setarrayitem(box1, descr1, index1, box3) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is box3 # box1 and box3 cannot alias + + def test_length_cache(self): + h = HeapCache() + h.new_array(box1, lengthbox1) + assert h.arraylen(box1) is lengthbox1 + + assert h.arraylen(box2) is None + h.arraylen_now_known(box2, lengthbox2) + assert h.arraylen(box2) is lengthbox2 + + + def test_invalidate_cache(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr1, index2, box4) + h.invalidate_caches(rop.INT_ADD, None, []) + h.invalidate_caches(rop.INT_ADD_OVF, None, []) + h.invalidate_caches(rop.SETFIELD_RAW, None, []) + h.invalidate_caches(rop.SETARRAYITEM_RAW, None, []) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + + h.invalidate_caches( + rop.CALL, FakeCallDescr(FakeEffektinfo.EF_ELIDABLE_CANNOT_RAISE), []) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, 
descr1, index2) is box4 + + h.invalidate_caches( + rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), []) + + h.invalidate_caches( + rop.CALL, FakeCallDescr(FakeEffektinfo.EF_RANDOM_EFFECTS), []) + assert h.getfield(box1, descr1) is None + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + + + def test_replace_box(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + h.setfield(box1, descr2, box3) + h.setfield(box2, descr3, box3) + h.replace_box(box1, box4) + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box4, descr1) is box2 + assert h.getfield(box4, descr2) is box3 + assert h.getfield(box2, descr3) is box3 + + def test_replace_box_array(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr2, index1, box3) + h.arraylen_now_known(box1, lengthbox1) + h.setarrayitem(box2, descr1, index2, box1) + h.setarrayitem(box3, descr2, index2, box1) + h.setarrayitem(box2, descr3, index2, box3) + h.replace_box(box1, box4) + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.arraylen(box1) is None + assert h.arraylen(box4) is lengthbox1 + assert h.getarrayitem(box4, descr1, index1) is box2 + assert h.getarrayitem(box4, descr2, index1) is box3 + assert h.getarrayitem(box2, descr1, index2) is box4 + assert h.getarrayitem(box3, descr2, index2) is box4 + assert h.getarrayitem(box2, descr3, index2) is box3 + + h.replace_box(lengthbox1, lengthbox2) + assert h.arraylen(box4) is lengthbox2 + + def test_ll_arraycopy(self): + h = HeapCache() + h.new_array(box1, lengthbox1) + h.setarrayitem(box1, descr1, index1, box2) + h.new_array(box2, lengthbox1) + # Just need the destination box for this call + h.invalidate_caches( + rop.CALL, + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), + [None, None, box2, None, None] + ) + 
assert h.getarrayitem(box1, descr1, index1) is box2 + h.invalidate_caches( + rop.CALL, + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), + [None, None, box3, None, None] + ) + assert h.getarrayitem(box1, descr1, index1) is None + + h.setarrayitem(box4, descr1, index1, box2) + assert h.getarrayitem(box4, descr1, index1) is box2 + h.invalidate_caches( + rop.CALL, + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), + [None, None, box2, None, None] + ) + assert h.getarrayitem(box4, descr1, index1) is None diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -257,6 +257,28 @@ self.check_operations_history(setarrayitem_gc=2, setfield_gc=2, getarrayitem_gc=0, getfield_gc=2) + def test_promote_changes_array_cache(self): + a1 = [0, 0] + a2 = [0, 0] + def fn(n): + if n > 0: + a = a1 + else: + a = a2 + a[0] = n + jit.hint(n, promote=True) + x1 = a[0] + jit.hint(x1, promote=True) + a[n - n] = n + 1 + return a[0] + x1 + res = self.interp_operations(fn, [7]) + assert res == 7 + 7 + 1 + self.check_operations_history(getarrayitem_gc=0, guard_value=1) + res = self.interp_operations(fn, [-7]) + assert res == -7 - 7 + 1 + self.check_operations_history(getarrayitem_gc=0, guard_value=1) + + def test_list_caching(self): a1 = [0, 0] a2 = [0, 0] @@ -357,7 +379,7 @@ assert res == f(10, 1, 1) self.check_history(getarrayitem_gc=0, getfield_gc=0) - def test_heap_caching_pure(self): + def test_heap_caching_array_pure(self): class A(object): pass p1 = A() @@ -405,3 +427,149 @@ assert res == -7 + 7 self.check_operations_history(getfield_gc=0) return + + def test_heap_caching_multiple_objects(self): + class Gbl(object): + pass + g = Gbl() + class A(object): + pass + a1 = A() + g.a1 = a1 + a1.x = 7 + a2 = A() + g.a2 = a2 + a2.x = 7 + def gn(a1, a2): + return a1.x + a2.x + def fn(n): + if n 
< 0: + a1 = A() + g.a1 = a1 + a1.x = n + a2 = A() + g.a2 = a2 + a2.x = n - 1 + else: + a1 = g.a1 + a2 = g.a2 + return a1.x + a2.x + gn(a1, a2) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(setfield_gc=4, getfield_gc=0) + res = self.interp_operations(fn, [7]) + assert res == 4 * 7 + self.check_operations_history(getfield_gc=4) + + def test_heap_caching_multiple_tuples(self): + class Gbl(object): + pass + g = Gbl() + def gn(a1, a2): + return a1[0] + a2[0] + def fn(n): + a1 = (n, ) + g.a = a1 + a2 = (n - 1, ) + g.a = a2 + jit.promote(n) + return a1[0] + a2[0] + gn(a1, a2) + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getfield_gc_pure=0) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getfield_gc_pure=0) + + def test_heap_caching_multiple_arrays(self): + class Gbl(object): + pass + g = Gbl() + def fn(n): + a1 = [n, n, n] + g.a = a1 + a1[0] = n + a2 = [n, n, n] + g.a = a2 + a2[0] = n - 1 + return a1[0] + a2[0] + a1[0] + a2[0] + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getarrayitem_gc=0) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getarrayitem_gc=0) + + def test_heap_caching_multiple_arrays_getarrayitem(self): + class Gbl(object): + pass + g = Gbl() + g.a1 = [7, 8, 9] + g.a2 = [8, 9, 10, 11] + + def fn(i): + if i < 0: + g.a1 = [7, 8, 9] + g.a2 = [7, 8, 9, 10] + jit.promote(i) + a1 = g.a1 + a1[i + 1] = 15 # make lists mutable + a2 = g.a2 + a2[i + 1] = 19 + return a1[i] + a2[i] + a1[i] + a2[i] + res = self.interp_operations(fn, [0]) + assert res == 2 * 7 + 2 * 8 + self.check_operations_history(getarrayitem_gc=2) + + + def test_heap_caching_multiple_lists(self): + class Gbl(object): + pass + g = Gbl() + g.l = [] + def fn(n): + if n < -100: + g.l.append(1) + a1 = [n, n, n] + g.l = a1 + 
a1[0] = n + a2 = [n, n, n] + g.l = a2 + a2[0] = n - 1 + return a1[0] + a2[0] + a1[0] + a2[0] + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getarrayitem_gc=0, getfield_gc=0) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getarrayitem_gc=0, getfield_gc=0) + + def test_length_caching(self): + class Gbl(object): + pass + g = Gbl() + g.a = [0] * 7 + def fn(n): + a = g.a + res = len(a) + len(a) + a1 = [0] * n + g.a = a1 + return len(a1) + res + res = self.interp_operations(fn, [7]) + assert res == 7 * 3 + self.check_operations_history(arraylen_gc=1) + + def test_arraycopy(self): + class Gbl(object): + pass + g = Gbl() + g.a = [0] * 7 + def fn(n): + assert n >= 0 + a = g.a + x = [0] * n + x[2] = 21 + return len(a[:n]) + x[2] + res = self.interp_operations(fn, [3]) + assert res == 24 + self.check_operations_history(getarrayitem_gc=0) From noreply at buildbot.pypy.org Wed Sep 14 20:26:19 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 14 Sep 2011 20:26:19 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: fix some merge failures. Message-ID: <20110914182619.663CE82298@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47273:42daaab99f83 Date: 2011-09-14 14:26 -0400 http://bitbucket.org/pypy/pypy/changeset/42daaab99f83/ Log: fix some merge failures. 
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -4711,7 +4711,6 @@ """ self.optimize_loop(ops, expected) -<<<<<<< local def test_empty_copystrunicontent(self): ops = """ [p0, p1, i0, i2, i3] diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -102,9 +102,9 @@ print "Short Preamble:" short = loop.preamble.token.short_preamble[0] print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + print '\n'.join([str(o) for o in short.operations]) print - + assert expected != "crash!", "should have raised an exception" self.assert_equal(loop, expected) if expected_preamble: @@ -113,7 +113,7 @@ if expected_short: self.assert_equal(short, expected_short, text_right='expected short preamble') - + return loop class OptimizeOptTest(BaseTestWithUnroll): @@ -866,10 +866,10 @@ setfield_gc(p3sub, i1, descr=valuedescr) setfield_gc(p1, p3sub, descr=nextdescr) # XXX: We get two extra operations here because the setfield - # above is the result of forcing p1 and thus not + # above is the result of forcing p1 and thus not # registered with the heap optimizer. 
I've makred tests # below with VIRTUALHEAP if they suffer from this issue - p3sub2 = getfield_gc(p1, descr=nextdescr) + p3sub2 = getfield_gc(p1, descr=nextdescr) guard_nonnull_class(p3sub2, ConstClass(node_vtable2)) [] jump(i1, p1, p3sub2) """ @@ -1411,7 +1411,7 @@ guard_isnull(p18) [p0, p8] p31 = new(descr=ssize) p35 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p35, p31, descr=valuedescr) + setfield_gc(p35, p31, descr=valuedescr) jump(p0, p35) """ expected = """ @@ -1426,7 +1426,7 @@ guard_isnull(p18) [p0, p8] p31 = new(descr=ssize) p35 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p35, p31, descr=valuedescr) + setfield_gc(p35, p31, descr=valuedescr) jump(p0, p35, p19, p18) """ expected = """ @@ -1435,7 +1435,7 @@ jump(p0, NULL) """ self.optimize_loop(ops, expected) - + def test_varray_1(self): ops = """ [i1] @@ -2181,7 +2181,7 @@ jump(p1) """ self.optimize_loop(ops, expected) - + def test_duplicate_getarrayitem_2(self): ops = """ [p1, i0] @@ -2199,7 +2199,7 @@ jump(p1, i7, i6) """ self.optimize_loop(ops, expected) - + def test_duplicate_getarrayitem_after_setarrayitem_1(self): ops = """ [p1, p2] @@ -2812,14 +2812,14 @@ guard_no_overflow() [] i3b = int_is_true(i3) guard_true(i3b) [] - setfield_gc(p1, i1, descr=valuedescr) + setfield_gc(p1, i1, descr=valuedescr) escape(i3) escape(i3) jump(i1, p1, i3) """ expected = """ [i1, p1, i3] - setfield_gc(p1, i1, descr=valuedescr) + setfield_gc(p1, i1, descr=valuedescr) escape(i3) escape(i3) jump(i1, p1, i3) @@ -2830,7 +2830,7 @@ ops = """ [p8, p11, i24] p26 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p26, i24, descr=adescr) + setfield_gc(p26, i24, descr=adescr) i34 = getfield_gc_pure(p11, descr=valuedescr) i35 = getfield_gc_pure(p26, descr=adescr) i36 = int_add_ovf(i34, i35) @@ -2839,10 +2839,10 @@ """ expected = """ [p8, p11, i26] - jump(p8, p11, i26) - """ - self.optimize_loop(ops, expected) - + jump(p8, p11, i26) + """ + self.optimize_loop(ops, expected) + def 
test_ovf_guard_in_short_preamble2(self): ops = """ [p8, p11, p12] @@ -5150,14 +5150,14 @@ [i0, i1, i10, i11, i2, i3, i4] escape(i2) escape(i3) - escape(i4) + escape(i4) i24 = int_mul_ovf(i10, i11) guard_no_overflow() [] i23 = int_sub_ovf(i10, i11) guard_no_overflow() [] i22 = int_add_ovf(i10, i11) guard_no_overflow() [] - jump(i0, i1, i10, i11, i2, i3, i4) + jump(i0, i1, i10, i11, i2, i3, i4) """ self.optimize_loop(ops, expected) @@ -5699,14 +5699,14 @@ ops = """ [p0, i0] i1 = unicodegetitem(p0, i0) - i10 = unicodegetitem(p0, i0) + i10 = unicodegetitem(p0, i0) i2 = int_lt(i1, 0) guard_false(i2) [] jump(p0, i0) """ expected = """ [p0, i0] - i1 = unicodegetitem(p0, i0) + i1 = unicodegetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -5865,7 +5865,7 @@ """ preamble = """ [p1, i1, i2, p3] - guard_nonnull(p3) [] + guard_nonnull(p3) [] i4 = int_sub(i2, i1) i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr) escape(i0) @@ -6474,7 +6474,7 @@ setfield_gc(p3, i1, descr=adescr) setfield_gc(p3, i2, descr=bdescr) i5 = int_gt(ii, 42) - guard_true(i5) [] + guard_true(i5) [] jump(p0, p1, p3, ii2, ii, i1, i2) """ self.optimize_loop(ops, expected) @@ -6500,7 +6500,7 @@ p1 = getfield_gc(p0, descr=nextdescr) guard_nonnull_class(p1, ConstClass(node_vtable)) [] p2 = getfield_gc(p1, descr=nextdescr) - guard_nonnull_class(p2, ConstClass(node_vtable)) [] + guard_nonnull_class(p2, ConstClass(node_vtable)) [] jump(p0) """ expected = """ @@ -6514,11 +6514,11 @@ guard_class(p1, ConstClass(node_vtable)) [] p2 = getfield_gc(p1, descr=nextdescr) guard_nonnull(p2) [] - guard_class(p2, ConstClass(node_vtable)) [] + guard_class(p2, ConstClass(node_vtable)) [] jump(p0) """ self.optimize_loop(ops, expected, expected_short=short) - + def test_forced_virtual_pure_getfield(self): ops = """ [p0] @@ -6582,7 +6582,7 @@ jump(p1, i2) """ self.optimize_loop(ops, expected) - + def test_loopinvariant_strlen(self): ops = """ [p9] @@ -6715,7 +6715,7 @@ [p0, p1] p2 = 
new_with_vtable(ConstClass(node_vtable)) p3 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p2, p3, descr=nextdescr) + setfield_gc(p2, p3, descr=nextdescr) jump(p2, p3) """ expected = """ @@ -6734,7 +6734,7 @@ jump(p2, i2) """ expected = """ - [p1] + [p1] p2 = getarrayitem_gc(p1, 7, descr=) i1 = arraylen_gc(p1) jump(p2) @@ -6775,8 +6775,8 @@ jump(p0, p2, p1) """ self.optimize_loop(ops, expected, expected_short=short) - - + + def test_loopinvariant_constant_strgetitem(self): ops = """ [p0] @@ -6830,11 +6830,11 @@ expected = """ [p0, i22, p1] call(i22, descr=nonwritedescr) - i3 = unicodelen(p1) # Should be killed by backend + i3 = unicodelen(p1) # Should be killed by backend jump(p0, i22, p1) """ self.optimize_loop(ops, expected, expected_short=short) - + def test_propagate_virtual_arryalen(self): ops = """ [p0] @@ -6903,7 +6903,7 @@ [p0, p1, p10, p11] i1 = arraylen_gc(p10, descr=arraydescr) getarrayitem_gc(p11, 1, descr=arraydescr) - call(i1, descr=nonwritedescr) + call(i1, descr=nonwritedescr) jump(p1, p0, p11, p10) """ self.optimize_loop(ops, expected) @@ -6912,20 +6912,20 @@ ops = """ [p5] i10 = getfield_gc(p5, descr=valuedescr) - call(i10, descr=nonwritedescr) + call(i10, descr=nonwritedescr) setfield_gc(p5, 1, descr=valuedescr) jump(p5) """ preamble = """ [p5] i10 = getfield_gc(p5, descr=valuedescr) - call(i10, descr=nonwritedescr) + call(i10, descr=nonwritedescr) setfield_gc(p5, 1, descr=valuedescr) jump(p5) """ expected = """ [p5] - call(1, descr=nonwritedescr) + call(1, descr=nonwritedescr) jump(p5) """ self.optimize_loop(ops, expected, preamble) @@ -6963,7 +6963,7 @@ [p9] call_assembler(0, descr=asmdescr) i18 = getfield_gc(p9, descr=valuedescr) - guard_value(i18, 0) [] + guard_value(i18, 0) [] jump(p9) """ self.optimize_loop(ops, expected) @@ -6992,17 +6992,17 @@ i10 = getfield_gc(p5, descr=valuedescr) i11 = getfield_gc(p6, descr=nextdescr) call(i10, i11, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, 
descr=nextdescr) jump(p5, p6) """ expected = """ [p5, p6, i10, i11] call(i10, i11, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, descr=nextdescr) jump(p5, p6, i10, i10) """ self.optimize_loop(ops, expected) - + def test_cached_pure_func_of_equal_fields(self): ops = """ [p5, p6] @@ -7011,18 +7011,18 @@ i12 = int_add(i10, 7) i13 = int_add(i11, 7) call(i12, i13, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, descr=nextdescr) jump(p5, p6) """ expected = """ [p5, p6, i14, i12, i10] i13 = int_add(i14, 7) call(i12, i13, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, descr=nextdescr) jump(p5, p6, i10, i12, i10) """ self.optimize_loop(ops, expected) - + def test_forced_counter(self): # XXX: VIRTUALHEAP (see above) py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") @@ -7165,7 +7165,7 @@ expected = """ [p1, p2, i2, i1] call(i2, descr=nonwritedescr) - setfield_gc(p2, i1, descr=nextdescr) + setfield_gc(p2, i1, descr=nextdescr) jump(p1, p2, i2, i1) """ self.optimize_loop(ops, expected) @@ -7185,11 +7185,11 @@ expected = """ [p1, p2, i2, i1] call(i2, descr=nonwritedescr) - setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p2, i1, descr=valuedescr) jump(p1, p2, i2, i1) """ self.optimize_loop(ops, expected) - + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass - + diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -401,19 +401,17 @@ self.metainterp.heapcache.new_array(resbox, lengthbox) return resbox - @arguments("box", "descr", "box") - def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox): + def _do_getarrayitem_gc_any(self, op, arraybox, arraydescr, indexbox): tobox = self.metainterp.heapcache.getarrayitem( arraybox, arraydescr, indexbox) if tobox: # sanity check: see whether the current array value # corresponds to 
what the cache thinks the value is - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETARRAYITEM_GC, arraydescr, arraybox, indexbox) + resbox = executor.execute(self.metainterp.cpu, self.metainterp, op, + arraydescr, arraybox, indexbox) assert resbox.constbox().same_constant(tobox.constbox()) return tobox - resbox = self.execute_with_descr(rop.GETARRAYITEM_GC, - arraydescr, arraybox, indexbox) + resbox = self.execute_with_descr(op, arraydescr, arraybox, indexbox) self.metainterp.heapcache.getarrayitem_now_known( arraybox, arraydescr, indexbox, resbox) return resbox From noreply at buildbot.pypy.org Wed Sep 14 21:52:19 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 21:52:19 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Improve the DEBUG checks. Add a test that checks that we don't Message-ID: <20110914195219.5B1F482298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47274:ea97f442a1f2 Date: 2011-09-14 20:54 +0200 http://bitbucket.org/pypy/pypy/changeset/ea97f442a1f2/ Log: Improve the DEBUG checks. Add a test that checks that we don't accidentally check-in DEBUG=True. 
diff --git a/pypy/rlib/rstacklet.py b/pypy/rlib/rstacklet.py --- a/pypy/rlib/rstacklet.py +++ b/pypy/rlib/rstacklet.py @@ -87,12 +87,20 @@ return False def add(self, h): if not self.sthread.is_empty_handle(h): + if h == self.sthread.get_null_handle(): + raise StackletDebugError("unexpected null handle") self.active.append(h) def remove(self, h): try: i = self.active.index(h) except ValueError: - raise StackletDebugError + if self.sthread.is_empty_handle(h): + msg = "empty stacklet handle" + elif h == self.sthread.get_null_handle(): + msg = "unexpected null handle" + else: + msg = "double usage of handle %r" % (h,) + raise StackletDebugError(msg) del self.active[i] debug = Debug() diff --git a/pypy/rlib/test/test_rstacklet.py b/pypy/rlib/test/test_rstacklet.py --- a/pypy/rlib/test/test_rstacklet.py +++ b/pypy/rlib/test/test_rstacklet.py @@ -264,6 +264,10 @@ gcrootfinder = 'shadowstack' +def test_dont_keep_debug_to_true(): + assert not rstacklet.DEBUG + + def target(*args): return entry_point, None From noreply at buildbot.pypy.org Wed Sep 14 21:52:20 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 21:52:20 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Yay! The first tests pass. Message-ID: <20110914195220.8A8D582298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47275:fae2526c2764 Date: 2011-09-14 20:55 +0200 http://bitbucket.org/pypy/pypy/changeset/fae2526c2764/ Log: Yay! The first tests pass. 
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -6,6 +6,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app from pypy.interpreter.pycode import PyCode +from pypy.interpreter.pyframe import PyFrame class W_Continulet(Wrappable): @@ -23,7 +24,6 @@ if ec.stacklet_thread is not self.sthread: global_state.clear() raise geterror(self.space, "inter-thread support is missing") - return ec def descr_init(self, w_callable, __args__): if self.sthread is not None: @@ -43,12 +43,7 @@ global_state.origin = self sthread = build_sthread(self.space) self.sthread = sthread - try: - h = sthread.new(new_stacklet_callback) - except MemoryError: - global_state.clear() - raise getmemoryerror(self.space) - # + h = sthread.new(new_stacklet_callback) post_switch(sthread, h) def switch(self, w_to): @@ -75,7 +70,7 @@ if sthread.is_empty_handle(to.h): global_state.clear() raise geterror(self.space, "continulet already finished") - ec = self.check_sthread() + self.check_sthread() # global_state.origin = self if to is None: @@ -85,12 +80,7 @@ # double switch: the final destination is to.h global_state.destination = to # - try: - h = sthread.switch(global_state.destination.h) - except MemoryError: - global_state.clear() - raise getmemoryerror(self.space) - # + h = sthread.switch(global_state.destination.h) return post_switch(sthread, h) def descr_switch(self, w_value=None, w_to=None): @@ -124,17 +114,36 @@ # __getnewargs__ or __getstate__ defined in the subclass, etc. # Doing the right thing looks involved, though... 
space = self.space + if self.sthread is None: + raise geterror(space, "cannot pickle (yet) a continulet that is " + "not initialized") + if self.sthread.is_empty_handle(self.h): + raise geterror(space, "cannot pickle (yet) a continulet that is " + "already finished") w_continulet_type = space.type(space.wrap(self)) - if self.sthread is None: - args = [getunpickle(space), - space.newtuple([w_continulet_type])] - w_dict = self.getdict(space) - if w_dict is not None: - args = args + [w_dict] - return space.newtuple(args) - else: - raise geterror(space, "cannot pickle yet a continulet that was " - "initialized but not started") + w_frame = space.wrap(self.bottomframe) + w_dict = self.getdict(space) or space.w_None + args = [getunpickle(space), + space.newtuple([w_continulet_type]), + space.newtuple([w_frame, w_dict]), + ] + return space.newtuple(args) + + def descr__setstate__(self, w_args): + if self.sthread is not None: + raise geterror(space, "continulet.__setstate__() on an already-" + "initialized continulet") + space = self.space + w_frame, w_dict = space.fixedview(w_args, expected_length=2) + self.bottomframe = space.interp_w(PyFrame, w_frame) + if not space.is_w(w_dict, space.w_None): + self.setdict(w_dict) + # + global_state.origin = self + sthread = build_sthread(self.space) + self.sthread = sthread + self.h = sthread.new(resume_trampoline_callback) + global_state.origin = None def W_Continulet___new__(space, w_subtype, __args__): @@ -158,6 +167,7 @@ throw = interp2app(W_Continulet.descr_throw), is_pending = interp2app(W_Continulet.descr_is_pending), __reduce__ = interp2app(W_Continulet.descr__reduce__), + __setstate__= interp2app(W_Continulet.descr__setstate__), ) @@ -172,7 +182,6 @@ self.space = space w_module = space.getbuiltinmodule('_continuation') self.w_error = space.getattr(w_module, space.wrap('error')) - self.w_memoryerror = OperationError(space.w_MemoryError, space.w_None) # the following function switches away immediately, so that # 
continulet.__init__() doesn't immediately run func(), but it # also has the hidden purpose of making sure we have a single @@ -197,10 +206,6 @@ cs = space.fromcache(State) return OperationError(cs.w_error, space.wrap(message)) -def getmemoryerror(space): - cs = space.fromcache(State) - return cs.w_memoryerror - def get_entrypoint_pycode(space): cs = space.fromcache(State) return cs.entrypoint_pycode @@ -255,6 +260,66 @@ global_state.destination = self return self.h +def resume_trampoline_callback(h, arg): + from pypy.tool import stdlib_opcode as pythonopcode + self = global_state.origin + self.h = h + space = self.space + try: + h = self.sthread.switch(self.h) + try: + w_result = post_switch(self.sthread, h) + operr = None + except OperationError, operr: + pass + # + while True: + ec = self.sthread.ec + frame = ec.topframeref() + if frame.pycode is get_entrypoint_pycode(space): + break + # + code = frame.pycode.co_code + instr = frame.last_instr + opcode = ord(code[instr]) + map = pythonopcode.opmap + call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], + map['CALL_FUNCTION_VAR'], map['CALL_FUNCTION_VAR_KW'], + map['CALL_METHOD']] + assert opcode in call_ops # XXX check better, and complain better + instr += 1 + oparg = ord(code[instr]) | ord(code[instr + 1]) << 8 + nargs = oparg & 0xff + nkwds = (oparg >> 8) & 0xff + if nkwds == 0: # only positional arguments + # fast paths leaves things on the stack, pop them + if (space.config.objspace.opcodes.CALL_METHOD and + opcode == map['CALL_METHOD']): + frame.dropvalues(nargs + 2) + elif opcode == map['CALL_FUNCTION']: + frame.dropvalues(nargs + 1) + frame.last_instr = instr + 1 # continue after the call + # + # small hack: unlink frame out of the execution context, because + # execute_frame will add it there again + ec.topframeref = frame.f_backref + # + try: + w_result = frame.execute_frame(w_result, operr) + operr = None + except OperationError, operr: + pass + if operr: + raise operr + except Exception, e: + 
global_state.propagate_exception = e + else: + global_state.w_value = w_result + self.sthread.ec.topframeref = jit.vref_None + global_state.origin = self + global_state.destination = self + return self.h + def post_switch(sthread, h): origin = global_state.origin diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -1,6 +1,61 @@ from pypy.conftest import gettestobjspace +class AppTestCopy: + def setup_class(cls): + cls.space = gettestobjspace(usemodules=('_continuation',), + CALL_METHOD=True) + + def test_basic_setup(self): + from _continuation import continulet + lst = [4] + co = continulet(lst.append) + assert lst == [4] + res = co.switch() + assert res is None + assert lst == [4, co] + + def test_copy_continulet_not_started(self): + from _continuation import continulet, error + import copy + lst = [] + co = continulet(lst.append) + co2, lst2 = copy.deepcopy((co, lst)) + assert lst2 == [] + xxx + + def test_copy_continulet_real(self): + import new, sys + mod = new.module('test_copy_continulet_real') + sys.modules['test_copy_continulet_real'] = mod + exec '''if 1: + from _continuation import continulet + import copy + def f(co, x): + co.switch(x + 1) + co.switch(x + 2) + return x + 3 + co = continulet(f, 40) + res = co.switch() + assert res == 41 + co2 = copy.deepcopy(co) + # + res = co2.switch() + assert res == 42 + assert co2.is_pending() + res = co2.switch() + assert res == 43 + assert not co2.is_pending() + # + res = co.switch() + assert res == 42 + assert co.is_pending() + res = co.switch() + assert res == 43 + assert not co.is_pending() + ''' in mod.__dict__ + + class AppTestPickle: version = 0 @@ -23,16 +78,8 @@ """) cls.w_version = cls.space.wrap(cls.version) - def test_basic_setup(self): - from _continuation import continulet - lst = [4] - co = continulet(lst.append) - assert lst == [4] - res 
= co.switch() - assert res is None - assert lst == [4, co] - def test_pickle_continulet_empty(self): + skip("pickle a not-initialized continulet") from _continuation import continulet lst = [4] co = continulet.__new__(continulet) @@ -51,6 +98,7 @@ assert result == [5, co2] def test_pickle_continulet_empty_subclass(self): + skip("pickle a not-initialized continulet") from test_pickle_continulet import continulet, A lst = [4] co = continulet.__new__(A) @@ -73,11 +121,51 @@ assert res is None assert result == [5, co2] - def test_pickle_coroutine_not_started(self): + def test_pickle_continulet_not_started(self): from _continuation import continulet, error import pickle - co = continulet([].append) - raises(error, pickle.dumps, co) # xxx for now + lst = [] + co = continulet(lst.append) + pckl = pickle.dumps((co, lst)) + print pckl + co2, lst2 = pickle.loads(pckl) + assert co is not co2 + assert lst2 == [] + xxx + + def test_pickle_continulet_real(self): + import new, sys + mod = new.module('test_pickle_continulet_real') + sys.modules['test_pickle_continulet_real'] = mod + mod.version = self.version + exec '''if 1: + from _continuation import continulet + import pickle + def f(co, x): + co.switch(x + 1) + co.switch(x + 2) + return x + 3 + co = continulet(f, 40) + res = co.switch() + assert res == 41 + pckl = pickle.dumps(co, version) + print repr(pckl) + co2 = pickle.loads(pckl) + # + res = co2.switch() + assert res == 42 + assert co2.is_pending() + res = co2.switch() + assert res == 43 + assert not co2.is_pending() + # + res = co.switch() + assert res == 42 + assert co.is_pending() + res = co.switch() + assert res == 43 + assert not co.is_pending() + ''' in mod.__dict__ class AppTestPickle_v1(AppTestPickle): From noreply at buildbot.pypy.org Wed Sep 14 21:52:21 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 21:52:21 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Revert this check. 
It seems that w_globals cannot and should not be null. Message-ID: <20110914195221.B889A82298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47276:b1917be98364 Date: 2011-09-14 20:55 +0200 http://bitbucket.org/pypy/pypy/changeset/b1917be98364/ Log: Revert this check. It seems that w_globals cannot and should not be null. diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -66,7 +66,7 @@ make_sure_not_resized(self.locals_stack_w) check_nonneg(self.nlocals) # - if space.config.objspace.honor__builtins__ and w_globals is not None: + if space.config.objspace.honor__builtins__: self.builtin = space.builtin.pick_builtin(w_globals) # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. From noreply at buildbot.pypy.org Wed Sep 14 21:52:22 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 21:52:22 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Fix. Message-ID: <20110914195222.ED84A82298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47277:4f008a29d7ff Date: 2011-09-14 20:58 +0200 http://bitbucket.org/pypy/pypy/changeset/4f008a29d7ff/ Log: Fix. 
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -276,8 +276,7 @@ while True: ec = self.sthread.ec frame = ec.topframeref() - if frame.pycode is get_entrypoint_pycode(space): - break + last_level = frame.pycode is get_entrypoint_pycode(space) # code = frame.pycode.co_code instr = frame.last_instr @@ -309,6 +308,8 @@ operr = None except OperationError, operr: pass + if last_level: + break if operr: raise operr except Exception, e: diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -21,8 +21,14 @@ lst = [] co = continulet(lst.append) co2, lst2 = copy.deepcopy((co, lst)) + # + assert lst == [] + co.switch() + assert lst == [co] + # assert lst2 == [] - xxx + co2.switch() + assert lst2 == [co2] def test_copy_continulet_real(self): import new, sys From noreply at buildbot.pypy.org Wed Sep 14 21:52:24 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 21:52:24 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Tests pass. Message-ID: <20110914195224.292D982298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47278:1f82c034af3e Date: 2011-09-14 21:32 +0200 http://bitbucket.org/pypy/pypy/changeset/1f82c034af3e/ Log: Tests pass. diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -109,41 +109,12 @@ return self.space.newbool(valid) def descr__reduce__(self): - # xxx this is known to be not completely correct with respect - # to subclasses, e.g. 
no __slots__ support, no looking for a - # __getnewargs__ or __getstate__ defined in the subclass, etc. - # Doing the right thing looks involved, though... - space = self.space - if self.sthread is None: - raise geterror(space, "cannot pickle (yet) a continulet that is " - "not initialized") - if self.sthread.is_empty_handle(self.h): - raise geterror(space, "cannot pickle (yet) a continulet that is " - "already finished") - w_continulet_type = space.type(space.wrap(self)) - w_frame = space.wrap(self.bottomframe) - w_dict = self.getdict(space) or space.w_None - args = [getunpickle(space), - space.newtuple([w_continulet_type]), - space.newtuple([w_frame, w_dict]), - ] - return space.newtuple(args) + from pypy.module._continuation import interp_pickle + return interp_pickle.reduce(self) def descr__setstate__(self, w_args): - if self.sthread is not None: - raise geterror(space, "continulet.__setstate__() on an already-" - "initialized continulet") - space = self.space - w_frame, w_dict = space.fixedview(w_args, expected_length=2) - self.bottomframe = space.interp_w(PyFrame, w_frame) - if not space.is_w(w_dict, space.w_None): - self.setdict(w_dict) - # - global_state.origin = self - sthread = build_sthread(self.space) - self.sthread = sthread - self.h = sthread.new(resume_trampoline_callback) - global_state.origin = None + from pypy.module._continuation import interp_pickle + interp_pickle.setstate(self, w_args) def W_Continulet___new__(space, w_subtype, __args__): @@ -170,7 +141,6 @@ __setstate__= interp2app(W_Continulet.descr__setstate__), ) - # ____________________________________________________________ # Continulet objects maintain a dummy frame object in order to ensure @@ -214,10 +184,6 @@ cs = space.fromcache(State) return cs.w_module_dict -def getunpickle(space): - cs = space.fromcache(State) - return cs.w_unpickle - # ____________________________________________________________ @@ -227,6 +193,9 @@ StackletThread.__init__(self, space.config) self.space = space 
self.ec = ec + # for unpickling + from pypy.rlib.rweakref import RWeakKeyDictionary + self.frame2continulet = RWeakKeyDictionary(PyFrame, W_Continulet) ExecutionContext.stacklet_thread = None @@ -260,68 +229,6 @@ global_state.destination = self return self.h -def resume_trampoline_callback(h, arg): - from pypy.tool import stdlib_opcode as pythonopcode - self = global_state.origin - self.h = h - space = self.space - try: - h = self.sthread.switch(self.h) - try: - w_result = post_switch(self.sthread, h) - operr = None - except OperationError, operr: - pass - # - while True: - ec = self.sthread.ec - frame = ec.topframeref() - last_level = frame.pycode is get_entrypoint_pycode(space) - # - code = frame.pycode.co_code - instr = frame.last_instr - opcode = ord(code[instr]) - map = pythonopcode.opmap - call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], - map['CALL_FUNCTION_VAR'], map['CALL_FUNCTION_VAR_KW'], - map['CALL_METHOD']] - assert opcode in call_ops # XXX check better, and complain better - instr += 1 - oparg = ord(code[instr]) | ord(code[instr + 1]) << 8 - nargs = oparg & 0xff - nkwds = (oparg >> 8) & 0xff - if nkwds == 0: # only positional arguments - # fast paths leaves things on the stack, pop them - if (space.config.objspace.opcodes.CALL_METHOD and - opcode == map['CALL_METHOD']): - frame.dropvalues(nargs + 2) - elif opcode == map['CALL_FUNCTION']: - frame.dropvalues(nargs + 1) - frame.last_instr = instr + 1 # continue after the call - # - # small hack: unlink frame out of the execution context, because - # execute_frame will add it there again - ec.topframeref = frame.f_backref - # - try: - w_result = frame.execute_frame(w_result, operr) - operr = None - except OperationError, operr: - pass - if last_level: - break - if operr: - raise operr - except Exception, e: - global_state.propagate_exception = e - else: - global_state.w_value = w_result - self.sthread.ec.topframeref = jit.vref_None - global_state.origin = self - global_state.destination = self 
- return self.h - - def post_switch(sthread, h): origin = global_state.origin self = global_state.destination diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py new file mode 100644 --- /dev/null +++ b/pypy/module/_continuation/interp_pickle.py @@ -0,0 +1,118 @@ +from pypy.tool import stdlib_opcode as pythonopcode +from pypy.rlib import jit +from pypy.interpreter.pyframe import PyFrame +from pypy.module._continuation.interp_continuation import State, global_state +from pypy.module._continuation.interp_continuation import build_sthread +from pypy.module._continuation.interp_continuation import post_switch + + +def getunpickle(space): + cs = space.fromcache(State) + return cs.w_unpickle + + +def reduce(self): + # xxx this is known to be not completely correct with respect + # to subclasses, e.g. no __slots__ support, no looking for a + # __getnewargs__ or __getstate__ defined in the subclass, etc. + # Doing the right thing looks involved, though... 
+ space = self.space + if self.sthread is None: + raise geterror(space, "cannot pickle (yet) a continulet that is " + "not initialized") + if self.sthread.is_empty_handle(self.h): + raise geterror(space, "cannot pickle (yet) a continulet that is " + "already finished") + w_continulet_type = space.type(space.wrap(self)) + w_frame = space.wrap(self.bottomframe) + w_dict = self.getdict(space) or space.w_None + args = [getunpickle(space), + space.newtuple([w_continulet_type]), + space.newtuple([w_frame, w_dict]), + ] + return space.newtuple(args) + +def setstate(self, w_args): + if self.sthread is not None: + raise geterror(space, "continulet.__setstate__() on an already-" + "initialized continulet") + space = self.space + w_frame, w_dict = space.fixedview(w_args, expected_length=2) + self.bottomframe = space.interp_w(PyFrame, w_frame) + if not space.is_w(w_dict, space.w_None): + self.setdict(w_dict) + # + global_state.origin = self + sthread = build_sthread(self.space) + sthread.frame2continulet.set(self.bottomframe, self) + self.sthread = sthread + self.h = sthread.new(resume_trampoline_callback) + global_state.origin = None + +# ____________________________________________________________ + +def resume_trampoline_callback(h, arg): + self = global_state.origin + self.h = h + space = self.space + try: + sthread = self.sthread + h = sthread.switch(self.h) + try: + w_result = post_switch(sthread, h) + operr = None + except OperationError, operr: + pass + # + while True: + ec = sthread.ec + frame = ec.topframeref() + assert frame is not None # XXX better error message + exit_continulet = sthread.frame2continulet.get(frame) + # + continue_after_call(frame) + # + # small hack: unlink frame out of the execution context, because + # execute_frame will add it there again + ec.topframeref = frame.f_backref + # + try: + w_result = frame.execute_frame(w_result, operr) + operr = None + except OperationError, operr: + pass + if exit_continulet is not None: + self = exit_continulet 
+ break + if operr: + raise operr + except Exception, e: + global_state.propagate_exception = e + else: + global_state.w_value = w_result + sthread.ec.topframeref = jit.vref_None + global_state.origin = self + global_state.destination = self + return self.h + +def continue_after_call(frame): + code = frame.pycode.co_code + instr = frame.last_instr + opcode = ord(code[instr]) + map = pythonopcode.opmap + call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], + map['CALL_FUNCTION_VAR'], map['CALL_FUNCTION_VAR_KW'], + map['CALL_METHOD']] + assert opcode in call_ops # XXX check better, and complain better + instr += 1 + oparg = ord(code[instr]) | ord(code[instr + 1]) << 8 + nargs = oparg & 0xff + nkwds = (oparg >> 8) & 0xff + if nkwds == 0: # only positional arguments + # fast paths leaves things on the stack, pop them + if (frame.space.config.objspace.opcodes.CALL_METHOD and + opcode == map['CALL_METHOD']): + frame.dropvalues(nargs + 2) + elif opcode == map['CALL_FUNCTION']: + frame.dropvalues(nargs + 1) + frame.last_instr = instr + 1 # continue after the call diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -36,7 +36,7 @@ from _continuation import continulet, error # def empty_callback(c1): - pass + never_called # c = continulet(empty_callback) raises(error, c.__init__, empty_callback) diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -30,6 +30,31 @@ co2.switch() assert lst2 == [co2] + def test_copy_continulet_not_started_multiple(self): + from _continuation import continulet, error + import copy + lst = [] + co = continulet(lst.append) + co2, lst2 = copy.deepcopy((co, lst)) + co3, lst3 = copy.deepcopy((co, lst)) + co4, lst4 = 
copy.deepcopy((co, lst)) + # + assert lst == [] + co.switch() + assert lst == [co] + # + assert lst2 == [] + co2.switch() + assert lst2 == [co2] + # + assert lst3 == [] + co3.switch() + assert lst3 == [co3] + # + assert lst4 == [] + co4.switch() + assert lst4 == [co4] + def test_copy_continulet_real(self): import new, sys mod = new.module('test_copy_continulet_real') @@ -134,10 +159,13 @@ co = continulet(lst.append) pckl = pickle.dumps((co, lst)) print pckl - co2, lst2 = pickle.loads(pckl) - assert co is not co2 - assert lst2 == [] - xxx + del co, lst + for i in range(2): + print 'resume...' + co2, lst2 = pickle.loads(pckl) + assert lst2 == [] + co2.switch() + assert lst2 == [co2] def test_pickle_continulet_real(self): import new, sys From noreply at buildbot.pypy.org Wed Sep 14 21:52:25 2011 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Sep 2011 21:52:25 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Finished pickling :-) Message-ID: <20110914195225.51D6282298@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47279:46308e5543ff Date: 2011-09-14 21:50 +0200 http://bitbucket.org/pypy/pypy/changeset/46308e5543ff/ Log: Finished pickling :-) diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py --- a/pypy/module/_continuation/interp_pickle.py +++ b/pypy/module/_continuation/interp_pickle.py @@ -4,6 +4,7 @@ from pypy.module._continuation.interp_continuation import State, global_state from pypy.module._continuation.interp_continuation import build_sthread from pypy.module._continuation.interp_continuation import post_switch +from pypy.module._continuation.interp_continuation import get_result def getunpickle(space): @@ -18,13 +19,12 @@ # Doing the right thing looks involved, though... 
space = self.space if self.sthread is None: - raise geterror(space, "cannot pickle (yet) a continulet that is " - "not initialized") - if self.sthread.is_empty_handle(self.h): - raise geterror(space, "cannot pickle (yet) a continulet that is " - "already finished") + w_frame = space.w_False + elif self.sthread.is_empty_handle(self.h): + w_frame = space.w_None + else: + w_frame = space.wrap(self.bottomframe) w_continulet_type = space.type(space.wrap(self)) - w_frame = space.wrap(self.bottomframe) w_dict = self.getdict(space) or space.w_None args = [getunpickle(space), space.newtuple([w_continulet_type]), @@ -38,16 +38,19 @@ "initialized continulet") space = self.space w_frame, w_dict = space.fixedview(w_args, expected_length=2) - self.bottomframe = space.interp_w(PyFrame, w_frame) if not space.is_w(w_dict, space.w_None): - self.setdict(w_dict) + self.setdict(space, w_dict) + if space.is_w(w_frame, space.w_False): + return # not initialized + sthread = build_sthread(self.space) + self.sthread = sthread + self.bottomframe = space.interp_w(PyFrame, w_frame, can_be_None=True) # global_state.origin = self - sthread = build_sthread(self.space) - sthread.frame2continulet.set(self.bottomframe, self) - self.sthread = sthread + if self.bottomframe is not None: + sthread.frame2continulet.set(self.bottomframe, self) self.h = sthread.new(resume_trampoline_callback) - global_state.origin = None + get_result() # propagate the eventual MemoryError # ____________________________________________________________ @@ -55,42 +58,46 @@ self = global_state.origin self.h = h space = self.space + sthread = self.sthread try: - sthread = self.sthread - h = sthread.switch(self.h) - try: - w_result = post_switch(sthread, h) - operr = None - except OperationError, operr: - pass - # - while True: - ec = sthread.ec - frame = ec.topframeref() - assert frame is not None # XXX better error message - exit_continulet = sthread.frame2continulet.get(frame) - # - continue_after_call(frame) - # - # small 
hack: unlink frame out of the execution context, because - # execute_frame will add it there again - ec.topframeref = frame.f_backref - # + global_state.clear() + if self.bottomframe is None: + w_result = space.w_None + else: + h = sthread.switch(self.h) try: - w_result = frame.execute_frame(w_result, operr) + w_result = post_switch(sthread, h) operr = None except OperationError, operr: pass - if exit_continulet is not None: - self = exit_continulet - break - if operr: - raise operr + # + while True: + ec = sthread.ec + frame = ec.topframeref() + assert frame is not None # XXX better error message + exit_continulet = sthread.frame2continulet.get(frame) + # + continue_after_call(frame) + # + # small hack: unlink frame out of the execution context, + # because execute_frame will add it there again + ec.topframeref = frame.f_backref + # + try: + w_result = frame.execute_frame(w_result, operr) + operr = None + except OperationError, operr: + pass + if exit_continulet is not None: + self = exit_continulet + break + sthread.ec.topframeref = jit.vref_None + if operr: + raise operr except Exception, e: global_state.propagate_exception = e else: global_state.w_value = w_result - sthread.ec.topframeref = jit.vref_None global_state.origin = self global_state.destination = self return self.h diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -86,6 +86,20 @@ assert not co.is_pending() ''' in mod.__dict__ + def test_copy_continulet_already_finished(self): + from _continuation import continulet, error + import copy + lst = [] + co = continulet(lst.append) + co.switch() + co2 = copy.deepcopy(co) + assert not co.is_pending() + assert not co2.is_pending() + raises(error, co.__init__, lst.append) + raises(error, co2.__init__, lst.append) + raises(error, co.switch) + raises(error, co2.switch) + class AppTestPickle: 
version = 0 @@ -110,7 +124,6 @@ cls.w_version = cls.space.wrap(cls.version) def test_pickle_continulet_empty(self): - skip("pickle a not-initialized continulet") from _continuation import continulet lst = [4] co = continulet.__new__(continulet) @@ -129,7 +142,6 @@ assert result == [5, co2] def test_pickle_continulet_empty_subclass(self): - skip("pickle a not-initialized continulet") from test_pickle_continulet import continulet, A lst = [4] co = continulet.__new__(A) @@ -201,6 +213,46 @@ assert not co.is_pending() ''' in mod.__dict__ + def test_pickle_continulet_real_subclass(self): + import new, sys + mod = new.module('test_pickle_continulet_real_subclass') + sys.modules['test_pickle_continulet_real_subclass'] = mod + mod.version = self.version + exec '''if 1: + from _continuation import continulet + import pickle + class A(continulet): + def __init__(self): + crash + def f(co): + co.switch(co.x + 1) + co.switch(co.x + 2) + return co.x + 3 + co = A.__new__(A) + continulet.__init__(co, f) + co.x = 40 + res = co.switch() + assert res == 41 + pckl = pickle.dumps(co, version) + print repr(pckl) + co2 = pickle.loads(pckl) + # + assert type(co2) is A + res = co2.switch() + assert res == 42 + assert co2.is_pending() + res = co2.switch() + assert res == 43 + assert not co2.is_pending() + # + res = co.switch() + assert res == 42 + assert co.is_pending() + res = co.switch() + assert res == 43 + assert not co.is_pending() + ''' in mod.__dict__ + class AppTestPickle_v1(AppTestPickle): version = 1 From noreply at buildbot.pypy.org Thu Sep 15 11:07:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Sep 2011 11:07:05 +0200 (CEST) Subject: [pypy-commit] buildbot default: I *think* it makes no sense to haltOnFailure here. 
If we have an upload Message-ID: <20110915090705.845B8820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r595:1cb386a05ee5 Date: 2011-09-15 11:06 +0200 http://bitbucket.org/pypy/buildbot/changeset/1cb386a05ee5/ Log: I *think* it makes no sense to haltOnFailure here. If we have an upload failure for example, we want to still save the file 'result.json' to the master. diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -348,7 +348,6 @@ '--branch', WithProperties('%(branch)s'), ] + addopts, workdir='./benchmarks', - haltOnFailure=True, timeout=3600)) # a bit obscure hack to get both os.path.expand and a property filename = '%(got_revision)s' + (postfix or '') From noreply at buildbot.pypy.org Fri Sep 16 09:30:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 09:30:15 +0200 (CEST) Subject: [pypy-commit] pypy default: permute() should also ignore non-initialized continulets, Message-ID: <20110916073015.AE4F4820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47280:e0b2d986086d Date: 2011-09-16 09:29 +0200 http://bitbucket.org/pypy/pypy/changeset/e0b2d986086d/ Log: permute() should also ignore non-initialized continulets, just like switch(). 
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -124,8 +124,7 @@ try: res = greenlet.run(*args) finally: - if greenlet.parent is not _tls.main: - _continuation.permute(greenlet, greenlet.parent) + _continuation.permute(greenlet, greenlet.parent) return (res,) def _greenlet_throw(greenlet, exc, value, tb): @@ -133,5 +132,4 @@ try: raise exc, value, tb finally: - if greenlet.parent is not _tls.main: - _continuation.permute(greenlet, greenlet.parent) + _continuation.permute(greenlet, greenlet.parent) diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -255,7 +255,7 @@ cont = space.interp_w(W_Continulet, w_cont) if cont.sthread is not sthread: if cont.sthread is None: - raise geterror(space, "got a non-initialized continulet") + continue # ignore non-initialized continulets else: raise geterror(space, "inter-thread support is missing") elif sthread.is_empty_handle(cont.h): diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -661,6 +661,12 @@ assert res == "done" main() + def test_permute_noninitialized(self): + from _continuation import continulet, permute + permute(continulet.__new__(continulet)) # ignored + permute(continulet.__new__(continulet), # ignored + continulet.__new__(continulet)) + def test_bug_finish_with_already_finished_stacklet(self): from _continuation import continulet, error # make an already-finished continulet From noreply at buildbot.pypy.org Fri Sep 16 10:48:03 2011 From: noreply at buildbot.pypy.org (hager) Date: Fri, 16 Sep 2011 10:48:03 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Refactored code to be more similar to ARM 
backend. Message-ID: <20110916084803.69F5F820B1@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r47281:b6d28dac666d Date: 2011-09-16 10:47 +0200 http://bitbucket.org/pypy/pypy/changeset/b6d28dac666d/ Log: Refactored code to be more similar to ARM backend. diff --git a/pypy/jit/backend/ppc/ppcgen/assembler.py b/pypy/jit/backend/ppc/ppcgen/assembler.py --- a/pypy/jit/backend/ppc/ppcgen/assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/assembler.py @@ -37,7 +37,7 @@ def get_number_of_ops(self): return len(self.insts) - def get_relative_pos(self): + def get_rel_pos(self): return 4 * len(self.insts) def patch_op(self, index): diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -0,0 +1,1002 @@ +import os +import struct +from pypy.jit.backend.ppc.ppcgen.ppc_form import PPCForm as Form +from pypy.jit.backend.ppc.ppcgen.ppc_field import ppc_fields +from pypy.jit.backend.ppc.ppcgen.regalloc import (TempInt, PPCFrameManager, + Regalloc) +from pypy.jit.backend.ppc.ppcgen.assembler import Assembler +from pypy.jit.backend.ppc.ppcgen.symbol_lookup import lookup +from pypy.jit.backend.ppc.ppcgen.arch import (IS_PPC_32, WORD, NONVOLATILES, + GPR_SAVE_AREA) +from pypy.jit.backend.ppc.ppcgen.helper.assembler import gen_emit_cmp_op +import pypy.jit.backend.ppc.ppcgen.register as r +import pypy.jit.backend.ppc.ppcgen.condition as c +from pypy.jit.metainterp.history import (Const, ConstPtr, LoopToken, + AbstractFailDescr) +from pypy.jit.backend.llsupport.asmmemmgr import (BlockBuilderMixin, AsmMemoryManager, MachineDataBlockWrapper) +from pypy.jit.backend.llsupport.regalloc import (RegisterManager, + compute_vars_longevity) +from pypy.jit.backend.llsupport import symbolic +from pypy.jit.backend.model import CompiledLoopToken +from pypy.rpython.lltypesystem import lltype, rffi, rstr +from pypy.jit.metainterp.resoperation 
import rop +from pypy.jit.metainterp.history import (BoxInt, ConstInt, ConstPtr, + ConstFloat, Box, INT, REF, FLOAT) +from pypy.jit.backend.x86.support import values_array + +A = Form("frD", "frA", "frB", "XO3", "Rc") +A1 = Form("frD", "frB", "XO3", "Rc") +A2 = Form("frD", "frA", "frC", "XO3", "Rc") +A3 = Form("frD", "frA", "frC", "frB", "XO3", "Rc") + +I = Form("LI", "AA", "LK") + +B = Form("BO", "BI", "BD", "AA", "LK") + +SC = Form("AA") # fudge + +DD = Form("rD", "rA", "SIMM") +DDO = Form("rD", "rA", "ds", "XO4") +DS = Form("rA", "rS", "UIMM") + +X = Form("XO1") +XS = Form("rA", "rS", "rB", "XO1", "Rc") +XSO = Form("rS", "rA", "rB", "XO1") +XD = Form("rD", "rA", "rB", "XO1") +XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc") +XO0 = Form("rD", "rA", "OE", "XO2", "Rc") +XDB = Form("frD", "frB", "XO1", "Rc") +XS0 = Form("rA", "rS", "XO1", "Rc") +X0 = Form("rA", "rB", "XO1") +XcAB = Form("crfD", "rA", "rB", "XO1") +XN = Form("rD", "rA", "NB", "XO1") +XL = Form("crbD", "crbA", "crbB", "XO1") +XL1 = Form("crfD", "crfS") +XL2 = Form("crbD", "XO1", "Rc") +XFL = Form("FM", "frB", "XO1", "Rc") +XFX = Form("CRM", "rS", "XO1") + +MI = Form("rA", "rS", "SH", "MB", "ME", "Rc") +MB = Form("rA", "rS", "rB", "MB", "ME", "Rc") +MDI = Form("rA", "rS", "sh", "mbe", "XO5", "Rc") +MDS = Form("rA", "rS", "rB", "mbe", "XO5", "Rc") + +class BasicPPCAssembler(Assembler): + + def disassemble(cls, inst, labels={}, pc=0): + cache = cls.__dict__.get('idesc cache') + if cache is None: + idescs = cls.get_idescs() + cache = {} + for n, i in idescs: + cache.setdefault(i.specializations[ppc_fields['opcode']], + []).append((n,i)) + setattr(cls, 'idesc cache', cache) + matches = [] + idescs = cache[ppc_fields['opcode'].decode(inst)] + for name, idesc in idescs: + m = idesc.match(inst) + if m > 0: + matches.append((m, idesc, name)) + if matches: + score, idesc, name = max(matches) + return idesc.disassemble(name, inst, labels, pc) + disassemble = classmethod(disassemble) + + # "basic" means no 
simplified mnemonics + + # I form + b = I(18, AA=0, LK=0) + ba = I(18, AA=1, LK=0) + bl = I(18, AA=0, LK=1) + bla = I(18, AA=1, LK=1) + + # B form + bc = B(16, AA=0, LK=0) + bcl = B(16, AA=0, LK=1) + bca = B(16, AA=1, LK=0) + bcla = B(16, AA=1, LK=1) + + # SC form + sc = SC(17, AA=1) # it's not really the aa field... + + # D form + addi = DD(14) + addic = DD(12) + addicx = DD(13) + addis = DD(15) + + andix = DS(28) + andisx = DS(29) + + cmpi = Form("crfD", "L", "rA", "SIMM")(11) + cmpi.default(L=0).default(crfD=0) + cmpli = Form("crfD", "L", "rA", "UIMM")(10) + cmpli.default(L=0).default(crfD=0) + + lbz = DD(34) + lbzu = DD(35) + ld = DDO(58, XO4=0) + ldu = DDO(58, XO4=1) + lfd = DD(50) + lfdu = DD(51) + lfs = DD(48) + lfsu = DD(49) + lha = DD(42) + lhau = DD(43) + lhz = DD(40) + lhzu = DD(41) + lmw = DD(46) + lwa = DDO(58, XO4=2) + lwz = DD(32) + lwzu = DD(33) + + mulli = DD(7) + ori = DS(24) + oris = DS(25) + + stb = DD(38) + stbu = DD(39) + std = DDO(62, XO4=0) + stdu = DDO(62, XO4=1) + stfd = DD(54) + stfdu = DD(55) + stfs = DD(52) + stfsu = DD(53) + sth = DD(44) + sthu = DD(45) + stmw = DD(47) + stw = DD(36) + stwu = DD(37) + + subfic = DD(8) + tdi = Form("TO", "rA", "SIMM")(2) + twi = Form("TO", "rA", "SIMM")(3) + xori = DS(26) + xoris = DS(27) + + # X form + + and_ = XS(31, XO1=28, Rc=0) + and_x = XS(31, XO1=28, Rc=1) + + andc_ = XS(31, XO1=60, Rc=0) + andc_x = XS(31, XO1=60, Rc=1) + + # is the L bit for 64 bit compares? 
hmm + cmp = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=0) + cmp.default(L=0).default(crfD=0) + cmpl = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=32) + cmpl.default(L=0).default(crfD=0) + + cntlzd = XS0(31, XO1=58, Rc=0) + cntlzdx = XS0(31, XO1=58, Rc=1) + cntlzw = XS0(31, XO1=26, Rc=0) + cntlzwx = XS0(31, XO1=26, Rc=1) + + dcba = X0(31, XO1=758) + dcbf = X0(31, XO1=86) + dcbi = X0(31, XO1=470) + dcbst = X0(31, XO1=54) + dcbt = X0(31, XO1=278) + dcbtst = X0(31, XO1=246) + dcbz = X0(31, XO1=1014) + + eciwx = XD(31, XO1=310) + ecowx = XS(31, XO1=438, Rc=0) + + eieio = X(31, XO1=854) + + eqv = XS(31, XO1=284, Rc=0) + eqvx = XS(31, XO1=284, Rc=1) + + extsb = XS0(31, XO1=954, Rc=0) + extsbx = XS0(31, XO1=954, Rc=1) + + extsh = XS0(31, XO1=922, Rc=0) + extshx = XS0(31, XO1=922, Rc=1) + + extsw = XS0(31, XO1=986, Rc=0) + extswx = XS0(31, XO1=986, Rc=1) + + fabs = XDB(63, XO1=264, Rc=0) + fabsx = XDB(63, XO1=264, Rc=1) + + fcmpo = XcAB(63, XO1=32) + fcmpu = XcAB(63, XO1=0) + + fcfid = XDB(63, XO1=846, Rc=0) + fcfidx = XDB(63, XO1=846, Rc=1) + + fctid = XDB(63, XO1=814, Rc=0) + fctidx = XDB(63, XO1=814, Rc=1) + + fctidz = XDB(63, XO1=815, Rc=0) + fctidzx = XDB(63, XO1=815, Rc=1) + + fctiw = XDB(63, XO1=14, Rc=0) + fctiwx = XDB(63, XO1=14, Rc=1) + + fctiwz = XDB(63, XO1=15, Rc=0) + fctiwzx = XDB(63, XO1=15, Rc=1) + + fmr = XDB(63, XO1=72, Rc=0) + fmrx = XDB(63, XO1=72, Rc=1) + + fnabs = XDB(63, XO1=136, Rc=0) + fnabsx = XDB(63, XO1=136, Rc=1) + + fneg = XDB(63, XO1=40, Rc=0) + fnegx = XDB(63, XO1=40, Rc=1) + + frsp = XDB(63, XO1=12, Rc=0) + frspx = XDB(63, XO1=12, Rc=1) + + fsqrt = XDB(63, XO1=22, Rc=0) + + icbi = X0(31, XO1=982) + + lbzux = XD(31, XO1=119) + lbzx = XD(31, XO1=87) + ldarx = XD(31, XO1=84) + ldux = XD(31, XO1=53) + ldx = XD(31, XO1=21) + lfdux = XD(31, XO1=631) + lfdx = XD(31, XO1=599) + lfsux = XD(31, XO1=567) + lfsx = XD(31, XO1=535) + lhaux = XD(31, XO1=375) + lhax = XD(31, XO1=343) + lhbrx = XD(31, XO1=790) + lhzux = XD(31, XO1=311) + lhzx = XD(31, 
XO1=279) + lswi = XD(31, XO1=597) + lswx = XD(31, XO1=533) + lwarx = XD(31, XO1=20) + lwaux = XD(31, XO1=373) + lwax = XD(31, XO1=341) + lwbrx = XD(31, XO1=534) + lwzux = XD(31, XO1=55) + lwzx = XD(31, XO1=23) + + mcrfs = Form("crfD", "crfS", "XO1")(63, XO1=64) + mcrxr = Form("crfD", "XO1")(31, XO1=512) + mfcr = Form("rD", "XO1")(31, XO1=19) + mffs = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=0) + mffsx = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=1) + mfmsr = Form("rD", "XO1")(31, XO1=83) + mfsr = Form("rD", "SR", "XO1")(31, XO1=595) + mfsrin = XDB(31, XO1=659, Rc=0) + + add = XO(31, XO2=266, OE=0, Rc=0) + addx = XO(31, XO2=266, OE=0, Rc=1) + addo = XO(31, XO2=266, OE=1, Rc=0) + addox = XO(31, XO2=266, OE=1, Rc=1) + + addc = XO(31, XO2=10, OE=0, Rc=0) + addcx = XO(31, XO2=10, OE=0, Rc=1) + addco = XO(31, XO2=10, OE=1, Rc=0) + addcox = XO(31, XO2=10, OE=1, Rc=1) + + adde = XO(31, XO2=138, OE=0, Rc=0) + addex = XO(31, XO2=138, OE=0, Rc=1) + addeo = XO(31, XO2=138, OE=1, Rc=0) + addeox = XO(31, XO2=138, OE=1, Rc=1) + + addme = XO(31, rB=0, XO2=234, OE=0, Rc=0) + addmex = XO(31, rB=0, XO2=234, OE=0, Rc=1) + addmeo = XO(31, rB=0, XO2=234, OE=1, Rc=0) + addmeox = XO(31, rB=0, XO2=234, OE=1, Rc=1) + + addze = XO(31, rB=0, XO2=202, OE=0, Rc=0) + addzex = XO(31, rB=0, XO2=202, OE=0, Rc=1) + addzeo = XO(31, rB=0, XO2=202, OE=1, Rc=0) + addzeox = XO(31, rB=0, XO2=202, OE=1, Rc=1) + + bcctr = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=0) + bcctrl = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=1) + + bclr = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=0) + bclrl = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=1) + + crand = XL(19, XO1=257) + crandc = XL(19, XO1=129) + creqv = XL(19, XO1=289) + crnand = XL(19, XO1=225) + crnor = XL(19, XO1=33) + cror = XL(19, XO1=449) + crorc = XL(19, XO1=417) + crxor = XL(19, XO1=193) + + divd = XO(31, XO2=489, OE=0, Rc=0) + divdx = XO(31, XO2=489, OE=0, Rc=1) + divdo = XO(31, XO2=489, OE=1, Rc=0) + divdox = XO(31, XO2=489, OE=1, Rc=1) + + 
divdu = XO(31, XO2=457, OE=0, Rc=0) + divdux = XO(31, XO2=457, OE=0, Rc=1) + divduo = XO(31, XO2=457, OE=1, Rc=0) + divduox = XO(31, XO2=457, OE=1, Rc=1) + + divw = XO(31, XO2=491, OE=0, Rc=0) + divwx = XO(31, XO2=491, OE=0, Rc=1) + divwo = XO(31, XO2=491, OE=1, Rc=0) + divwox = XO(31, XO2=491, OE=1, Rc=1) + + divwu = XO(31, XO2=459, OE=0, Rc=0) + divwux = XO(31, XO2=459, OE=0, Rc=1) + divwuo = XO(31, XO2=459, OE=1, Rc=0) + divwuox = XO(31, XO2=459, OE=1, Rc=1) + + fadd = A(63, XO3=21, Rc=0) + faddx = A(63, XO3=21, Rc=1) + fadds = A(59, XO3=21, Rc=0) + faddsx = A(59, XO3=21, Rc=1) + + fdiv = A(63, XO3=18, Rc=0) + fdivx = A(63, XO3=18, Rc=1) + fdivs = A(59, XO3=18, Rc=0) + fdivsx = A(59, XO3=18, Rc=1) + + fmadd = A3(63, XO3=19, Rc=0) + fmaddx = A3(63, XO3=19, Rc=1) + fmadds = A3(59, XO3=19, Rc=0) + fmaddsx = A3(59, XO3=19, Rc=1) + + fmsub = A3(63, XO3=28, Rc=0) + fmsubx = A3(63, XO3=28, Rc=1) + fmsubs = A3(59, XO3=28, Rc=0) + fmsubsx = A3(59, XO3=28, Rc=1) + + fmul = A2(63, XO3=25, Rc=0) + fmulx = A2(63, XO3=25, Rc=1) + fmuls = A2(59, XO3=25, Rc=0) + fmulsx = A2(59, XO3=25, Rc=1) + + fnmadd = A3(63, XO3=31, Rc=0) + fnmaddx = A3(63, XO3=31, Rc=1) + fnmadds = A3(59, XO3=31, Rc=0) + fnmaddsx = A3(59, XO3=31, Rc=1) + + fnmsub = A3(63, XO3=30, Rc=0) + fnmsubx = A3(63, XO3=30, Rc=1) + fnmsubs = A3(59, XO3=30, Rc=0) + fnmsubsx = A3(59, XO3=30, Rc=1) + + fres = A1(59, XO3=24, Rc=0) + fresx = A1(59, XO3=24, Rc=1) + + frsp = A1(63, XO3=12, Rc=0) + frspx = A1(63, XO3=12, Rc=1) + + frsqrte = A1(63, XO3=26, Rc=0) + frsqrtex = A1(63, XO3=26, Rc=1) + + fsel = A3(63, XO3=23, Rc=0) + fselx = A3(63, XO3=23, Rc=1) + + frsqrt = A1(63, XO3=22, Rc=0) + frsqrtx = A1(63, XO3=22, Rc=1) + frsqrts = A1(59, XO3=22, Rc=0) + frsqrtsx = A1(59, XO3=22, Rc=1) + + fsub = A(63, XO3=20, Rc=0) + fsubx = A(63, XO3=20, Rc=1) + fsubs = A(59, XO3=20, Rc=0) + fsubsx = A(59, XO3=20, Rc=1) + + isync = X(19, XO1=150) + + mcrf = XL1(19) + + mfspr = Form("rD", "spr", "XO1")(31, XO1=339) + mftb = Form("rD", 
"spr", "XO1")(31, XO1=371) + + mtcrf = XFX(31, XO1=144) + + mtfsb0 = XL2(63, XO1=70, Rc=0) + mtfsb0x = XL2(63, XO1=70, Rc=1) + mtfsb1 = XL2(63, XO1=38, Rc=0) + mtfsb1x = XL2(63, XO1=38, Rc=1) + + mtfsf = XFL(63, XO1=711, Rc=0) + mtfsfx = XFL(63, XO1=711, Rc=1) + + mtfsfi = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=0) + mtfsfix = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=1) + + mtmsr = Form("rS", "XO1")(31, XO1=146) + + mtspr = Form("rS", "spr", "XO1")(31, XO1=467) + + mtsr = Form("rS", "SR", "XO1")(31, XO1=210) + mtsrin = Form("rS", "rB", "XO1")(31, XO1=242) + + mulhd = XO(31, OE=0, XO2=73, Rc=0) + mulhdx = XO(31, OE=0, XO2=73, Rc=1) + + mulhdu = XO(31, OE=0, XO2=9, Rc=0) + mulhdux = XO(31, OE=0, XO2=9, Rc=1) + + mulld = XO(31, OE=0, XO2=233, Rc=0) + mulldx = XO(31, OE=0, XO2=233, Rc=1) + mulldo = XO(31, OE=1, XO2=233, Rc=0) + mulldox = XO(31, OE=1, XO2=233, Rc=1) + + mulhw = XO(31, OE=0, XO2=75, Rc=0) + mulhwx = XO(31, OE=0, XO2=75, Rc=1) + + mulhwu = XO(31, OE=0, XO2=11, Rc=0) + mulhwux = XO(31, OE=0, XO2=11, Rc=1) + + mullw = XO(31, OE=0, XO2=235, Rc=0) + mullwx = XO(31, OE=0, XO2=235, Rc=1) + mullwo = XO(31, OE=1, XO2=235, Rc=0) + mullwox = XO(31, OE=1, XO2=235, Rc=1) + + nand = XS(31, XO1=476, Rc=0) + nandx = XS(31, XO1=476, Rc=1) + + neg = XO0(31, OE=0, XO2=104, Rc=0) + negx = XO0(31, OE=0, XO2=104, Rc=1) + nego = XO0(31, OE=1, XO2=104, Rc=0) + negox = XO0(31, OE=1, XO2=104, Rc=1) + + nor = XS(31, XO1=124, Rc=0) + norx = XS(31, XO1=124, Rc=1) + + or_ = XS(31, XO1=444, Rc=0) + or_x = XS(31, XO1=444, Rc=1) + + orc = XS(31, XO1=412, Rc=0) + orcx = XS(31, XO1=412, Rc=1) + + rfi = X(19, XO1=50) + + rfid = X(19, XO1=18) + + rldcl = MDS(30, XO5=8, Rc=0) + rldclx = MDS(30, XO5=8, Rc=1) + rldcr = MDS(30, XO5=9, Rc=0) + rldcrx = MDS(30, XO5=9, Rc=1) + + rldic = MDI(30, XO5=2, Rc=0) + rldicx = MDI(30, XO5=2, Rc=1) + rldicl = MDI(30, XO5=0, Rc=0) + rldiclx = MDI(30, XO5=0, Rc=1) + rldicr = MDI(30, XO5=1, Rc=0) + rldicrx = MDI(30, XO5=1, Rc=1) + rldimi = 
MDI(30, XO5=3, Rc=0) + rldimix = MDI(30, XO5=3, Rc=1) + + rlwimi = MI(20, Rc=0) + rlwimix = MI(20, Rc=1) + + rlwinm = MI(21, Rc=0) + rlwinmx = MI(21, Rc=1) + + rlwnm = MB(23, Rc=0) + rlwnmx = MB(23, Rc=1) + + sld = XS(31, XO1=27, Rc=0) + sldx = XS(31, XO1=27, Rc=1) + + slw = XS(31, XO1=24, Rc=0) + slwx = XS(31, XO1=24, Rc=1) + + srad = XS(31, XO1=794, Rc=0) + sradx = XS(31, XO1=794, Rc=1) + + sradi = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=0) + sradix = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=1) + + sraw = XS(31, XO1=792, Rc=0) + srawx = XS(31, XO1=792, Rc=1) + + srawi = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=0) + srawix = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=1) + + srd = XS(31, XO1=539, Rc=0) + srdx = XS(31, XO1=539, Rc=1) + + srw = XS(31, XO1=536, Rc=0) + srwx = XS(31, XO1=536, Rc=1) + + stbux = XSO(31, XO1=247) + stbx = XSO(31, XO1=215) + stdcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=214, Rc=1) + stdux = XSO(31, XO1=181) + stdx = XSO(31, XO1=149) + stfdux = XSO(31, XO1=759) + stfdx = XSO(31, XO1=727) + stfiwx = XSO(31, XO1=983) + stfsux = XSO(31, XO1=695) + stfsx = XSO(31, XO1=663) + sthbrx = XSO(31, XO1=918) + sthux = XSO(31, XO1=439) + sthx = XSO(31, XO1=407) + stswi = Form("rS", "rA", "NB", "XO1")(31, XO1=725) + stswx = XSO(31, XO1=661) + stwbrx = XSO(31, XO1=662) + stwcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=150, Rc=1) + stwux = XSO(31, XO1=183) + stwx = XSO(31, XO1=151) + + subf = XO(31, XO2=40, OE=0, Rc=0) + subfx = XO(31, XO2=40, OE=0, Rc=1) + subfo = XO(31, XO2=40, OE=1, Rc=0) + subfox = XO(31, XO2=40, OE=1, Rc=1) + + subfc = XO(31, XO2=8, OE=0, Rc=0) + subfcx = XO(31, XO2=8, OE=0, Rc=1) + subfco = XO(31, XO2=8, OE=1, Rc=0) + subfcox = XO(31, XO2=8, OE=1, Rc=1) + + subfe = XO(31, XO2=136, OE=0, Rc=0) + subfex = XO(31, XO2=136, OE=0, Rc=1) + subfeo = XO(31, XO2=136, OE=1, Rc=0) + subfeox = XO(31, XO2=136, OE=1, Rc=1) + + subfme = XO0(31, OE=0, XO2=232, Rc=0) + subfmex = 
XO0(31, OE=0, XO2=232, Rc=1) + subfmeo = XO0(31, OE=1, XO2=232, Rc=0) + subfmeox= XO0(31, OE=1, XO2=232, Rc=1) + + subfze = XO0(31, OE=0, XO2=200, Rc=0) + subfzex = XO0(31, OE=0, XO2=200, Rc=1) + subfzeo = XO0(31, OE=1, XO2=200, Rc=0) + subfzeox= XO0(31, OE=1, XO2=200, Rc=1) + + sync = X(31, XO1=598) + + tlbia = X(31, XO1=370) + tlbie = Form("rB", "XO1")(31, XO1=306) + tlbsync = X(31, XO1=566) + + td = Form("TO", "rA", "rB", "XO1")(31, XO1=68) + tw = Form("TO", "rA", "rB", "XO1")(31, XO1=4) + + xor = XS(31, XO1=316, Rc=0) + xorx = XS(31, XO1=316, Rc=1) + +class PPCAssembler(BasicPPCAssembler): + BA = BasicPPCAssembler + + # awkward mnemonics: + # mftb + # most of the branch mnemonics... + + # F.2 Simplified Mnemonics for Subtract Instructions + + def subi(self, rD, rA, value): + self.addi(rD, rA, -value) + def subis(self, rD, rA, value): + self.addis(rD, rA, -value) + def subic(self, rD, rA, value): + self.addic(rD, rA, -value) + def subicx(self, rD, rA, value): + self.addicx(rD, rA, -value) + + def sub(self, rD, rA, rB): + self.subf(rD, rB, rA) + def subc(self, rD, rA, rB): + self.subfc(rD, rB, rA) + def subx(self, rD, rA, rB): + self.subfx(rD, rB, rA) + def subcx(self, rD, rA, rB): + self.subfcx(rD, rB, rA) + def subo(self, rD, rA, rB): + self.subfo(rD, rB, rA) + def subco(self, rD, rA, rB): + self.subfco(rD, rB, rA) + def subox(self, rD, rA, rB): + self.subfox(rD, rB, rA) + def subcox(self, rD, rA, rB): + self.subfcox(rD, rB, rA) + + # F.3 Simplified Mnemonics for Compare Instructions + + cmpdi = BA.cmpi(L=1) + cmpwi = BA.cmpi(L=0) + cmpldi = BA.cmpli(L=1) + cmplwi = BA.cmpli(L=0) + cmpd = BA.cmp(L=1) + cmpw = BA.cmp(L=0) + cmpld = BA.cmpl(L=1) + cmplw = BA.cmpl(L=0) + + # F.4 Simplified Mnemonics for Rotate and Shift Instructions + + def extlwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b, 0, n-1) + + def extrwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b+n, 32-n, 31) + + def inslwi(self, rA, rS, n, b): + self.rwlimi(rA, rS, 32-b, b, b + n -1) + + def 
insrwi(self, rA, rS, n, b): + self.rwlimi(rA, rS, 32-(b+n), b, b + n -1) + + def rotlwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31) + + def rotrwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, 0, 31) + + def rotlw(self, rA, rS, rB): + self.rlwnm(rA, rS, rB, 0, 31) + + def slwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31-n) + + def srwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, n, 31) + + def sldi(self, rA, rS, n): + self.rldicr(rA, rS, n, 63-n) + + def srdi(self, rA, rS, n): + self.rldicl(rA, rS, 64-n, n) + + # F.5 Simplified Mnemonics for Branch Instructions + + # there's a lot of these! + bt = BA.bc(BO=12) + bf = BA.bc(BO=4) + bdnz = BA.bc(BO=16, BI=0) + bdnzt = BA.bc(BO=8) + bdnzf = BA.bc(BO=0) + bdz = BA.bc(BO=18) + bdzt = BA.bc(BO=10) + bdzf = BA.bc(BO=2) + + bta = BA.bca(BO=12) + bfa = BA.bca(BO=4) + bdnza = BA.bca(BO=16, BI=0) + bdnzta = BA.bca(BO=8) + bdnzfa = BA.bca(BO=0) + bdza = BA.bca(BO=18) + bdzta = BA.bca(BO=10) + bdzfa = BA.bca(BO=2) + + btl = BA.bcl(BO=12) + bfl = BA.bcl(BO=4) + bdnzl = BA.bcl(BO=16, BI=0) + bdnztl = BA.bcl(BO=8) + bdnzfl = BA.bcl(BO=0) + bdzl = BA.bcl(BO=18) + bdztl = BA.bcl(BO=10) + bdzfl = BA.bcl(BO=2) + + btla = BA.bcla(BO=12) + bfla = BA.bcla(BO=4) + bdnzla = BA.bcla(BO=16, BI=0) + bdnztla = BA.bcla(BO=8) + bdnzfla = BA.bcla(BO=0) + bdzla = BA.bcla(BO=18) + bdztla = BA.bcla(BO=10) + bdzfla = BA.bcla(BO=2) + + blr = BA.bclr(BO=20, BI=0) + btlr = BA.bclr(BO=12) + bflr = BA.bclr(BO=4) + bdnzlr = BA.bclr(BO=16, BI=0) + bdnztlr = BA.bclr(BO=8) + bdnzflr = BA.bclr(BO=0) + bdzlr = BA.bclr(BO=18, BI=0) + bdztlr = BA.bclr(BO=10) + bdzflr = BA.bclr(BO=2) + + bctr = BA.bcctr(BO=20, BI=0) + btctr = BA.bcctr(BO=12) + bfctr = BA.bcctr(BO=4) + + blrl = BA.bclrl(BO=20, BI=0) + btlrl = BA.bclrl(BO=12) + bflrl = BA.bclrl(BO=4) + bdnzlrl = BA.bclrl(BO=16, BI=0) + bdnztlrl = BA.bclrl(BO=8) + bdnzflrl = BA.bclrl(BO=0) + bdzlrl = BA.bclrl(BO=18, BI=0) + bdztlrl = BA.bclrl(BO=10) + bdzflrl = BA.bclrl(BO=2) + + bctrl = BA.bcctrl(BO=20, 
BI=0) + btctrl = BA.bcctrl(BO=12) + bfctrl = BA.bcctrl(BO=4) + + # these should/could take a[n optional] crf argument, but it's a + # bit hard to see how to arrange that. + + blt = BA.bc(BO=12, BI=0) + ble = BA.bc(BO=4, BI=1) + beq = BA.bc(BO=12, BI=2) + bge = BA.bc(BO=4, BI=0) + bgt = BA.bc(BO=12, BI=1) + bnl = BA.bc(BO=4, BI=0) + bne = BA.bc(BO=4, BI=2) + bng = BA.bc(BO=4, BI=1) + bso = BA.bc(BO=12, BI=3) + bns = BA.bc(BO=4, BI=3) + bun = BA.bc(BO=12, BI=3) + bnu = BA.bc(BO=4, BI=3) + + blta = BA.bca(BO=12, BI=0) + blea = BA.bca(BO=4, BI=1) + beqa = BA.bca(BO=12, BI=2) + bgea = BA.bca(BO=4, BI=0) + bgta = BA.bca(BO=12, BI=1) + bnla = BA.bca(BO=4, BI=0) + bnea = BA.bca(BO=4, BI=2) + bnga = BA.bca(BO=4, BI=1) + bsoa = BA.bca(BO=12, BI=3) + bnsa = BA.bca(BO=4, BI=3) + buna = BA.bca(BO=12, BI=3) + bnua = BA.bca(BO=4, BI=3) + + bltl = BA.bcl(BO=12, BI=0) + blel = BA.bcl(BO=4, BI=1) + beql = BA.bcl(BO=12, BI=2) + bgel = BA.bcl(BO=4, BI=0) + bgtl = BA.bcl(BO=12, BI=1) + bnll = BA.bcl(BO=4, BI=0) + bnel = BA.bcl(BO=4, BI=2) + bngl = BA.bcl(BO=4, BI=1) + bsol = BA.bcl(BO=12, BI=3) + bnsl = BA.bcl(BO=4, BI=3) + bunl = BA.bcl(BO=12, BI=3) + bnul = BA.bcl(BO=4, BI=3) + + bltla = BA.bcla(BO=12, BI=0) + blela = BA.bcla(BO=4, BI=1) + beqla = BA.bcla(BO=12, BI=2) + bgela = BA.bcla(BO=4, BI=0) + bgtla = BA.bcla(BO=12, BI=1) + bnlla = BA.bcla(BO=4, BI=0) + bnela = BA.bcla(BO=4, BI=2) + bngla = BA.bcla(BO=4, BI=1) + bsola = BA.bcla(BO=12, BI=3) + bnsla = BA.bcla(BO=4, BI=3) + bunla = BA.bcla(BO=12, BI=3) + bnula = BA.bcla(BO=4, BI=3) + + bltlr = BA.bclr(BO=12, BI=0) + blelr = BA.bclr(BO=4, BI=1) + beqlr = BA.bclr(BO=12, BI=2) + bgelr = BA.bclr(BO=4, BI=0) + bgtlr = BA.bclr(BO=12, BI=1) + bnllr = BA.bclr(BO=4, BI=0) + bnelr = BA.bclr(BO=4, BI=2) + bnglr = BA.bclr(BO=4, BI=1) + bsolr = BA.bclr(BO=12, BI=3) + bnslr = BA.bclr(BO=4, BI=3) + bunlr = BA.bclr(BO=12, BI=3) + bnulr = BA.bclr(BO=4, BI=3) + + bltctr = BA.bcctr(BO=12, BI=0) + blectr = BA.bcctr(BO=4, BI=1) + beqctr = 
BA.bcctr(BO=12, BI=2) + bgectr = BA.bcctr(BO=4, BI=0) + bgtctr = BA.bcctr(BO=12, BI=1) + bnlctr = BA.bcctr(BO=4, BI=0) + bnectr = BA.bcctr(BO=4, BI=2) + bngctr = BA.bcctr(BO=4, BI=1) + bsoctr = BA.bcctr(BO=12, BI=3) + bnsctr = BA.bcctr(BO=4, BI=3) + bunctr = BA.bcctr(BO=12, BI=3) + bnuctr = BA.bcctr(BO=4, BI=3) + + bltlrl = BA.bclrl(BO=12, BI=0) + blelrl = BA.bclrl(BO=4, BI=1) + beqlrl = BA.bclrl(BO=12, BI=2) + bgelrl = BA.bclrl(BO=4, BI=0) + bgtlrl = BA.bclrl(BO=12, BI=1) + bnllrl = BA.bclrl(BO=4, BI=0) + bnelrl = BA.bclrl(BO=4, BI=2) + bnglrl = BA.bclrl(BO=4, BI=1) + bsolrl = BA.bclrl(BO=12, BI=3) + bnslrl = BA.bclrl(BO=4, BI=3) + bunlrl = BA.bclrl(BO=12, BI=3) + bnulrl = BA.bclrl(BO=4, BI=3) + + bltctrl = BA.bcctrl(BO=12, BI=0) + blectrl = BA.bcctrl(BO=4, BI=1) + beqctrl = BA.bcctrl(BO=12, BI=2) + bgectrl = BA.bcctrl(BO=4, BI=0) + bgtctrl = BA.bcctrl(BO=12, BI=1) + bnlctrl = BA.bcctrl(BO=4, BI=0) + bnectrl = BA.bcctrl(BO=4, BI=2) + bngctrl = BA.bcctrl(BO=4, BI=1) + bsoctrl = BA.bcctrl(BO=12, BI=3) + bnsctrl = BA.bcctrl(BO=4, BI=3) + bunctrl = BA.bcctrl(BO=12, BI=3) + bnuctrl = BA.bcctrl(BO=4, BI=3) + + # whew! and we haven't even begun the predicted versions... 
+ + # F.6 Simplified Mnemonics for Condition Register + # Logical Instructions + + crset = BA.creqv(crbA="crbD", crbB="crbD") + crclr = BA.crxor(crbA="crbD", crbB="crbD") + crmove = BA.cror(crbA="crbB") + crnot = BA.crnor(crbA="crbB") + + # F.7 Simplified Mnemonics for Trap Instructions + + trap = BA.tw(TO=31, rA=0, rB=0) + twlt = BA.tw(TO=16) + twle = BA.tw(TO=20) + tweq = BA.tw(TO=4) + twge = BA.tw(TO=12) + twgt = BA.tw(TO=8) + twnl = BA.tw(TO=12) + twng = BA.tw(TO=24) + twllt = BA.tw(TO=2) + twlle = BA.tw(TO=6) + twlge = BA.tw(TO=5) + twlgt = BA.tw(TO=1) + twlnl = BA.tw(TO=5) + twlng = BA.tw(TO=6) + + twlti = BA.twi(TO=16) + twlei = BA.twi(TO=20) + tweqi = BA.twi(TO=4) + twgei = BA.twi(TO=12) + twgti = BA.twi(TO=8) + twnli = BA.twi(TO=12) + twnei = BA.twi(TO=24) + twngi = BA.twi(TO=20) + twllti = BA.twi(TO=2) + twllei = BA.twi(TO=6) + twlgei = BA.twi(TO=5) + twlgti = BA.twi(TO=1) + twlnli = BA.twi(TO=5) + twlngi = BA.twi(TO=6) + + # F.8 Simplified Mnemonics for Special-Purpose + # Registers + + mfctr = BA.mfspr(spr=9) + mflr = BA.mfspr(spr=8) + mftbl = BA.mftb(spr=268) + mftbu = BA.mftb(spr=269) + mfxer = BA.mfspr(spr=1) + + mtctr = BA.mtspr(spr=9) + mtlr = BA.mtspr(spr=8) + mtxer = BA.mtspr(spr=1) + + # F.9 Recommended Simplified Mnemonics + + nop = BA.ori(rS=0, rA=0, UIMM=0) + + li = BA.addi(rA=0) + lis = BA.addis(rA=0) + + mr = BA.or_(rB="rS") + mrx = BA.or_x(rB="rS") + + not_ = BA.nor(rB="rS") + not_x = BA.norx(rB="rS") + + mtcr = BA.mtcrf(CRM=0xFF) + + def emit(self, insn): + bytes = struct.pack("i", insn) + for byte in bytes: + self.writechar(byte) + +def hi(w): + return w >> 16 + +def ha(w): + if (w >> 15) & 1: + return (w >> 16) + 1 + else: + return w >> 16 + +def lo(w): + return w & 0x0000FFFF + +def la(w): + v = w & 0x0000FFFF + if v & 0x8000: + return -((v ^ 0xFFFF) + 1) # "sign extend" to 32 bits + return v + +def highest(w): + return w >> 48 + +def higher(w): + return (w >> 32) & 0x0000FFFF + +def high(w): + return (w >> 16) & 0x0000FFFF + +class 
GuardToken(object): + def __init__(self, descr, failargs, faillocs, offset, + save_exc=False, is_invalidate=False): + self.descr = descr + self.offset = offset + self.is_invalidate = is_invalidate + self.failargs = failargs + self.faillocs = faillocs + self.save_exc = save_exc + +class PPCBuilder(BlockBuilderMixin, PPCAssembler): + def __init__(self, failargs_limit=1000): + PPCAssembler.__init__(self) + self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) + + def load_imm(self, rD, word): + rD = rD.as_key() + if word <= 32767 and word >= -32768: + self.li(rD, word) + elif IS_PPC_32 or (word <= 2147483647 and word >= -2147483648): + self.lis(rD, hi(word)) + if word & 0xFFFF != 0: + self.ori(rD, rD, lo(word)) + else: + self.lis(rD, highest(word)) + self.ori(rD, rD, higher(word)) + self.sldi(rD, rD, 32) + self.oris(rD, rD, high(word)) + self.ori(rD, rD, lo(word)) + + def load_from_addr(self, rD, addr): + if IS_PPC_32: + self.addis(rD, 0, ha(addr)) + self.lwz(rD, rD, la(addr)) + else: + self.load_word(rD, addr) + self.ld(rD, rD, 0) + + def store_reg(self, source_reg, addr): + self.load_imm(r.r0, addr) + if IS_PPC_32: + self.stwx(source_reg.value, 0, 0) + else: + self.std(source_reg.value, 0, 0) + +class BranchUpdater(PPCAssembler): + def __init__(self): + PPCAssembler.__init__(self) + self.init_block_builder() + + def write_to_mem(self, addr): + self.assemble() + self.copy_to_raw_memory(addr) + + def assemble(self, dump=os.environ.has_key('PYPY_DEBUG')): + insns = self.assemble0(dump) + for i in insns: + self.emit(i) + +def b(n): + r = [] + for i in range(32): + r.append(n&1) + n >>= 1 + r.reverse() + return ''.join(map(str, r)) + +def make_operations(): + def not_implemented(builder, trace_op, cpu, *rest_args): + import pdb; pdb.set_trace() + + oplist = [None] * (rop._LAST + 1) + for key, val in rop.__dict__.items(): + if key.startswith("_"): + continue + opname = key.lower() + methname = "emit_%s" % opname + if hasattr(PPCBuilder, methname): + 
oplist[val] = getattr(PPCBuilder, methname).im_func + else: + oplist[val] = not_implemented + return oplist + +PPCBuilder.operations = make_operations() diff --git a/pypy/jit/backend/ppc/ppcgen/helper/assembler.py b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py --- a/pypy/jit/backend/ppc/ppcgen/helper/assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/helper/assembler.py @@ -4,14 +4,14 @@ def f(self, op, arglocs, regalloc): l0, l1, res = arglocs if l1.is_imm(): - self.cmpwi(0, l0.value, l1.value) + self.mc.cmpwi(0, l0.value, l1.value) else: - self.cmpw(0, l0.value, l1.value) + self.mc.cmpw(0, l0.value, l1.value) if condition == c.LE: - self.cror(0, 0, 2) + self.mc.cror(0, 0, 2) resval = res.value - self.mfcr(resval) - self.rlwinm(resval, resval, 1, 31, 31) + self.mc.mfcr(resval) + self.mc.rlwinm(resval, resval, 1, 31, 31) return f diff --git a/pypy/jit/backend/ppc/ppcgen/opassembler.py b/pypy/jit/backend/ppc/ppcgen/opassembler.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/ppcgen/opassembler.py @@ -0,0 +1,86 @@ +from pypy.jit.backend.ppc.ppcgen.helper.assembler import gen_emit_cmp_op +import pypy.jit.backend.ppc.ppcgen.condition as c +import pypy.jit.backend.ppc.ppcgen.register as r +from pypy.jit.backend.ppc.ppcgen.arch import GPR_SAVE_AREA, IS_PPC_32, WORD + +from pypy.jit.metainterp.history import LoopToken + +class OpAssembler(object): + + def emit_int_add(self, op, arglocs, regalloc): + l0, l1, res = arglocs + if l0.is_imm(): + self.mc.addi(res.value, l1.value, l0.value) + elif l1.is_imm(): + self.mc.addi(res.value, l0.value, l1.value) + else: + self.add(res.value, l0.value, l1.value) + + emit_int_le = gen_emit_cmp_op(c.LE) + + def _guard_epilogue(self, op, failargs): + fail_descr = op.getdescr() + fail_index = self._get_identifier_from_descr(fail_descr) + fail_descr.index = fail_index + self.cpu.saved_descr[fail_index] = fail_descr + numops = self.mc.get_number_of_ops() + self.mc.beq(0) + reglist = [] + for failarg in failargs: + if failarg is 
None: + reglist.append(None) + else: + reglist.append(failarg) + self.patch_list.append((numops, fail_index, op, reglist)) + + def _emit_guard(self, op, arglocs, save_exc=False, + is_guard_not_invalidated=False): + descr = op.getdescr() + assert isinstance(descr, AbstractFailDescr) + pos = self.get_relative_pos() + self.mc.b(0) # has to be patched later on + self.pending_guards.append(GuardToken(descr, + failargs=op.getfailargs(), + faillocs=arglocs, + offset=pos, + is_invalidate=is_guard_not_invalidated, + save_exc=save_exc)) + + def emit_guard_true(self, op, arglocs, regalloc): + l0 = arglocs[0] + failargs = arglocs[1:] + #import pdb; pdb.set_trace() + self.mc.cmpi(l0.value, 0) + #self._emit_guard(op, failargs) + self._guard_epilogue(op, failargs) + + def emit_finish(self, op, arglocs, regalloc): + descr = op.getdescr() + identifier = self._get_identifier_from_descr(descr) + self.cpu.saved_descr[identifier] = descr + args = op.getarglist() + for index, arg in enumerate(arglocs): + addr = self.fail_boxes_int.get_addr_for_num(index) + self.store_reg(arg, addr) + + framesize = 256 + GPR_SAVE_AREA + + self._restore_nonvolatiles() + + if IS_PPC_32: + self.mc.lwz(0, 1, self.framesize + WORD) + else: + self.mc.ld(0, 1, framesize + WORD) + self.mc.mtlr(0) + self.mc.addi(1, 1, framesize) + self.load_imm(r.r3, identifier) + self.mc.blr() + + def emit_jump(self, op, arglocs, regalloc): + descr = op.getdescr() + assert isinstance(descr, LoopToken) + if descr._ppc_bootstrap_code == 0: + curpos = self.mc.get_rel_pos() + self.mc.b(descr._ppc_loop_code - curpos) + else: + assert 0, "case not implemented yet" diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -5,15 +5,17 @@ from pypy.jit.backend.ppc.ppcgen.regalloc import (TempInt, PPCFrameManager, Regalloc) from pypy.jit.backend.ppc.ppcgen.assembler import Assembler +from 
pypy.jit.backend.ppc.ppcgen.opassembler import OpAssembler from pypy.jit.backend.ppc.ppcgen.symbol_lookup import lookup +from pypy.jit.backend.ppc.ppcgen.codebuilder import PPCBuilder from pypy.jit.backend.ppc.ppcgen.arch import (IS_PPC_32, WORD, NONVOLATILES, GPR_SAVE_AREA) from pypy.jit.backend.ppc.ppcgen.helper.assembler import gen_emit_cmp_op import pypy.jit.backend.ppc.ppcgen.register as r import pypy.jit.backend.ppc.ppcgen.condition as c -from pypy.jit.metainterp.history import Const, ConstPtr, LoopToken -from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin -from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager +from pypy.jit.metainterp.history import (Const, ConstPtr, LoopToken, + AbstractFailDescr) +from pypy.jit.backend.llsupport.asmmemmgr import (BlockBuilderMixin, AsmMemoryManager, MachineDataBlockWrapper) from pypy.jit.backend.llsupport.regalloc import (RegisterManager, compute_vars_longevity) from pypy.jit.backend.llsupport import symbolic @@ -24,870 +26,1878 @@ ConstFloat, Box, INT, REF, FLOAT) from pypy.jit.backend.x86.support import values_array -A = Form("frD", "frA", "frB", "XO3", "Rc") -A1 = Form("frD", "frB", "XO3", "Rc") -A2 = Form("frD", "frA", "frC", "XO3", "Rc") -A3 = Form("frD", "frA", "frC", "frB", "XO3", "Rc") - -I = Form("LI", "AA", "LK") - -B = Form("BO", "BI", "BD", "AA", "LK") - -SC = Form("AA") # fudge - -DD = Form("rD", "rA", "SIMM") -DDO = Form("rD", "rA", "ds", "XO4") -DS = Form("rA", "rS", "UIMM") - -X = Form("XO1") -XS = Form("rA", "rS", "rB", "XO1", "Rc") -XSO = Form("rS", "rA", "rB", "XO1") -XD = Form("rD", "rA", "rB", "XO1") -XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc") -XO0 = Form("rD", "rA", "OE", "XO2", "Rc") -XDB = Form("frD", "frB", "XO1", "Rc") -XS0 = Form("rA", "rS", "XO1", "Rc") -X0 = Form("rA", "rB", "XO1") -XcAB = Form("crfD", "rA", "rB", "XO1") -XN = Form("rD", "rA", "NB", "XO1") -XL = Form("crbD", "crbA", "crbB", "XO1") -XL1 = Form("crfD", "crfS") -XL2 = Form("crbD", "XO1", "Rc") -XFL = 
Form("FM", "frB", "XO1", "Rc") -XFX = Form("CRM", "rS", "XO1") - -MI = Form("rA", "rS", "SH", "MB", "ME", "Rc") -MB = Form("rA", "rS", "rB", "MB", "ME", "Rc") -MDI = Form("rA", "rS", "sh", "mbe", "XO5", "Rc") -MDS = Form("rA", "rS", "rB", "mbe", "XO5", "Rc") - -class BasicPPCAssembler(Assembler): - - def disassemble(cls, inst, labels={}, pc=0): - cache = cls.__dict__.get('idesc cache') - if cache is None: - idescs = cls.get_idescs() - cache = {} - for n, i in idescs: - cache.setdefault(i.specializations[ppc_fields['opcode']], - []).append((n,i)) - setattr(cls, 'idesc cache', cache) - matches = [] - idescs = cache[ppc_fields['opcode'].decode(inst)] - for name, idesc in idescs: - m = idesc.match(inst) - if m > 0: - matches.append((m, idesc, name)) - if matches: - score, idesc, name = max(matches) - return idesc.disassemble(name, inst, labels, pc) - disassemble = classmethod(disassemble) - - # "basic" means no simplified mnemonics - - # I form - b = I(18, AA=0, LK=0) - ba = I(18, AA=1, LK=0) - bl = I(18, AA=0, LK=1) - bla = I(18, AA=1, LK=1) - - # B form - bc = B(16, AA=0, LK=0) - bcl = B(16, AA=0, LK=1) - bca = B(16, AA=1, LK=0) - bcla = B(16, AA=1, LK=1) - - # SC form - sc = SC(17, AA=1) # it's not really the aa field... 
- - # D form - addi = DD(14) - addic = DD(12) - addicx = DD(13) - addis = DD(15) - - andix = DS(28) - andisx = DS(29) - - cmpi = Form("crfD", "L", "rA", "SIMM")(11) - cmpi.default(L=0).default(crfD=0) - cmpli = Form("crfD", "L", "rA", "UIMM")(10) - cmpli.default(L=0).default(crfD=0) - - lbz = DD(34) - lbzu = DD(35) - ld = DDO(58, XO4=0) - ldu = DDO(58, XO4=1) - lfd = DD(50) - lfdu = DD(51) - lfs = DD(48) - lfsu = DD(49) - lha = DD(42) - lhau = DD(43) - lhz = DD(40) - lhzu = DD(41) - lmw = DD(46) - lwa = DDO(58, XO4=2) - lwz = DD(32) - lwzu = DD(33) - - mulli = DD(7) - ori = DS(24) - oris = DS(25) - - stb = DD(38) - stbu = DD(39) - std = DDO(62, XO4=0) - stdu = DDO(62, XO4=1) - stfd = DD(54) - stfdu = DD(55) - stfs = DD(52) - stfsu = DD(53) - sth = DD(44) - sthu = DD(45) - stmw = DD(47) - stw = DD(36) - stwu = DD(37) - - subfic = DD(8) - tdi = Form("TO", "rA", "SIMM")(2) - twi = Form("TO", "rA", "SIMM")(3) - xori = DS(26) - xoris = DS(27) - - # X form - - and_ = XS(31, XO1=28, Rc=0) - and_x = XS(31, XO1=28, Rc=1) - - andc_ = XS(31, XO1=60, Rc=0) - andc_x = XS(31, XO1=60, Rc=1) - - # is the L bit for 64 bit compares? 
hmm - cmp = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=0) - cmp.default(L=0).default(crfD=0) - cmpl = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=32) - cmpl.default(L=0).default(crfD=0) - - cntlzd = XS0(31, XO1=58, Rc=0) - cntlzdx = XS0(31, XO1=58, Rc=1) - cntlzw = XS0(31, XO1=26, Rc=0) - cntlzwx = XS0(31, XO1=26, Rc=1) - - dcba = X0(31, XO1=758) - dcbf = X0(31, XO1=86) - dcbi = X0(31, XO1=470) - dcbst = X0(31, XO1=54) - dcbt = X0(31, XO1=278) - dcbtst = X0(31, XO1=246) - dcbz = X0(31, XO1=1014) - - eciwx = XD(31, XO1=310) - ecowx = XS(31, XO1=438, Rc=0) - - eieio = X(31, XO1=854) - - eqv = XS(31, XO1=284, Rc=0) - eqvx = XS(31, XO1=284, Rc=1) - - extsb = XS0(31, XO1=954, Rc=0) - extsbx = XS0(31, XO1=954, Rc=1) - - extsh = XS0(31, XO1=922, Rc=0) - extshx = XS0(31, XO1=922, Rc=1) - - extsw = XS0(31, XO1=986, Rc=0) - extswx = XS0(31, XO1=986, Rc=1) - - fabs = XDB(63, XO1=264, Rc=0) - fabsx = XDB(63, XO1=264, Rc=1) - - fcmpo = XcAB(63, XO1=32) - fcmpu = XcAB(63, XO1=0) - - fcfid = XDB(63, XO1=846, Rc=0) - fcfidx = XDB(63, XO1=846, Rc=1) - - fctid = XDB(63, XO1=814, Rc=0) - fctidx = XDB(63, XO1=814, Rc=1) - - fctidz = XDB(63, XO1=815, Rc=0) - fctidzx = XDB(63, XO1=815, Rc=1) - - fctiw = XDB(63, XO1=14, Rc=0) - fctiwx = XDB(63, XO1=14, Rc=1) - - fctiwz = XDB(63, XO1=15, Rc=0) - fctiwzx = XDB(63, XO1=15, Rc=1) - - fmr = XDB(63, XO1=72, Rc=0) - fmrx = XDB(63, XO1=72, Rc=1) - - fnabs = XDB(63, XO1=136, Rc=0) - fnabsx = XDB(63, XO1=136, Rc=1) - - fneg = XDB(63, XO1=40, Rc=0) - fnegx = XDB(63, XO1=40, Rc=1) - - frsp = XDB(63, XO1=12, Rc=0) - frspx = XDB(63, XO1=12, Rc=1) - - fsqrt = XDB(63, XO1=22, Rc=0) - - icbi = X0(31, XO1=982) - - lbzux = XD(31, XO1=119) - lbzx = XD(31, XO1=87) - ldarx = XD(31, XO1=84) - ldux = XD(31, XO1=53) - ldx = XD(31, XO1=21) - lfdux = XD(31, XO1=631) - lfdx = XD(31, XO1=599) - lfsux = XD(31, XO1=567) - lfsx = XD(31, XO1=535) - lhaux = XD(31, XO1=375) - lhax = XD(31, XO1=343) - lhbrx = XD(31, XO1=790) - lhzux = XD(31, XO1=311) - lhzx = XD(31, 
XO1=279) - lswi = XD(31, XO1=597) - lswx = XD(31, XO1=533) - lwarx = XD(31, XO1=20) - lwaux = XD(31, XO1=373) - lwax = XD(31, XO1=341) - lwbrx = XD(31, XO1=534) - lwzux = XD(31, XO1=55) - lwzx = XD(31, XO1=23) - - mcrfs = Form("crfD", "crfS", "XO1")(63, XO1=64) - mcrxr = Form("crfD", "XO1")(31, XO1=512) - mfcr = Form("rD", "XO1")(31, XO1=19) - mffs = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=0) - mffsx = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=1) - mfmsr = Form("rD", "XO1")(31, XO1=83) - mfsr = Form("rD", "SR", "XO1")(31, XO1=595) - mfsrin = XDB(31, XO1=659, Rc=0) - - add = XO(31, XO2=266, OE=0, Rc=0) - addx = XO(31, XO2=266, OE=0, Rc=1) - addo = XO(31, XO2=266, OE=1, Rc=0) - addox = XO(31, XO2=266, OE=1, Rc=1) - - addc = XO(31, XO2=10, OE=0, Rc=0) - addcx = XO(31, XO2=10, OE=0, Rc=1) - addco = XO(31, XO2=10, OE=1, Rc=0) - addcox = XO(31, XO2=10, OE=1, Rc=1) - - adde = XO(31, XO2=138, OE=0, Rc=0) - addex = XO(31, XO2=138, OE=0, Rc=1) - addeo = XO(31, XO2=138, OE=1, Rc=0) - addeox = XO(31, XO2=138, OE=1, Rc=1) - - addme = XO(31, rB=0, XO2=234, OE=0, Rc=0) - addmex = XO(31, rB=0, XO2=234, OE=0, Rc=1) - addmeo = XO(31, rB=0, XO2=234, OE=1, Rc=0) - addmeox = XO(31, rB=0, XO2=234, OE=1, Rc=1) - - addze = XO(31, rB=0, XO2=202, OE=0, Rc=0) - addzex = XO(31, rB=0, XO2=202, OE=0, Rc=1) - addzeo = XO(31, rB=0, XO2=202, OE=1, Rc=0) - addzeox = XO(31, rB=0, XO2=202, OE=1, Rc=1) - - bcctr = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=0) - bcctrl = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=1) - - bclr = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=0) - bclrl = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=1) - - crand = XL(19, XO1=257) - crandc = XL(19, XO1=129) - creqv = XL(19, XO1=289) - crnand = XL(19, XO1=225) - crnor = XL(19, XO1=33) - cror = XL(19, XO1=449) - crorc = XL(19, XO1=417) - crxor = XL(19, XO1=193) - - divd = XO(31, XO2=489, OE=0, Rc=0) - divdx = XO(31, XO2=489, OE=0, Rc=1) - divdo = XO(31, XO2=489, OE=1, Rc=0) - divdox = XO(31, XO2=489, OE=1, Rc=1) - - 
divdu = XO(31, XO2=457, OE=0, Rc=0) - divdux = XO(31, XO2=457, OE=0, Rc=1) - divduo = XO(31, XO2=457, OE=1, Rc=0) - divduox = XO(31, XO2=457, OE=1, Rc=1) - - divw = XO(31, XO2=491, OE=0, Rc=0) - divwx = XO(31, XO2=491, OE=0, Rc=1) - divwo = XO(31, XO2=491, OE=1, Rc=0) - divwox = XO(31, XO2=491, OE=1, Rc=1) - - divwu = XO(31, XO2=459, OE=0, Rc=0) - divwux = XO(31, XO2=459, OE=0, Rc=1) - divwuo = XO(31, XO2=459, OE=1, Rc=0) - divwuox = XO(31, XO2=459, OE=1, Rc=1) - - fadd = A(63, XO3=21, Rc=0) - faddx = A(63, XO3=21, Rc=1) - fadds = A(59, XO3=21, Rc=0) - faddsx = A(59, XO3=21, Rc=1) - - fdiv = A(63, XO3=18, Rc=0) - fdivx = A(63, XO3=18, Rc=1) - fdivs = A(59, XO3=18, Rc=0) - fdivsx = A(59, XO3=18, Rc=1) - - fmadd = A3(63, XO3=19, Rc=0) - fmaddx = A3(63, XO3=19, Rc=1) - fmadds = A3(59, XO3=19, Rc=0) - fmaddsx = A3(59, XO3=19, Rc=1) - - fmsub = A3(63, XO3=28, Rc=0) - fmsubx = A3(63, XO3=28, Rc=1) - fmsubs = A3(59, XO3=28, Rc=0) - fmsubsx = A3(59, XO3=28, Rc=1) - - fmul = A2(63, XO3=25, Rc=0) - fmulx = A2(63, XO3=25, Rc=1) - fmuls = A2(59, XO3=25, Rc=0) - fmulsx = A2(59, XO3=25, Rc=1) - - fnmadd = A3(63, XO3=31, Rc=0) - fnmaddx = A3(63, XO3=31, Rc=1) - fnmadds = A3(59, XO3=31, Rc=0) - fnmaddsx = A3(59, XO3=31, Rc=1) - - fnmsub = A3(63, XO3=30, Rc=0) - fnmsubx = A3(63, XO3=30, Rc=1) - fnmsubs = A3(59, XO3=30, Rc=0) - fnmsubsx = A3(59, XO3=30, Rc=1) - - fres = A1(59, XO3=24, Rc=0) - fresx = A1(59, XO3=24, Rc=1) - - frsp = A1(63, XO3=12, Rc=0) - frspx = A1(63, XO3=12, Rc=1) - - frsqrte = A1(63, XO3=26, Rc=0) - frsqrtex = A1(63, XO3=26, Rc=1) - - fsel = A3(63, XO3=23, Rc=0) - fselx = A3(63, XO3=23, Rc=1) - - frsqrt = A1(63, XO3=22, Rc=0) - frsqrtx = A1(63, XO3=22, Rc=1) - frsqrts = A1(59, XO3=22, Rc=0) - frsqrtsx = A1(59, XO3=22, Rc=1) - - fsub = A(63, XO3=20, Rc=0) - fsubx = A(63, XO3=20, Rc=1) - fsubs = A(59, XO3=20, Rc=0) - fsubsx = A(59, XO3=20, Rc=1) - - isync = X(19, XO1=150) - - mcrf = XL1(19) - - mfspr = Form("rD", "spr", "XO1")(31, XO1=339) - mftb = Form("rD", 
"spr", "XO1")(31, XO1=371) - - mtcrf = XFX(31, XO1=144) - - mtfsb0 = XL2(63, XO1=70, Rc=0) - mtfsb0x = XL2(63, XO1=70, Rc=1) - mtfsb1 = XL2(63, XO1=38, Rc=0) - mtfsb1x = XL2(63, XO1=38, Rc=1) - - mtfsf = XFL(63, XO1=711, Rc=0) - mtfsfx = XFL(63, XO1=711, Rc=1) - - mtfsfi = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=0) - mtfsfix = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=1) - - mtmsr = Form("rS", "XO1")(31, XO1=146) - - mtspr = Form("rS", "spr", "XO1")(31, XO1=467) - - mtsr = Form("rS", "SR", "XO1")(31, XO1=210) - mtsrin = Form("rS", "rB", "XO1")(31, XO1=242) - - mulhd = XO(31, OE=0, XO2=73, Rc=0) - mulhdx = XO(31, OE=0, XO2=73, Rc=1) - - mulhdu = XO(31, OE=0, XO2=9, Rc=0) - mulhdux = XO(31, OE=0, XO2=9, Rc=1) - - mulld = XO(31, OE=0, XO2=233, Rc=0) - mulldx = XO(31, OE=0, XO2=233, Rc=1) - mulldo = XO(31, OE=1, XO2=233, Rc=0) - mulldox = XO(31, OE=1, XO2=233, Rc=1) - - mulhw = XO(31, OE=0, XO2=75, Rc=0) - mulhwx = XO(31, OE=0, XO2=75, Rc=1) - - mulhwu = XO(31, OE=0, XO2=11, Rc=0) - mulhwux = XO(31, OE=0, XO2=11, Rc=1) - - mullw = XO(31, OE=0, XO2=235, Rc=0) - mullwx = XO(31, OE=0, XO2=235, Rc=1) - mullwo = XO(31, OE=1, XO2=235, Rc=0) - mullwox = XO(31, OE=1, XO2=235, Rc=1) - - nand = XS(31, XO1=476, Rc=0) - nandx = XS(31, XO1=476, Rc=1) - - neg = XO0(31, OE=0, XO2=104, Rc=0) - negx = XO0(31, OE=0, XO2=104, Rc=1) - nego = XO0(31, OE=1, XO2=104, Rc=0) - negox = XO0(31, OE=1, XO2=104, Rc=1) - - nor = XS(31, XO1=124, Rc=0) - norx = XS(31, XO1=124, Rc=1) - - or_ = XS(31, XO1=444, Rc=0) - or_x = XS(31, XO1=444, Rc=1) - - orc = XS(31, XO1=412, Rc=0) - orcx = XS(31, XO1=412, Rc=1) - - rfi = X(19, XO1=50) - - rfid = X(19, XO1=18) - - rldcl = MDS(30, XO5=8, Rc=0) - rldclx = MDS(30, XO5=8, Rc=1) - rldcr = MDS(30, XO5=9, Rc=0) - rldcrx = MDS(30, XO5=9, Rc=1) - - rldic = MDI(30, XO5=2, Rc=0) - rldicx = MDI(30, XO5=2, Rc=1) - rldicl = MDI(30, XO5=0, Rc=0) - rldiclx = MDI(30, XO5=0, Rc=1) - rldicr = MDI(30, XO5=1, Rc=0) - rldicrx = MDI(30, XO5=1, Rc=1) - rldimi = 
MDI(30, XO5=3, Rc=0) - rldimix = MDI(30, XO5=3, Rc=1) - - rlwimi = MI(20, Rc=0) - rlwimix = MI(20, Rc=1) - - rlwinm = MI(21, Rc=0) - rlwinmx = MI(21, Rc=1) - - rlwnm = MB(23, Rc=0) - rlwnmx = MB(23, Rc=1) - - sld = XS(31, XO1=27, Rc=0) - sldx = XS(31, XO1=27, Rc=1) - - slw = XS(31, XO1=24, Rc=0) - slwx = XS(31, XO1=24, Rc=1) - - srad = XS(31, XO1=794, Rc=0) - sradx = XS(31, XO1=794, Rc=1) - - sradi = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=0) - sradix = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=1) - - sraw = XS(31, XO1=792, Rc=0) - srawx = XS(31, XO1=792, Rc=1) - - srawi = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=0) - srawix = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=1) - - srd = XS(31, XO1=539, Rc=0) - srdx = XS(31, XO1=539, Rc=1) - - srw = XS(31, XO1=536, Rc=0) - srwx = XS(31, XO1=536, Rc=1) - - stbux = XSO(31, XO1=247) - stbx = XSO(31, XO1=215) - stdcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=214, Rc=1) - stdux = XSO(31, XO1=181) - stdx = XSO(31, XO1=149) - stfdux = XSO(31, XO1=759) - stfdx = XSO(31, XO1=727) - stfiwx = XSO(31, XO1=983) - stfsux = XSO(31, XO1=695) - stfsx = XSO(31, XO1=663) - sthbrx = XSO(31, XO1=918) - sthux = XSO(31, XO1=439) - sthx = XSO(31, XO1=407) - stswi = Form("rS", "rA", "NB", "XO1")(31, XO1=725) - stswx = XSO(31, XO1=661) - stwbrx = XSO(31, XO1=662) - stwcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=150, Rc=1) - stwux = XSO(31, XO1=183) - stwx = XSO(31, XO1=151) - - subf = XO(31, XO2=40, OE=0, Rc=0) - subfx = XO(31, XO2=40, OE=0, Rc=1) - subfo = XO(31, XO2=40, OE=1, Rc=0) - subfox = XO(31, XO2=40, OE=1, Rc=1) - - subfc = XO(31, XO2=8, OE=0, Rc=0) - subfcx = XO(31, XO2=8, OE=0, Rc=1) - subfco = XO(31, XO2=8, OE=1, Rc=0) - subfcox = XO(31, XO2=8, OE=1, Rc=1) - - subfe = XO(31, XO2=136, OE=0, Rc=0) - subfex = XO(31, XO2=136, OE=0, Rc=1) - subfeo = XO(31, XO2=136, OE=1, Rc=0) - subfeox = XO(31, XO2=136, OE=1, Rc=1) - - subfme = XO0(31, OE=0, XO2=232, Rc=0) - subfmex = 
XO0(31, OE=0, XO2=232, Rc=1) - subfmeo = XO0(31, OE=1, XO2=232, Rc=0) - subfmeox= XO0(31, OE=1, XO2=232, Rc=1) - - subfze = XO0(31, OE=0, XO2=200, Rc=0) - subfzex = XO0(31, OE=0, XO2=200, Rc=1) - subfzeo = XO0(31, OE=1, XO2=200, Rc=0) - subfzeox= XO0(31, OE=1, XO2=200, Rc=1) - - sync = X(31, XO1=598) - - tlbia = X(31, XO1=370) - tlbie = Form("rB", "XO1")(31, XO1=306) - tlbsync = X(31, XO1=566) - - td = Form("TO", "rA", "rB", "XO1")(31, XO1=68) - tw = Form("TO", "rA", "rB", "XO1")(31, XO1=4) - - xor = XS(31, XO1=316, Rc=0) - xorx = XS(31, XO1=316, Rc=1) - -class PPCAssembler(BasicPPCAssembler): - BA = BasicPPCAssembler - - # awkward mnemonics: - # mftb - # most of the branch mnemonics... - - # F.2 Simplified Mnemonics for Subtract Instructions - - def subi(self, rD, rA, value): - self.addi(rD, rA, -value) - def subis(self, rD, rA, value): - self.addis(rD, rA, -value) - def subic(self, rD, rA, value): - self.addic(rD, rA, -value) - def subicx(self, rD, rA, value): - self.addicx(rD, rA, -value) - - def sub(self, rD, rA, rB): - self.subf(rD, rB, rA) - def subc(self, rD, rA, rB): - self.subfc(rD, rB, rA) - def subx(self, rD, rA, rB): - self.subfx(rD, rB, rA) - def subcx(self, rD, rA, rB): - self.subfcx(rD, rB, rA) - def subo(self, rD, rA, rB): - self.subfo(rD, rB, rA) - def subco(self, rD, rA, rB): - self.subfco(rD, rB, rA) - def subox(self, rD, rA, rB): - self.subfox(rD, rB, rA) - def subcox(self, rD, rA, rB): - self.subfcox(rD, rB, rA) - - # F.3 Simplified Mnemonics for Compare Instructions - - cmpdi = BA.cmpi(L=1) - cmpwi = BA.cmpi(L=0) - cmpldi = BA.cmpli(L=1) - cmplwi = BA.cmpli(L=0) - cmpd = BA.cmp(L=1) - cmpw = BA.cmp(L=0) - cmpld = BA.cmpl(L=1) - cmplw = BA.cmpl(L=0) - - # F.4 Simplified Mnemonics for Rotate and Shift Instructions - - def extlwi(self, rA, rS, n, b): - self.rlwinm(rA, rS, b, 0, n-1) - - def extrwi(self, rA, rS, n, b): - self.rlwinm(rA, rS, b+n, 32-n, 31) - - def inslwi(self, rA, rS, n, b): - self.rwlimi(rA, rS, 32-b, b, b + n -1) - - def 
insrwi(self, rA, rS, n, b): - self.rwlimi(rA, rS, 32-(b+n), b, b + n -1) - - def rotlwi(self, rA, rS, n): - self.rlwinm(rA, rS, n, 0, 31) - - def rotrwi(self, rA, rS, n): - self.rlwinm(rA, rS, 32-n, 0, 31) - - def rotlw(self, rA, rS, rB): - self.rlwnm(rA, rS, rB, 0, 31) - - def slwi(self, rA, rS, n): - self.rlwinm(rA, rS, n, 0, 31-n) - - def srwi(self, rA, rS, n): - self.rlwinm(rA, rS, 32-n, n, 31) - - def sldi(self, rA, rS, n): - self.rldicr(rA, rS, n, 63-n) - - def srdi(self, rA, rS, n): - self.rldicl(rA, rS, 64-n, n) - - # F.5 Simplified Mnemonics for Branch Instructions - - # there's a lot of these! - bt = BA.bc(BO=12) - bf = BA.bc(BO=4) - bdnz = BA.bc(BO=16, BI=0) - bdnzt = BA.bc(BO=8) - bdnzf = BA.bc(BO=0) - bdz = BA.bc(BO=18) - bdzt = BA.bc(BO=10) - bdzf = BA.bc(BO=2) - - bta = BA.bca(BO=12) - bfa = BA.bca(BO=4) - bdnza = BA.bca(BO=16, BI=0) - bdnzta = BA.bca(BO=8) - bdnzfa = BA.bca(BO=0) - bdza = BA.bca(BO=18) - bdzta = BA.bca(BO=10) - bdzfa = BA.bca(BO=2) - - btl = BA.bcl(BO=12) - bfl = BA.bcl(BO=4) - bdnzl = BA.bcl(BO=16, BI=0) - bdnztl = BA.bcl(BO=8) - bdnzfl = BA.bcl(BO=0) - bdzl = BA.bcl(BO=18) - bdztl = BA.bcl(BO=10) - bdzfl = BA.bcl(BO=2) - - btla = BA.bcla(BO=12) - bfla = BA.bcla(BO=4) - bdnzla = BA.bcla(BO=16, BI=0) - bdnztla = BA.bcla(BO=8) - bdnzfla = BA.bcla(BO=0) - bdzla = BA.bcla(BO=18) - bdztla = BA.bcla(BO=10) - bdzfla = BA.bcla(BO=2) - - blr = BA.bclr(BO=20, BI=0) - btlr = BA.bclr(BO=12) - bflr = BA.bclr(BO=4) - bdnzlr = BA.bclr(BO=16, BI=0) - bdnztlr = BA.bclr(BO=8) - bdnzflr = BA.bclr(BO=0) - bdzlr = BA.bclr(BO=18, BI=0) - bdztlr = BA.bclr(BO=10) - bdzflr = BA.bclr(BO=2) - - bctr = BA.bcctr(BO=20, BI=0) - btctr = BA.bcctr(BO=12) - bfctr = BA.bcctr(BO=4) - - blrl = BA.bclrl(BO=20, BI=0) - btlrl = BA.bclrl(BO=12) - bflrl = BA.bclrl(BO=4) - bdnzlrl = BA.bclrl(BO=16, BI=0) - bdnztlrl = BA.bclrl(BO=8) - bdnzflrl = BA.bclrl(BO=0) - bdzlrl = BA.bclrl(BO=18, BI=0) - bdztlrl = BA.bclrl(BO=10) - bdzflrl = BA.bclrl(BO=2) - - bctrl = BA.bcctrl(BO=20, 
BI=0) - btctrl = BA.bcctrl(BO=12) - bfctrl = BA.bcctrl(BO=4) - - # these should/could take a[n optional] crf argument, but it's a - # bit hard to see how to arrange that. - - blt = BA.bc(BO=12, BI=0) - ble = BA.bc(BO=4, BI=1) - beq = BA.bc(BO=12, BI=2) - bge = BA.bc(BO=4, BI=0) - bgt = BA.bc(BO=12, BI=1) - bnl = BA.bc(BO=4, BI=0) - bne = BA.bc(BO=4, BI=2) - bng = BA.bc(BO=4, BI=1) - bso = BA.bc(BO=12, BI=3) - bns = BA.bc(BO=4, BI=3) - bun = BA.bc(BO=12, BI=3) - bnu = BA.bc(BO=4, BI=3) - - blta = BA.bca(BO=12, BI=0) - blea = BA.bca(BO=4, BI=1) - beqa = BA.bca(BO=12, BI=2) - bgea = BA.bca(BO=4, BI=0) - bgta = BA.bca(BO=12, BI=1) - bnla = BA.bca(BO=4, BI=0) - bnea = BA.bca(BO=4, BI=2) - bnga = BA.bca(BO=4, BI=1) - bsoa = BA.bca(BO=12, BI=3) - bnsa = BA.bca(BO=4, BI=3) - buna = BA.bca(BO=12, BI=3) - bnua = BA.bca(BO=4, BI=3) - - bltl = BA.bcl(BO=12, BI=0) - blel = BA.bcl(BO=4, BI=1) - beql = BA.bcl(BO=12, BI=2) - bgel = BA.bcl(BO=4, BI=0) - bgtl = BA.bcl(BO=12, BI=1) - bnll = BA.bcl(BO=4, BI=0) - bnel = BA.bcl(BO=4, BI=2) - bngl = BA.bcl(BO=4, BI=1) - bsol = BA.bcl(BO=12, BI=3) - bnsl = BA.bcl(BO=4, BI=3) - bunl = BA.bcl(BO=12, BI=3) - bnul = BA.bcl(BO=4, BI=3) - - bltla = BA.bcla(BO=12, BI=0) - blela = BA.bcla(BO=4, BI=1) - beqla = BA.bcla(BO=12, BI=2) - bgela = BA.bcla(BO=4, BI=0) - bgtla = BA.bcla(BO=12, BI=1) - bnlla = BA.bcla(BO=4, BI=0) - bnela = BA.bcla(BO=4, BI=2) - bngla = BA.bcla(BO=4, BI=1) - bsola = BA.bcla(BO=12, BI=3) - bnsla = BA.bcla(BO=4, BI=3) - bunla = BA.bcla(BO=12, BI=3) - bnula = BA.bcla(BO=4, BI=3) - - bltlr = BA.bclr(BO=12, BI=0) - blelr = BA.bclr(BO=4, BI=1) - beqlr = BA.bclr(BO=12, BI=2) - bgelr = BA.bclr(BO=4, BI=0) - bgtlr = BA.bclr(BO=12, BI=1) - bnllr = BA.bclr(BO=4, BI=0) - bnelr = BA.bclr(BO=4, BI=2) - bnglr = BA.bclr(BO=4, BI=1) - bsolr = BA.bclr(BO=12, BI=3) - bnslr = BA.bclr(BO=4, BI=3) - bunlr = BA.bclr(BO=12, BI=3) - bnulr = BA.bclr(BO=4, BI=3) - - bltctr = BA.bcctr(BO=12, BI=0) - blectr = BA.bcctr(BO=4, BI=1) - beqctr = 
BA.bcctr(BO=12, BI=2) - bgectr = BA.bcctr(BO=4, BI=0) - bgtctr = BA.bcctr(BO=12, BI=1) - bnlctr = BA.bcctr(BO=4, BI=0) - bnectr = BA.bcctr(BO=4, BI=2) - bngctr = BA.bcctr(BO=4, BI=1) - bsoctr = BA.bcctr(BO=12, BI=3) - bnsctr = BA.bcctr(BO=4, BI=3) - bunctr = BA.bcctr(BO=12, BI=3) - bnuctr = BA.bcctr(BO=4, BI=3) - - bltlrl = BA.bclrl(BO=12, BI=0) - blelrl = BA.bclrl(BO=4, BI=1) - beqlrl = BA.bclrl(BO=12, BI=2) - bgelrl = BA.bclrl(BO=4, BI=0) - bgtlrl = BA.bclrl(BO=12, BI=1) - bnllrl = BA.bclrl(BO=4, BI=0) - bnelrl = BA.bclrl(BO=4, BI=2) - bnglrl = BA.bclrl(BO=4, BI=1) - bsolrl = BA.bclrl(BO=12, BI=3) - bnslrl = BA.bclrl(BO=4, BI=3) - bunlrl = BA.bclrl(BO=12, BI=3) - bnulrl = BA.bclrl(BO=4, BI=3) - - bltctrl = BA.bcctrl(BO=12, BI=0) - blectrl = BA.bcctrl(BO=4, BI=1) - beqctrl = BA.bcctrl(BO=12, BI=2) - bgectrl = BA.bcctrl(BO=4, BI=0) - bgtctrl = BA.bcctrl(BO=12, BI=1) - bnlctrl = BA.bcctrl(BO=4, BI=0) - bnectrl = BA.bcctrl(BO=4, BI=2) - bngctrl = BA.bcctrl(BO=4, BI=1) - bsoctrl = BA.bcctrl(BO=12, BI=3) - bnsctrl = BA.bcctrl(BO=4, BI=3) - bunctrl = BA.bcctrl(BO=12, BI=3) - bnuctrl = BA.bcctrl(BO=4, BI=3) - - # whew! and we haven't even begun the predicted versions... 
- - # F.6 Simplified Mnemonics for Condition Register - # Logical Instructions - - crset = BA.creqv(crbA="crbD", crbB="crbD") - crclr = BA.crxor(crbA="crbD", crbB="crbD") - crmove = BA.cror(crbA="crbB") - crnot = BA.crnor(crbA="crbB") - - # F.7 Simplified Mnemonics for Trap Instructions - - trap = BA.tw(TO=31, rA=0, rB=0) - twlt = BA.tw(TO=16) - twle = BA.tw(TO=20) - tweq = BA.tw(TO=4) - twge = BA.tw(TO=12) - twgt = BA.tw(TO=8) - twnl = BA.tw(TO=12) - twng = BA.tw(TO=24) - twllt = BA.tw(TO=2) - twlle = BA.tw(TO=6) - twlge = BA.tw(TO=5) - twlgt = BA.tw(TO=1) - twlnl = BA.tw(TO=5) - twlng = BA.tw(TO=6) - - twlti = BA.twi(TO=16) - twlei = BA.twi(TO=20) - tweqi = BA.twi(TO=4) - twgei = BA.twi(TO=12) - twgti = BA.twi(TO=8) - twnli = BA.twi(TO=12) - twnei = BA.twi(TO=24) - twngi = BA.twi(TO=20) - twllti = BA.twi(TO=2) - twllei = BA.twi(TO=6) - twlgei = BA.twi(TO=5) - twlgti = BA.twi(TO=1) - twlnli = BA.twi(TO=5) - twlngi = BA.twi(TO=6) - - # F.8 Simplified Mnemonics for Special-Purpose - # Registers - - mfctr = BA.mfspr(spr=9) - mflr = BA.mfspr(spr=8) - mftbl = BA.mftb(spr=268) - mftbu = BA.mftb(spr=269) - mfxer = BA.mfspr(spr=1) - - mtctr = BA.mtspr(spr=9) - mtlr = BA.mtspr(spr=8) - mtxer = BA.mtspr(spr=1) - - # F.9 Recommended Simplified Mnemonics - - nop = BA.ori(rS=0, rA=0, UIMM=0) - - li = BA.addi(rA=0) - lis = BA.addis(rA=0) - - mr = BA.or_(rB="rS") - mrx = BA.or_x(rB="rS") - - not_ = BA.nor(rB="rS") - not_x = BA.norx(rB="rS") - - mtcr = BA.mtcrf(CRM=0xFF) - - def emit(self, insn): - bytes = struct.pack("i", insn) - for byte in bytes: - self.writechar(byte) +#A = Form("frD", "frA", "frB", "XO3", "Rc") +#A1 = Form("frD", "frB", "XO3", "Rc") +#A2 = Form("frD", "frA", "frC", "XO3", "Rc") +#A3 = Form("frD", "frA", "frC", "frB", "XO3", "Rc") +# +#I = Form("LI", "AA", "LK") +# +#B = Form("BO", "BI", "BD", "AA", "LK") +# +#SC = Form("AA") # fudge +# +#DD = Form("rD", "rA", "SIMM") +#DDO = Form("rD", "rA", "ds", "XO4") +#DS = Form("rA", "rS", "UIMM") +# +#X = Form("XO1") 
+#XS = Form("rA", "rS", "rB", "XO1", "Rc") +#XSO = Form("rS", "rA", "rB", "XO1") +#XD = Form("rD", "rA", "rB", "XO1") +#XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc") +#XO0 = Form("rD", "rA", "OE", "XO2", "Rc") +#XDB = Form("frD", "frB", "XO1", "Rc") +#XS0 = Form("rA", "rS", "XO1", "Rc") +#X0 = Form("rA", "rB", "XO1") +#XcAB = Form("crfD", "rA", "rB", "XO1") +#XN = Form("rD", "rA", "NB", "XO1") +#XL = Form("crbD", "crbA", "crbB", "XO1") +#XL1 = Form("crfD", "crfS") +#XL2 = Form("crbD", "XO1", "Rc") +#XFL = Form("FM", "frB", "XO1", "Rc") +#XFX = Form("CRM", "rS", "XO1") +# +#MI = Form("rA", "rS", "SH", "MB", "ME", "Rc") +#MB = Form("rA", "rS", "rB", "MB", "ME", "Rc") +#MDI = Form("rA", "rS", "sh", "mbe", "XO5", "Rc") +#MDS = Form("rA", "rS", "rB", "mbe", "XO5", "Rc") +# +#class BasicPPCAssembler(Assembler): +# +# def disassemble(cls, inst, labels={}, pc=0): +# cache = cls.__dict__.get('idesc cache') +# if cache is None: +# idescs = cls.get_idescs() +# cache = {} +# for n, i in idescs: +# cache.setdefault(i.specializations[ppc_fields['opcode']], +# []).append((n,i)) +# setattr(cls, 'idesc cache', cache) +# matches = [] +# idescs = cache[ppc_fields['opcode'].decode(inst)] +# for name, idesc in idescs: +# m = idesc.match(inst) +# if m > 0: +# matches.append((m, idesc, name)) +# if matches: +# score, idesc, name = max(matches) +# return idesc.disassemble(name, inst, labels, pc) +# disassemble = classmethod(disassemble) +# +# # "basic" means no simplified mnemonics +# +# # I form +# b = I(18, AA=0, LK=0) +# ba = I(18, AA=1, LK=0) +# bl = I(18, AA=0, LK=1) +# bla = I(18, AA=1, LK=1) +# +# # B form +# bc = B(16, AA=0, LK=0) +# bcl = B(16, AA=0, LK=1) +# bca = B(16, AA=1, LK=0) +# bcla = B(16, AA=1, LK=1) +# +# # SC form +# sc = SC(17, AA=1) # it's not really the aa field... 
+# +# # D form +# addi = DD(14) +# addic = DD(12) +# addicx = DD(13) +# addis = DD(15) +# +# andix = DS(28) +# andisx = DS(29) +# +# cmpi = Form("crfD", "L", "rA", "SIMM")(11) +# cmpi.default(L=0).default(crfD=0) +# cmpli = Form("crfD", "L", "rA", "UIMM")(10) +# cmpli.default(L=0).default(crfD=0) +# +# lbz = DD(34) +# lbzu = DD(35) +# ld = DDO(58, XO4=0) +# ldu = DDO(58, XO4=1) +# lfd = DD(50) +# lfdu = DD(51) +# lfs = DD(48) +# lfsu = DD(49) +# lha = DD(42) +# lhau = DD(43) +# lhz = DD(40) +# lhzu = DD(41) +# lmw = DD(46) +# lwa = DDO(58, XO4=2) +# lwz = DD(32) +# lwzu = DD(33) +# +# mulli = DD(7) +# ori = DS(24) +# oris = DS(25) +# +# stb = DD(38) +# stbu = DD(39) +# std = DDO(62, XO4=0) +# stdu = DDO(62, XO4=1) +# stfd = DD(54) +# stfdu = DD(55) +# stfs = DD(52) +# stfsu = DD(53) +# sth = DD(44) +# sthu = DD(45) +# stmw = DD(47) +# stw = DD(36) +# stwu = DD(37) +# +# subfic = DD(8) +# tdi = Form("TO", "rA", "SIMM")(2) +# twi = Form("TO", "rA", "SIMM")(3) +# xori = DS(26) +# xoris = DS(27) +# +# # X form +# +# and_ = XS(31, XO1=28, Rc=0) +# and_x = XS(31, XO1=28, Rc=1) +# +# andc_ = XS(31, XO1=60, Rc=0) +# andc_x = XS(31, XO1=60, Rc=1) +# +# # is the L bit for 64 bit compares? 
hmm +# cmp = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=0) +# cmp.default(L=0).default(crfD=0) +# cmpl = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=32) +# cmpl.default(L=0).default(crfD=0) +# +# cntlzd = XS0(31, XO1=58, Rc=0) +# cntlzdx = XS0(31, XO1=58, Rc=1) +# cntlzw = XS0(31, XO1=26, Rc=0) +# cntlzwx = XS0(31, XO1=26, Rc=1) +# +# dcba = X0(31, XO1=758) +# dcbf = X0(31, XO1=86) +# dcbi = X0(31, XO1=470) +# dcbst = X0(31, XO1=54) +# dcbt = X0(31, XO1=278) +# dcbtst = X0(31, XO1=246) +# dcbz = X0(31, XO1=1014) +# +# eciwx = XD(31, XO1=310) +# ecowx = XS(31, XO1=438, Rc=0) +# +# eieio = X(31, XO1=854) +# +# eqv = XS(31, XO1=284, Rc=0) +# eqvx = XS(31, XO1=284, Rc=1) +# +# extsb = XS0(31, XO1=954, Rc=0) +# extsbx = XS0(31, XO1=954, Rc=1) +# +# extsh = XS0(31, XO1=922, Rc=0) +# extshx = XS0(31, XO1=922, Rc=1) +# +# extsw = XS0(31, XO1=986, Rc=0) +# extswx = XS0(31, XO1=986, Rc=1) +# +# fabs = XDB(63, XO1=264, Rc=0) +# fabsx = XDB(63, XO1=264, Rc=1) +# +# fcmpo = XcAB(63, XO1=32) +# fcmpu = XcAB(63, XO1=0) +# +# fcfid = XDB(63, XO1=846, Rc=0) +# fcfidx = XDB(63, XO1=846, Rc=1) +# +# fctid = XDB(63, XO1=814, Rc=0) +# fctidx = XDB(63, XO1=814, Rc=1) +# +# fctidz = XDB(63, XO1=815, Rc=0) +# fctidzx = XDB(63, XO1=815, Rc=1) +# +# fctiw = XDB(63, XO1=14, Rc=0) +# fctiwx = XDB(63, XO1=14, Rc=1) +# +# fctiwz = XDB(63, XO1=15, Rc=0) +# fctiwzx = XDB(63, XO1=15, Rc=1) +# +# fmr = XDB(63, XO1=72, Rc=0) +# fmrx = XDB(63, XO1=72, Rc=1) +# +# fnabs = XDB(63, XO1=136, Rc=0) +# fnabsx = XDB(63, XO1=136, Rc=1) +# +# fneg = XDB(63, XO1=40, Rc=0) +# fnegx = XDB(63, XO1=40, Rc=1) +# +# frsp = XDB(63, XO1=12, Rc=0) +# frspx = XDB(63, XO1=12, Rc=1) +# +# fsqrt = XDB(63, XO1=22, Rc=0) +# +# icbi = X0(31, XO1=982) +# +# lbzux = XD(31, XO1=119) +# lbzx = XD(31, XO1=87) +# ldarx = XD(31, XO1=84) +# ldux = XD(31, XO1=53) +# ldx = XD(31, XO1=21) +# lfdux = XD(31, XO1=631) +# lfdx = XD(31, XO1=599) +# lfsux = XD(31, XO1=567) +# lfsx = XD(31, XO1=535) +# lhaux = XD(31, XO1=375) +# lhax = 
XD(31, XO1=343) +# lhbrx = XD(31, XO1=790) +# lhzux = XD(31, XO1=311) +# lhzx = XD(31, XO1=279) +# lswi = XD(31, XO1=597) +# lswx = XD(31, XO1=533) +# lwarx = XD(31, XO1=20) +# lwaux = XD(31, XO1=373) +# lwax = XD(31, XO1=341) +# lwbrx = XD(31, XO1=534) +# lwzux = XD(31, XO1=55) +# lwzx = XD(31, XO1=23) +# +# mcrfs = Form("crfD", "crfS", "XO1")(63, XO1=64) +# mcrxr = Form("crfD", "XO1")(31, XO1=512) +# mfcr = Form("rD", "XO1")(31, XO1=19) +# mffs = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=0) +# mffsx = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=1) +# mfmsr = Form("rD", "XO1")(31, XO1=83) +# mfsr = Form("rD", "SR", "XO1")(31, XO1=595) +# mfsrin = XDB(31, XO1=659, Rc=0) +# +# add = XO(31, XO2=266, OE=0, Rc=0) +# addx = XO(31, XO2=266, OE=0, Rc=1) +# addo = XO(31, XO2=266, OE=1, Rc=0) +# addox = XO(31, XO2=266, OE=1, Rc=1) +# +# addc = XO(31, XO2=10, OE=0, Rc=0) +# addcx = XO(31, XO2=10, OE=0, Rc=1) +# addco = XO(31, XO2=10, OE=1, Rc=0) +# addcox = XO(31, XO2=10, OE=1, Rc=1) +# +# adde = XO(31, XO2=138, OE=0, Rc=0) +# addex = XO(31, XO2=138, OE=0, Rc=1) +# addeo = XO(31, XO2=138, OE=1, Rc=0) +# addeox = XO(31, XO2=138, OE=1, Rc=1) +# +# addme = XO(31, rB=0, XO2=234, OE=0, Rc=0) +# addmex = XO(31, rB=0, XO2=234, OE=0, Rc=1) +# addmeo = XO(31, rB=0, XO2=234, OE=1, Rc=0) +# addmeox = XO(31, rB=0, XO2=234, OE=1, Rc=1) +# +# addze = XO(31, rB=0, XO2=202, OE=0, Rc=0) +# addzex = XO(31, rB=0, XO2=202, OE=0, Rc=1) +# addzeo = XO(31, rB=0, XO2=202, OE=1, Rc=0) +# addzeox = XO(31, rB=0, XO2=202, OE=1, Rc=1) +# +# bcctr = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=0) +# bcctrl = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=1) +# +# bclr = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=0) +# bclrl = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=1) +# +# crand = XL(19, XO1=257) +# crandc = XL(19, XO1=129) +# creqv = XL(19, XO1=289) +# crnand = XL(19, XO1=225) +# crnor = XL(19, XO1=33) +# cror = XL(19, XO1=449) +# crorc = XL(19, XO1=417) +# crxor = XL(19, XO1=193) +# +# divd = 
XO(31, XO2=489, OE=0, Rc=0) +# divdx = XO(31, XO2=489, OE=0, Rc=1) +# divdo = XO(31, XO2=489, OE=1, Rc=0) +# divdox = XO(31, XO2=489, OE=1, Rc=1) +# +# divdu = XO(31, XO2=457, OE=0, Rc=0) +# divdux = XO(31, XO2=457, OE=0, Rc=1) +# divduo = XO(31, XO2=457, OE=1, Rc=0) +# divduox = XO(31, XO2=457, OE=1, Rc=1) +# +# divw = XO(31, XO2=491, OE=0, Rc=0) +# divwx = XO(31, XO2=491, OE=0, Rc=1) +# divwo = XO(31, XO2=491, OE=1, Rc=0) +# divwox = XO(31, XO2=491, OE=1, Rc=1) +# +# divwu = XO(31, XO2=459, OE=0, Rc=0) +# divwux = XO(31, XO2=459, OE=0, Rc=1) +# divwuo = XO(31, XO2=459, OE=1, Rc=0) +# divwuox = XO(31, XO2=459, OE=1, Rc=1) +# +# fadd = A(63, XO3=21, Rc=0) +# faddx = A(63, XO3=21, Rc=1) +# fadds = A(59, XO3=21, Rc=0) +# faddsx = A(59, XO3=21, Rc=1) +# +# fdiv = A(63, XO3=18, Rc=0) +# fdivx = A(63, XO3=18, Rc=1) +# fdivs = A(59, XO3=18, Rc=0) +# fdivsx = A(59, XO3=18, Rc=1) +# +# fmadd = A3(63, XO3=19, Rc=0) +# fmaddx = A3(63, XO3=19, Rc=1) +# fmadds = A3(59, XO3=19, Rc=0) +# fmaddsx = A3(59, XO3=19, Rc=1) +# +# fmsub = A3(63, XO3=28, Rc=0) +# fmsubx = A3(63, XO3=28, Rc=1) +# fmsubs = A3(59, XO3=28, Rc=0) +# fmsubsx = A3(59, XO3=28, Rc=1) +# +# fmul = A2(63, XO3=25, Rc=0) +# fmulx = A2(63, XO3=25, Rc=1) +# fmuls = A2(59, XO3=25, Rc=0) +# fmulsx = A2(59, XO3=25, Rc=1) +# +# fnmadd = A3(63, XO3=31, Rc=0) +# fnmaddx = A3(63, XO3=31, Rc=1) +# fnmadds = A3(59, XO3=31, Rc=0) +# fnmaddsx = A3(59, XO3=31, Rc=1) +# +# fnmsub = A3(63, XO3=30, Rc=0) +# fnmsubx = A3(63, XO3=30, Rc=1) +# fnmsubs = A3(59, XO3=30, Rc=0) +# fnmsubsx = A3(59, XO3=30, Rc=1) +# +# fres = A1(59, XO3=24, Rc=0) +# fresx = A1(59, XO3=24, Rc=1) +# +# frsp = A1(63, XO3=12, Rc=0) +# frspx = A1(63, XO3=12, Rc=1) +# +# frsqrte = A1(63, XO3=26, Rc=0) +# frsqrtex = A1(63, XO3=26, Rc=1) +# +# fsel = A3(63, XO3=23, Rc=0) +# fselx = A3(63, XO3=23, Rc=1) +# +# frsqrt = A1(63, XO3=22, Rc=0) +# frsqrtx = A1(63, XO3=22, Rc=1) +# frsqrts = A1(59, XO3=22, Rc=0) +# frsqrtsx = A1(59, XO3=22, Rc=1) +# +# fsub = A(63, XO3=20, 
Rc=0) +# fsubx = A(63, XO3=20, Rc=1) +# fsubs = A(59, XO3=20, Rc=0) +# fsubsx = A(59, XO3=20, Rc=1) +# +# isync = X(19, XO1=150) +# +# mcrf = XL1(19) +# +# mfspr = Form("rD", "spr", "XO1")(31, XO1=339) +# mftb = Form("rD", "spr", "XO1")(31, XO1=371) +# +# mtcrf = XFX(31, XO1=144) +# +# mtfsb0 = XL2(63, XO1=70, Rc=0) +# mtfsb0x = XL2(63, XO1=70, Rc=1) +# mtfsb1 = XL2(63, XO1=38, Rc=0) +# mtfsb1x = XL2(63, XO1=38, Rc=1) +# +# mtfsf = XFL(63, XO1=711, Rc=0) +# mtfsfx = XFL(63, XO1=711, Rc=1) +# +# mtfsfi = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=0) +# mtfsfix = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=1) +# +# mtmsr = Form("rS", "XO1")(31, XO1=146) +# +# mtspr = Form("rS", "spr", "XO1")(31, XO1=467) +# +# mtsr = Form("rS", "SR", "XO1")(31, XO1=210) +# mtsrin = Form("rS", "rB", "XO1")(31, XO1=242) +# +# mulhd = XO(31, OE=0, XO2=73, Rc=0) +# mulhdx = XO(31, OE=0, XO2=73, Rc=1) +# +# mulhdu = XO(31, OE=0, XO2=9, Rc=0) +# mulhdux = XO(31, OE=0, XO2=9, Rc=1) +# +# mulld = XO(31, OE=0, XO2=233, Rc=0) +# mulldx = XO(31, OE=0, XO2=233, Rc=1) +# mulldo = XO(31, OE=1, XO2=233, Rc=0) +# mulldox = XO(31, OE=1, XO2=233, Rc=1) +# +# mulhw = XO(31, OE=0, XO2=75, Rc=0) +# mulhwx = XO(31, OE=0, XO2=75, Rc=1) +# +# mulhwu = XO(31, OE=0, XO2=11, Rc=0) +# mulhwux = XO(31, OE=0, XO2=11, Rc=1) +# +# mullw = XO(31, OE=0, XO2=235, Rc=0) +# mullwx = XO(31, OE=0, XO2=235, Rc=1) +# mullwo = XO(31, OE=1, XO2=235, Rc=0) +# mullwox = XO(31, OE=1, XO2=235, Rc=1) +# +# nand = XS(31, XO1=476, Rc=0) +# nandx = XS(31, XO1=476, Rc=1) +# +# neg = XO0(31, OE=0, XO2=104, Rc=0) +# negx = XO0(31, OE=0, XO2=104, Rc=1) +# nego = XO0(31, OE=1, XO2=104, Rc=0) +# negox = XO0(31, OE=1, XO2=104, Rc=1) +# +# nor = XS(31, XO1=124, Rc=0) +# norx = XS(31, XO1=124, Rc=1) +# +# or_ = XS(31, XO1=444, Rc=0) +# or_x = XS(31, XO1=444, Rc=1) +# +# orc = XS(31, XO1=412, Rc=0) +# orcx = XS(31, XO1=412, Rc=1) +# +# rfi = X(19, XO1=50) +# +# rfid = X(19, XO1=18) +# +# rldcl = MDS(30, XO5=8, Rc=0) +# rldclx = 
MDS(30, XO5=8, Rc=1) +# rldcr = MDS(30, XO5=9, Rc=0) +# rldcrx = MDS(30, XO5=9, Rc=1) +# +# rldic = MDI(30, XO5=2, Rc=0) +# rldicx = MDI(30, XO5=2, Rc=1) +# rldicl = MDI(30, XO5=0, Rc=0) +# rldiclx = MDI(30, XO5=0, Rc=1) +# rldicr = MDI(30, XO5=1, Rc=0) +# rldicrx = MDI(30, XO5=1, Rc=1) +# rldimi = MDI(30, XO5=3, Rc=0) +# rldimix = MDI(30, XO5=3, Rc=1) +# +# rlwimi = MI(20, Rc=0) +# rlwimix = MI(20, Rc=1) +# +# rlwinm = MI(21, Rc=0) +# rlwinmx = MI(21, Rc=1) +# +# rlwnm = MB(23, Rc=0) +# rlwnmx = MB(23, Rc=1) +# +# sld = XS(31, XO1=27, Rc=0) +# sldx = XS(31, XO1=27, Rc=1) +# +# slw = XS(31, XO1=24, Rc=0) +# slwx = XS(31, XO1=24, Rc=1) +# +# srad = XS(31, XO1=794, Rc=0) +# sradx = XS(31, XO1=794, Rc=1) +# +# sradi = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=0) +# sradix = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=1) +# +# sraw = XS(31, XO1=792, Rc=0) +# srawx = XS(31, XO1=792, Rc=1) +# +# srawi = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=0) +# srawix = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=1) +# +# srd = XS(31, XO1=539, Rc=0) +# srdx = XS(31, XO1=539, Rc=1) +# +# srw = XS(31, XO1=536, Rc=0) +# srwx = XS(31, XO1=536, Rc=1) +# +# stbux = XSO(31, XO1=247) +# stbx = XSO(31, XO1=215) +# stdcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=214, Rc=1) +# stdux = XSO(31, XO1=181) +# stdx = XSO(31, XO1=149) +# stfdux = XSO(31, XO1=759) +# stfdx = XSO(31, XO1=727) +# stfiwx = XSO(31, XO1=983) +# stfsux = XSO(31, XO1=695) +# stfsx = XSO(31, XO1=663) +# sthbrx = XSO(31, XO1=918) +# sthux = XSO(31, XO1=439) +# sthx = XSO(31, XO1=407) +# stswi = Form("rS", "rA", "NB", "XO1")(31, XO1=725) +# stswx = XSO(31, XO1=661) +# stwbrx = XSO(31, XO1=662) +# stwcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=150, Rc=1) +# stwux = XSO(31, XO1=183) +# stwx = XSO(31, XO1=151) +# +# subf = XO(31, XO2=40, OE=0, Rc=0) +# subfx = XO(31, XO2=40, OE=0, Rc=1) +# subfo = XO(31, XO2=40, OE=1, Rc=0) +# subfox = XO(31, XO2=40, OE=1, Rc=1) +# +# 
subfc = XO(31, XO2=8, OE=0, Rc=0) +# subfcx = XO(31, XO2=8, OE=0, Rc=1) +# subfco = XO(31, XO2=8, OE=1, Rc=0) +# subfcox = XO(31, XO2=8, OE=1, Rc=1) +# +# subfe = XO(31, XO2=136, OE=0, Rc=0) +# subfex = XO(31, XO2=136, OE=0, Rc=1) +# subfeo = XO(31, XO2=136, OE=1, Rc=0) +# subfeox = XO(31, XO2=136, OE=1, Rc=1) +# +# subfme = XO0(31, OE=0, XO2=232, Rc=0) +# subfmex = XO0(31, OE=0, XO2=232, Rc=1) +# subfmeo = XO0(31, OE=1, XO2=232, Rc=0) +# subfmeox= XO0(31, OE=1, XO2=232, Rc=1) +# +# subfze = XO0(31, OE=0, XO2=200, Rc=0) +# subfzex = XO0(31, OE=0, XO2=200, Rc=1) +# subfzeo = XO0(31, OE=1, XO2=200, Rc=0) +# subfzeox= XO0(31, OE=1, XO2=200, Rc=1) +# +# sync = X(31, XO1=598) +# +# tlbia = X(31, XO1=370) +# tlbie = Form("rB", "XO1")(31, XO1=306) +# tlbsync = X(31, XO1=566) +# +# td = Form("TO", "rA", "rB", "XO1")(31, XO1=68) +# tw = Form("TO", "rA", "rB", "XO1")(31, XO1=4) +# +# xor = XS(31, XO1=316, Rc=0) +# xorx = XS(31, XO1=316, Rc=1) +# +#class PPCAssembler(BasicPPCAssembler): +# BA = BasicPPCAssembler +# +# # awkward mnemonics: +# # mftb +# # most of the branch mnemonics... 
+# +# # F.2 Simplified Mnemonics for Subtract Instructions +# +# def subi(self, rD, rA, value): +# self.addi(rD, rA, -value) +# def subis(self, rD, rA, value): +# self.addis(rD, rA, -value) +# def subic(self, rD, rA, value): +# self.addic(rD, rA, -value) +# def subicx(self, rD, rA, value): +# self.addicx(rD, rA, -value) +# +# def sub(self, rD, rA, rB): +# self.subf(rD, rB, rA) +# def subc(self, rD, rA, rB): +# self.subfc(rD, rB, rA) +# def subx(self, rD, rA, rB): +# self.subfx(rD, rB, rA) +# def subcx(self, rD, rA, rB): +# self.subfcx(rD, rB, rA) +# def subo(self, rD, rA, rB): +# self.subfo(rD, rB, rA) +# def subco(self, rD, rA, rB): +# self.subfco(rD, rB, rA) +# def subox(self, rD, rA, rB): +# self.subfox(rD, rB, rA) +# def subcox(self, rD, rA, rB): +# self.subfcox(rD, rB, rA) +# +# # F.3 Simplified Mnemonics for Compare Instructions +# +# cmpdi = BA.cmpi(L=1) +# cmpwi = BA.cmpi(L=0) +# cmpldi = BA.cmpli(L=1) +# cmplwi = BA.cmpli(L=0) +# cmpd = BA.cmp(L=1) +# cmpw = BA.cmp(L=0) +# cmpld = BA.cmpl(L=1) +# cmplw = BA.cmpl(L=0) +# +# # F.4 Simplified Mnemonics for Rotate and Shift Instructions +# +# def extlwi(self, rA, rS, n, b): +# self.rlwinm(rA, rS, b, 0, n-1) +# +# def extrwi(self, rA, rS, n, b): +# self.rlwinm(rA, rS, b+n, 32-n, 31) +# +# def inslwi(self, rA, rS, n, b): +# self.rwlimi(rA, rS, 32-b, b, b + n -1) +# +# def insrwi(self, rA, rS, n, b): +# self.rwlimi(rA, rS, 32-(b+n), b, b + n -1) +# +# def rotlwi(self, rA, rS, n): +# self.rlwinm(rA, rS, n, 0, 31) +# +# def rotrwi(self, rA, rS, n): +# self.rlwinm(rA, rS, 32-n, 0, 31) +# +# def rotlw(self, rA, rS, rB): +# self.rlwnm(rA, rS, rB, 0, 31) +# +# def slwi(self, rA, rS, n): +# self.rlwinm(rA, rS, n, 0, 31-n) +# +# def srwi(self, rA, rS, n): +# self.rlwinm(rA, rS, 32-n, n, 31) +# +# def sldi(self, rA, rS, n): +# self.rldicr(rA, rS, n, 63-n) +# +# def srdi(self, rA, rS, n): +# self.rldicl(rA, rS, 64-n, n) +# +# # F.5 Simplified Mnemonics for Branch Instructions +# +# # there's a lot of these! 
+# bt = BA.bc(BO=12) +# bf = BA.bc(BO=4) +# bdnz = BA.bc(BO=16, BI=0) +# bdnzt = BA.bc(BO=8) +# bdnzf = BA.bc(BO=0) +# bdz = BA.bc(BO=18) +# bdzt = BA.bc(BO=10) +# bdzf = BA.bc(BO=2) +# +# bta = BA.bca(BO=12) +# bfa = BA.bca(BO=4) +# bdnza = BA.bca(BO=16, BI=0) +# bdnzta = BA.bca(BO=8) +# bdnzfa = BA.bca(BO=0) +# bdza = BA.bca(BO=18) +# bdzta = BA.bca(BO=10) +# bdzfa = BA.bca(BO=2) +# +# btl = BA.bcl(BO=12) +# bfl = BA.bcl(BO=4) +# bdnzl = BA.bcl(BO=16, BI=0) +# bdnztl = BA.bcl(BO=8) +# bdnzfl = BA.bcl(BO=0) +# bdzl = BA.bcl(BO=18) +# bdztl = BA.bcl(BO=10) +# bdzfl = BA.bcl(BO=2) +# +# btla = BA.bcla(BO=12) +# bfla = BA.bcla(BO=4) +# bdnzla = BA.bcla(BO=16, BI=0) +# bdnztla = BA.bcla(BO=8) +# bdnzfla = BA.bcla(BO=0) +# bdzla = BA.bcla(BO=18) +# bdztla = BA.bcla(BO=10) +# bdzfla = BA.bcla(BO=2) +# +# blr = BA.bclr(BO=20, BI=0) +# btlr = BA.bclr(BO=12) +# bflr = BA.bclr(BO=4) +# bdnzlr = BA.bclr(BO=16, BI=0) +# bdnztlr = BA.bclr(BO=8) +# bdnzflr = BA.bclr(BO=0) +# bdzlr = BA.bclr(BO=18, BI=0) +# bdztlr = BA.bclr(BO=10) +# bdzflr = BA.bclr(BO=2) +# +# bctr = BA.bcctr(BO=20, BI=0) +# btctr = BA.bcctr(BO=12) +# bfctr = BA.bcctr(BO=4) +# +# blrl = BA.bclrl(BO=20, BI=0) +# btlrl = BA.bclrl(BO=12) +# bflrl = BA.bclrl(BO=4) +# bdnzlrl = BA.bclrl(BO=16, BI=0) +# bdnztlrl = BA.bclrl(BO=8) +# bdnzflrl = BA.bclrl(BO=0) +# bdzlrl = BA.bclrl(BO=18, BI=0) +# bdztlrl = BA.bclrl(BO=10) +# bdzflrl = BA.bclrl(BO=2) +# +# bctrl = BA.bcctrl(BO=20, BI=0) +# btctrl = BA.bcctrl(BO=12) +# bfctrl = BA.bcctrl(BO=4) +# +# # these should/could take a[n optional] crf argument, but it's a +# # bit hard to see how to arrange that. 
+# +# blt = BA.bc(BO=12, BI=0) +# ble = BA.bc(BO=4, BI=1) +# beq = BA.bc(BO=12, BI=2) +# bge = BA.bc(BO=4, BI=0) +# bgt = BA.bc(BO=12, BI=1) +# bnl = BA.bc(BO=4, BI=0) +# bne = BA.bc(BO=4, BI=2) +# bng = BA.bc(BO=4, BI=1) +# bso = BA.bc(BO=12, BI=3) +# bns = BA.bc(BO=4, BI=3) +# bun = BA.bc(BO=12, BI=3) +# bnu = BA.bc(BO=4, BI=3) +# +# blta = BA.bca(BO=12, BI=0) +# blea = BA.bca(BO=4, BI=1) +# beqa = BA.bca(BO=12, BI=2) +# bgea = BA.bca(BO=4, BI=0) +# bgta = BA.bca(BO=12, BI=1) +# bnla = BA.bca(BO=4, BI=0) +# bnea = BA.bca(BO=4, BI=2) +# bnga = BA.bca(BO=4, BI=1) +# bsoa = BA.bca(BO=12, BI=3) +# bnsa = BA.bca(BO=4, BI=3) +# buna = BA.bca(BO=12, BI=3) +# bnua = BA.bca(BO=4, BI=3) +# +# bltl = BA.bcl(BO=12, BI=0) +# blel = BA.bcl(BO=4, BI=1) +# beql = BA.bcl(BO=12, BI=2) +# bgel = BA.bcl(BO=4, BI=0) +# bgtl = BA.bcl(BO=12, BI=1) +# bnll = BA.bcl(BO=4, BI=0) +# bnel = BA.bcl(BO=4, BI=2) +# bngl = BA.bcl(BO=4, BI=1) +# bsol = BA.bcl(BO=12, BI=3) +# bnsl = BA.bcl(BO=4, BI=3) +# bunl = BA.bcl(BO=12, BI=3) +# bnul = BA.bcl(BO=4, BI=3) +# +# bltla = BA.bcla(BO=12, BI=0) +# blela = BA.bcla(BO=4, BI=1) +# beqla = BA.bcla(BO=12, BI=2) +# bgela = BA.bcla(BO=4, BI=0) +# bgtla = BA.bcla(BO=12, BI=1) +# bnlla = BA.bcla(BO=4, BI=0) +# bnela = BA.bcla(BO=4, BI=2) +# bngla = BA.bcla(BO=4, BI=1) +# bsola = BA.bcla(BO=12, BI=3) +# bnsla = BA.bcla(BO=4, BI=3) +# bunla = BA.bcla(BO=12, BI=3) +# bnula = BA.bcla(BO=4, BI=3) +# +# bltlr = BA.bclr(BO=12, BI=0) +# blelr = BA.bclr(BO=4, BI=1) +# beqlr = BA.bclr(BO=12, BI=2) +# bgelr = BA.bclr(BO=4, BI=0) +# bgtlr = BA.bclr(BO=12, BI=1) +# bnllr = BA.bclr(BO=4, BI=0) +# bnelr = BA.bclr(BO=4, BI=2) +# bnglr = BA.bclr(BO=4, BI=1) +# bsolr = BA.bclr(BO=12, BI=3) +# bnslr = BA.bclr(BO=4, BI=3) +# bunlr = BA.bclr(BO=12, BI=3) +# bnulr = BA.bclr(BO=4, BI=3) +# +# bltctr = BA.bcctr(BO=12, BI=0) +# blectr = BA.bcctr(BO=4, BI=1) +# beqctr = BA.bcctr(BO=12, BI=2) +# bgectr = BA.bcctr(BO=4, BI=0) +# bgtctr = BA.bcctr(BO=12, BI=1) +# bnlctr = 
BA.bcctr(BO=4, BI=0) +# bnectr = BA.bcctr(BO=4, BI=2) +# bngctr = BA.bcctr(BO=4, BI=1) +# bsoctr = BA.bcctr(BO=12, BI=3) +# bnsctr = BA.bcctr(BO=4, BI=3) +# bunctr = BA.bcctr(BO=12, BI=3) +# bnuctr = BA.bcctr(BO=4, BI=3) +# +# bltlrl = BA.bclrl(BO=12, BI=0) +# blelrl = BA.bclrl(BO=4, BI=1) +# beqlrl = BA.bclrl(BO=12, BI=2) +# bgelrl = BA.bclrl(BO=4, BI=0) +# bgtlrl = BA.bclrl(BO=12, BI=1) +# bnllrl = BA.bclrl(BO=4, BI=0) +# bnelrl = BA.bclrl(BO=4, BI=2) +# bnglrl = BA.bclrl(BO=4, BI=1) +# bsolrl = BA.bclrl(BO=12, BI=3) +# bnslrl = BA.bclrl(BO=4, BI=3) +# bunlrl = BA.bclrl(BO=12, BI=3) +# bnulrl = BA.bclrl(BO=4, BI=3) +# +# bltctrl = BA.bcctrl(BO=12, BI=0) +# blectrl = BA.bcctrl(BO=4, BI=1) +# beqctrl = BA.bcctrl(BO=12, BI=2) +# bgectrl = BA.bcctrl(BO=4, BI=0) +# bgtctrl = BA.bcctrl(BO=12, BI=1) +# bnlctrl = BA.bcctrl(BO=4, BI=0) +# bnectrl = BA.bcctrl(BO=4, BI=2) +# bngctrl = BA.bcctrl(BO=4, BI=1) +# bsoctrl = BA.bcctrl(BO=12, BI=3) +# bnsctrl = BA.bcctrl(BO=4, BI=3) +# bunctrl = BA.bcctrl(BO=12, BI=3) +# bnuctrl = BA.bcctrl(BO=4, BI=3) +# +# # whew! and we haven't even begun the predicted versions... 
+# +# # F.6 Simplified Mnemonics for Condition Register +# # Logical Instructions +# +# crset = BA.creqv(crbA="crbD", crbB="crbD") +# crclr = BA.crxor(crbA="crbD", crbB="crbD") +# crmove = BA.cror(crbA="crbB") +# crnot = BA.crnor(crbA="crbB") +# +# # F.7 Simplified Mnemonics for Trap Instructions +# +# trap = BA.tw(TO=31, rA=0, rB=0) +# twlt = BA.tw(TO=16) +# twle = BA.tw(TO=20) +# tweq = BA.tw(TO=4) +# twge = BA.tw(TO=12) +# twgt = BA.tw(TO=8) +# twnl = BA.tw(TO=12) +# twng = BA.tw(TO=24) +# twllt = BA.tw(TO=2) +# twlle = BA.tw(TO=6) +# twlge = BA.tw(TO=5) +# twlgt = BA.tw(TO=1) +# twlnl = BA.tw(TO=5) +# twlng = BA.tw(TO=6) +# +# twlti = BA.twi(TO=16) +# twlei = BA.twi(TO=20) +# tweqi = BA.twi(TO=4) +# twgei = BA.twi(TO=12) +# twgti = BA.twi(TO=8) +# twnli = BA.twi(TO=12) +# twnei = BA.twi(TO=24) +# twngi = BA.twi(TO=20) +# twllti = BA.twi(TO=2) +# twllei = BA.twi(TO=6) +# twlgei = BA.twi(TO=5) +# twlgti = BA.twi(TO=1) +# twlnli = BA.twi(TO=5) +# twlngi = BA.twi(TO=6) +# +# # F.8 Simplified Mnemonics for Special-Purpose +# # Registers +# +# mfctr = BA.mfspr(spr=9) +# mflr = BA.mfspr(spr=8) +# mftbl = BA.mftb(spr=268) +# mftbu = BA.mftb(spr=269) +# mfxer = BA.mfspr(spr=1) +# +# mtctr = BA.mtspr(spr=9) +# mtlr = BA.mtspr(spr=8) +# mtxer = BA.mtspr(spr=1) +# +# # F.9 Recommended Simplified Mnemonics +# +# nop = BA.ori(rS=0, rA=0, UIMM=0) +# +# li = BA.addi(rA=0) +# lis = BA.addis(rA=0) +# +# mr = BA.or_(rB="rS") +# mrx = BA.or_x(rB="rS") +# +# not_ = BA.nor(rB="rS") +# not_x = BA.norx(rB="rS") +# +# mtcr = BA.mtcrf(CRM=0xFF) +# +# def emit(self, insn): +# bytes = struct.pack("i", insn) +# for byte in bytes: +# self.writechar(byte) +# +#def hi(w): +# return w >> 16 +# +#def ha(w): +# if (w >> 15) & 1: +# return (w >> 16) + 1 +# else: +# return w >> 16 +# +#def lo(w): +# return w & 0x0000FFFF +# +#def la(w): +# v = w & 0x0000FFFF +# if v & 0x8000: +# return -((v ^ 0xFFFF) + 1) # "sign extend" to 32 bits +# return v +# +#def highest(w): +# return w >> 48 +# +#def 
higher(w): +# return (w >> 32) & 0x0000FFFF +# +#def high(w): +# return (w >> 16) & 0x0000FFFF +# +#class GuardToken(object): +# def __init__(self, descr, failargs, faillocs, offset, +# save_exc=False, is_invalidate=False): +# self.descr = descr +# self.offset = offset +# self.is_invalidate = is_invalidate +# self.failargs = failargs +# self.faillocs = faillocs +# self.save_exc = save_exc +# +#class PPCBuilder(PPCAssembler): +# def __init__(self, cpu, failargs_limit=1000): +# PPCAssembler.__init__(self) +# self.cpu = cpu +# self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) +# +# def load_imm(self, rD, word): +# rD = rD.as_key() +# if word <= 32767 and word >= -32768: +# self.li(rD, word) +# elif IS_PPC_32 or (word <= 2147483647 and word >= -2147483648): +# self.lis(rD, hi(word)) +# if word & 0xFFFF != 0: +# self.ori(rD, rD, lo(word)) +# else: +# self.lis(rD, highest(word)) +# self.ori(rD, rD, higher(word)) +# self.sldi(rD, rD, 32) +# self.oris(rD, rD, high(word)) +# self.ori(rD, rD, lo(word)) +# +# def load_from_addr(self, rD, addr): +# if IS_PPC_32: +# self.addis(rD, 0, ha(addr)) +# self.lwz(rD, rD, la(addr)) +# else: +# self.load_word(rD, addr) +# self.ld(rD, rD, 0) +# +# def store_reg(self, source_reg, addr): +# self.load_imm(r.r0, addr) +# if IS_PPC_32: +# self.stwx(source_reg.value, 0, 0) +# else: +# self.std(source_reg.value, 0, 0) +# +# def _save_nonvolatiles(self): +# for i, reg in enumerate(NONVOLATILES): +# if IS_PPC_32: +# self.stw(reg, 1, self.framesize - WORD * i) +# else: +# self.ld(reg, 1, self.framesize - WORD * i) +# +# def _restore_nonvolatiles(self): +# for i, reg in enumerate(NONVOLATILES): +# if IS_PPC_32: +# self.lwz(reg, 1, self.framesize - WORD * i) +# else: +# self.ld(reg, 1, self.framesize - WORD * i) +# +# def _make_prologue(self): +# if IS_PPC_32: +# self.stwu(1, 1, -self.framesize) +# self.mflr(0) +# self.stw(0, 1, self.framesize + 4) +# else: +# self.stdu(1, 1, -self.framesize) +# self.mflr(0) +# self.std(0, 1, 
self.framesize + 4) +# self._save_nonvolatiles() +# +# def _make_epilogue(self): +# for op_index, fail_index, guard, reglist in self.patch_list: +# curpos = self.get_relative_pos() +# offset = curpos - (4 * op_index) +# assert (1 << 15) > offset +# self.beq(offset) +# self.patch_op(op_index) +# +# # store return parameters in memory +# used_mem_indices = [] +# for index, reg in enumerate(reglist): +# # if reg is None, then there is a hole in the failargs +# if reg is not None: +# addr = self.fail_boxes_int.get_addr_for_num(index) +# self.store_reg(reg, addr) +# used_mem_indices.append(index) +# +# patch_op = self.get_number_of_ops() +# patch_pos = self.get_relative_pos() +# descr = self.cpu.saved_descr[fail_index] +# descr.patch_op = patch_op +# descr.patch_pos = patch_pos +# descr.used_mem_indices = used_mem_indices +# +# self._restore_nonvolatiles() +# +# self.lwz(0, 1, self.framesize + 4) +# if IS_PPC_32: +# self.lwz(0, 1, self.framesize + WORD) # 36 +# else: +# self.ld(0, 1, self.framesize + WORD) # 36 +# self.mtlr(0) +# self.addi(1, 1, self.framesize) +# self.li(r.r3.value, fail_index) +# self.blr() +# +# def gen_bootstrap_code(self, nonfloatlocs, inputargs): +# for i in range(len(nonfloatlocs)): +# loc = nonfloatlocs[i] +# arg = inputargs[i] +# assert arg.type != FLOAT +# if arg.type == INT: +# addr = self.fail_boxes_int.get_addr_for_num(i) +# elif args.type == REF: +# addr = self.fail_boxes_ptr.get_addr_for_num(i) +# else: +# assert 0, "%s not supported" % arg.type +# if loc.is_reg(): +# reg = loc +# else: +# assert 0, "FIX LATER" +# self.load_from_addr(reg.value, addr) +# +# def assemble_loop(self, inputargs, operations, looptoken, log): +# self.framesize = 256 + GPR_SAVE_AREA +# self.patch_list = [] +# self.pending_guards = [] +# self.startpos = self.get_relative_pos() +# +# clt = CompiledLoopToken(self.cpu, looptoken.number) +# looptoken.compiled_loop_token = clt +# +# self.current_clt = clt +# +# longevity = compute_vars_longevity(inputargs, operations) 
+# regalloc = Regalloc(longevity, assembler=self, +# frame_manager=PPCFrameManager()) +# +# self._make_prologue() +# nonfloatlocs = regalloc.prepare_loop(inputargs, operations, looptoken) +# self.gen_bootstrap_code(nonfloatlocs, inputargs) +# +# looptoken._ppc_loop_code = self.get_relative_pos() +# looptoken._ppc_arglocs = [nonfloatlocs] +# looptoken._ppc_bootstrap_code = 0 +# +# self._walk_operations(operations, regalloc) +# #self.write_pending_failure_recoveries() +# self._make_epilogue() +# +# loop_start = self.assemble() +# looptoken.ppc_code = loop_start +# #self.process_pending_guards(loop_start) +# self._teardown() +# +# def assemble_bridge(self, faildescr, inputargs, operations, +# original_loop_token, log): +# assert isinstance(faildescr, AbstractFailDescr) +# code = faildescr._failure_recovery_code +# enc = rffi.cast(rffi.CCHARP, code) +# longevity = compute_vars_longevity(inputargs, operations) +# regalloc = Regalloc(longevity, assembler=self, +# frame_manager=PPCFrameManager()) +# +# def process_pending_guards(self, block_start): +# print "Signed addr = %x" % rffi.cast(lltype.Signed, block_start) +# print "USigned addr = %x" % rffi.cast(lltype.Unsigned, block_start) +# clt = self.current_clt +# for tok in self.pending_guards: +# descr = tok.descr +# assert isinstance(descr, AbstractFailDescr) +# descr._ppc_block_start = block_start +# +# if not tok.is_invalidate: +# #import pdb; pdb.set_trace() +# x = 1 +# else: +# #import pdb; pdb.set_trace() +# x = 2 +# +# def write_pending_failure_recoveries(self): +# for tok in self.pending_guards: +# descr = tok.descr +# pos = self.get_relative_pos() +# tok.pos_recovery_stub = pos +# memaddr = self._gen_path_to_exit_path(descr, tok.failargs, +# tok.faillocs, save_exc=tok.save_exc) +# +# descr._ppc_frame_depth = tok.faillocs[0].getint() +# descr._failure_recovery_code = memaddr +# descr._ppc_guard_pos = pos +# +# def _gen_path_to_exit_path(self, descr, args, arglocs, save_exc=False): +# return 1 +# +# def 
_walk_operations(self, operations, regalloc): +# while regalloc.position() < len(operations) - 1: +# regalloc.next_instruction() +# pos = regalloc.position() +# op = operations[pos] +# opnum = op.getopnum() +# if op.has_no_side_effect() and op.result not in regalloc.longevity: +# regalloc.possibly_free_vars_for_op(op) +# else: +# arglocs = regalloc.operations[opnum](regalloc, op) +# if arglocs is not None: +# self.operations[opnum](self, op, arglocs, regalloc) +# if op.result: +# regalloc.possibly_free_var(op.result) +# regalloc.possibly_free_vars_for_op(op) +# regalloc._check_invariants() +# +# def _teardown(self): +# self.patch_list = None +# self.pending_guards = None +# self.current_clt = None +# self.reset() +# +# ## translate a trace operation to corresponding machine code +# #def build_op(self, trace_op, cpu): +# # opnum = trace_op.getopnum() +# # opname = trace_op.getopname() +# # op_method = self.oplist[opnum] +# # if trace_op.is_guard(): +# # op_method(self, trace_op, cpu) +# # self._guard_epilog(trace_op, cpu) +# # else: +# # if opname.startswith("int_") or opname.startswith("uint_")\ +# # or opname.startswith("ptr_"): +# # numargs = trace_op.numargs() +# # if numargs == 1: +# # free_reg, reg0 = self._unary_int_op_prolog(trace_op, cpu) +# # op_method(self, trace_op, cpu, reg0, free_reg) +# # self._int_op_epilog(trace_op, cpu, free_reg) +# # elif numargs == 2: +# # free_reg, reg0, reg1 = self._binary_int_op_prolog(trace_op, cpu) +# # op_method(self, trace_op, cpu, reg0, reg1, free_reg) +# # self._int_op_epilog(trace_op, cpu, free_reg) +# # else: +# # op_method(self, trace_op, cpu) +# # else: +# # op_method(self, trace_op, cpu) +# +# def _unary_int_op_prolog(self, op, cpu): +# arg0 = op.getarg(0) +# if isinstance(arg0, Box): +# reg0 = cpu.reg_map[arg0] +# else: +# reg0 = cpu.get_next_register() +# self.load_word(reg0, arg0.value) +# free_reg = cpu.next_free_register +# return free_reg, reg0 +# +# def _binary_int_op_prolog(self, op, cpu): +# arg0 = 
op.getarg(0) +# arg1 = op.getarg(1) +# if isinstance(arg0, Box): +# reg0 = cpu.reg_map[arg0] +# else: +# box = TempInt() +# reg0 = cpu.rm.force_allocate_reg(box) +# self.load_word(reg0, arg0.value) +# if isinstance(arg1, Box): +# reg1 = cpu.reg_map[arg1] +# else: +# box = TempInt() +# reg1 = cpu.rm.force_allocate_reg(box) +# boxed = cpu.rm.make_sure_var_in_reg(box) +# self.load_word(reg1, arg1.value) +# import pdb; pdb.set_trace() +# free_reg = cpu.rm.force_allocate_reg(op.result) +# return free_reg, reg0, reg1 +# +# def _int_op_epilog(self, op, cpu, result_reg): +# result = op.result +# cpu.reg_map[result] = result_reg +# cpu.next_free_register += 1 +# +# # Fetches the identifier from a descr object. +# # If it has no identifier, then an unused identifier +# # is generated +# # XXX could be overwritten later on, better approach? +# def _get_identifier_from_descr(self, descr): +# try: +# identifier = descr.identifier +# except AttributeError: +# identifier = None +# if identifier is not None: +# return identifier +# keys = self.cpu.saved_descr.keys() +# if keys == []: +# return 1 +# return max(keys) + 1 +# +# # --------------------------------------- # +# # CODE GENERATION # +# # --------------------------------------- # +# +# def emit_int_add(self, op, arglocs, regalloc): +# l0, l1, res = arglocs +# if l0.is_imm(): +# self.addi(res.value, l1.value, l0.value) +# elif l1.is_imm(): +# self.addi(res.value, l0.value, l1.value) +# else: +# self.add(res.value, l0.value, l1.value) +# +# def emit_int_add_ovf(self, op, cpu, reg0, reg1, free_reg): +# self.addo(free_reg, reg0, reg1) +# +# def emit_int_sub(self, op, cpu, reg0, reg1, free_reg): +# self.sub(free_reg, reg0, reg1) +# +# def emit_int_sub_ovf(self, op, cpu, reg0, reg1, free_reg): +# self.subfo(free_reg, reg1, reg0) +# +# def emit_int_mul(self, op, cpu, reg0, reg1, free_reg): +# # XXX need to care about factors whose product needs 64 bit +# if IS_PPC_32: +# self.mullw(free_reg, reg0, reg1) +# else: +# 
self.mulld(free_reg, reg0, reg1) +# +# def emit_int_mul_ovf(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.mullwo(free_reg, reg0, reg1) +# else: +# self.mulldo(free_reg, reg0, reg1) +# +# def emit_int_floordiv(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.divw(free_reg, reg0, reg1) +# else: +# self.divd(free_reg, reg0, reg1) +# +# def emit_int_mod(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.divw(free_reg, reg0, reg1) +# # use shift left of log2 +# self.mullw(free_reg, free_reg, reg1) +# else: +# self.divd(free_reg, reg0, reg1) +# self.mulld(free_reg, free_reg, reg1) +# self.subf(free_reg, free_reg, reg0) +# +# def emit_int_and(self, op, cpu, reg0, reg1, free_reg): +# self.and_(free_reg, reg0, reg1) +# +# def emit_int_or(self, op, cpu, reg0, reg1, free_reg): +# self.or_(free_reg, reg0, reg1) +# +# def emit_int_xor(self, op, cpu, reg0, reg1, free_reg): +# self.xor(free_reg, reg0, reg1) +# +# def emit_int_lshift(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.slw(free_reg, reg0, reg1) +# else: +# self.sld(free_reg, reg0, reg1) +# +# def emit_int_rshift(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.sraw(free_reg, reg0, reg1) +# else: +# self.srad(free_reg, reg0, reg1) +# +# def emit_uint_rshift(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.srw(free_reg, reg0, reg1) +# else: +# self.srd(free_reg, reg0, reg1) +# +# def emit_uint_floordiv(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.divwu(free_reg, reg0, reg1) +# else: +# self.divdu(free_reg, reg0, reg1) +# +# # **************************************************** +# # * C O M P A R I S O N S T U F F * +# # **************************************************** +# +# emit_int_le = gen_emit_cmp_op(c.LE) +# +# def emit_int_eq(self, op, cpu, reg0, reg1, free_reg): +# self.xor(free_reg, reg0, reg1) +# if IS_PPC_32: +# self.cntlzw(free_reg, free_reg) +# self.srwi(free_reg, free_reg, 5) +# else: +# 
self.cntlzd(free_reg, free_reg) +# self.srdi(free_reg, free_reg, 6) +# +# def emit_int_lt(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.cmpw(7, reg0, reg1) +# else: +# self.cmpd(7, reg0, reg1) +# self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 29, 31, 31) +# +# def emit_int_ne(self, op, cpu, reg0, reg1, free_reg): +# self.emit_int_eq(op, cpu, reg0, reg1, free_reg) +# self.xori(free_reg, free_reg, 1) +# +# def emit_int_gt(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.cmpw(7, reg0, reg1) +# else: +# self.cmpd(7, reg0, reg1) +# self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 30, 31, 31) +# +# def emit_int_ge(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.cmpw(7, reg0, reg1) +# else: +# self.cmpd(7, reg0, reg1) +# self.cror(31, 30, 29) +# self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 0, 31, 31) +# +# def emit_uint_lt(self, op, cpu, reg0, reg1, free_reg): +# self.subfc(free_reg, reg1, reg0) +# self.subfe(free_reg, free_reg, free_reg) +# self.neg(free_reg, free_reg) +# +# def emit_uint_le(self, op, cpu, reg0, reg1, free_reg): +# self.subfc(free_reg, reg0, reg1) +# self.li(free_reg, 0) +# self.adde(free_reg, free_reg, free_reg) +# +# def emit_uint_gt(self, op, cpu, reg0, reg1, free_reg): +# self.subfc(free_reg, reg0, reg1) +# self.subfe(free_reg, free_reg, free_reg) +# self.neg(free_reg, free_reg) +# +# def emit_uint_ge(self, op, cpu, reg0, reg1, free_reg): +# self.subfc(free_reg, reg1, reg0) +# self.li(free_reg, 0) +# self.adde(free_reg, free_reg, free_reg) +# +# # ************************************************* +# # FIELD AND ARRAY OPS * +# # ************************************************* +# +# def emit_setfield_gc(self, op, cpu): +# args = op.getarglist() +# fptr = args[0] +# value = args[1] +# fdescr = op.getdescr() +# offset = fdescr.offset +# width = fdescr.get_field_size(0) +# addr_reg = cpu.reg_map[fptr] +# +# if isinstance(value, Box): +# value_reg = cpu.reg_map[args[1]] +# elif 
isinstance(value, Const): +# value_reg = cpu.get_next_register() +# if isinstance(value, ConstInt): +# self.load_word(value_reg, value.value) +# elif isinstance(value, ConstPtr): +# self.load_word(value_reg, rffi.cast(lltype.Signed, value.value)) +# else: +# assert 0, "%s not supported" % value +# else: +# assert 0, "%s not supported" % value +# +# if width == 8: +# self.std(value_reg, addr_reg, offset) +# elif width == 4: +# self.stw(value_reg, addr_reg, offset) +# elif width == 2: +# self.sth(value_reg, addr_reg, offset) +# elif width == 1: +# self.stb(value_reg, addr_reg, offset) +# else: +# assert 0, "invalid width %s" % width +# +# def emit_setfield_raw(self, op, cpu): +# self.emit_setfield_gc(op, cpu) +# +# def emit_getfield_gc(self, op, cpu): +# args = op.getarglist() +# fptr = args[0] +# fdescr = op.getdescr() +# offset = fdescr.offset +# width = fdescr.get_field_size(0) +# sign = fdescr.is_field_signed() +# free_reg = cpu.next_free_register +# field_addr_reg = cpu.reg_map[fptr] +# if width == 8: +# self.ld(free_reg, field_addr_reg, offset) +# elif width == 4: +# if IS_PPC_32 or not sign: +# self.lwz(free_reg, field_addr_reg, offset) +# else: +# self.lwa(free_reg, field_addr_reg, offset) +# elif width == 2: +# if sign: +# self.lha(free_reg, field_addr_reg, offset) +# else: +# self.lhz(free_reg, field_addr_reg, offset) +# elif width == 1: +# self.lbz(free_reg, field_addr_reg, offset) +# if sign: +# self.extsb(free_reg, free_reg) +# else: +# assert 0, "invalid width %s" % width +# result = op.result +# cpu.reg_map[result] = cpu.next_free_register +# cpu.next_free_register += 1 +# +# def emit_getfield_raw(self, op, cpu): +# self.emit_getfield_gc(op, cpu) +# +# def emit_getfield_raw_pure(self, op, cpu): +# self.emit_getfield_gc(op, cpu) +# +# def emit_getfield_gc_pure(self, op, cpu): +# self.emit_getfield_gc(op, cpu) +# +# def emit_arraylen_gc(self, op, cpu): +# args = op.getarglist() +# fptr = args[0] +# free_reg = cpu.next_free_register +# base_addr_reg = 
cpu.reg_map[fptr] +# if IS_PPC_32: +# self.lwz(free_reg, base_addr_reg, 0) +# else: +# self.ld(free_reg, base_addr_reg, 0) +# result = op.result +# cpu.reg_map[result] = cpu.next_free_register +# cpu.next_free_register += 1 +# +# def emit_setarrayitem_gc(self, op, cpu): +# args = op.getarglist() +# fptr = args[0] +# optr = args[1] +# vptr = args[2] +# fdescr = op.getdescr() +# width = fdescr.get_item_size(0) +# ofs = fdescr.get_base_size(0) +# field_addr_reg = cpu.reg_map[fptr] +# offset_reg = cpu.reg_map[optr] +# value_reg = cpu.reg_map[vptr] +# self.addi(field_addr_reg, field_addr_reg, ofs) +# if width == 8: +# self.sldi(offset_reg, offset_reg, 3) +# self.stdx(value_reg, field_addr_reg, offset_reg) +# elif width == 4: +# if IS_PPC_32: +# self.slwi(offset_reg, offset_reg, 2) +# else: +# self.sldi(offset_reg, offset_reg, 2) +# self.stwx(value_reg, field_addr_reg, offset_reg) +# elif width == 2: +# if IS_PPC_32: +# self.slwi(offset_reg, offset_reg, 1) +# else: +# self.sldi(offset_reg, offset_reg, 1) +# self.sthx(value_reg, field_addr_reg, offset_reg) +# elif width == 1: +# self.stbx(value_reg, field_addr_reg, offset_reg) +# else: +# assert 0, "invalid width %s" % width +# +# def emit_setarrayitem_raw(self, op, cpu): +# self.emit_setarrayitem_gc(op, cpu) +# +# def emit_getarrayitem_gc(self, op, cpu): +# args = op.getarglist() +# fptr = args[0] +# optr = args[1] +# fdescr = op.getdescr() +# width = fdescr.get_item_size(0) +# ofs = fdescr.get_base_size(0) +# sign = fdescr.is_item_signed() +# free_reg = cpu.next_free_register +# field_addr_reg = cpu.reg_map[fptr] +# offset_reg = cpu.reg_map[optr] +# self.addi(field_addr_reg, field_addr_reg, ofs) +# if width == 8: +# self.sldi(offset_reg, offset_reg, 3) +# self.ldx(free_reg, field_addr_reg, offset_reg) +# elif width == 4: +# if IS_PPC_32: +# self.slwi(offset_reg, offset_reg, 2) +# else: +# self.sldi(offset_reg, offset_reg, 2) +# if IS_PPC_32 or not sign: +# self.lwzx(free_reg, field_addr_reg, offset_reg) +# else: +# 
self.lwax(free_reg, field_addr_reg, offset_reg) +# elif width == 2: +# if IS_PPC_32: +# self.slwi(offset_reg, offset_reg, 1) +# else: +# self.sldi(offset_reg, offset_reg, 1) +# if sign: +# self.lhax(free_reg, field_addr_reg, offset_reg) +# else: +# self.lhzx(free_reg, field_addr_reg, offset_reg) +# elif width == 1: +# self.lbzx(free_reg, field_addr_reg, offset_reg) +# if sign: +# self.extsb(free_reg, free_reg) +# else: +# assert 0, "invalid width %s" % width +# result = op.result +# cpu.reg_map[result] = cpu.next_free_register +# cpu.next_free_register += 1 +# +# def emit_getarrayitem_raw(self, op, cpu): +# self.emit_getarrayitem_gc(op, cpu) +# +# def emit_getarrayitem_gc_pure(self, op, cpu): +# self.emit_getarrayitem_gc(op, cpu) +# +# def emit_strlen(self, op, cpu): +# args = op.getarglist() +# base_box = args[0] +# base_reg = cpu.reg_map[base_box] +# free_reg = cpu.next_free_register +# _, _, ofs_length = symbolic.get_array_token(rstr.STR, +# cpu.translate_support_code) +# if IS_PPC_32: +# self.lwz(free_reg, base_reg, ofs_length) +# else: +# self.ld(free_reg, base_reg, ofs_length) +# result = op.result +# cpu.reg_map[result] = free_reg +# cpu.next_free_register += 1 +# +# def emit_strgetitem(self, op, cpu): +# args = op.getarglist() +# ptr_box = args[0] +# offset_box = args[1] +# ptr_reg = cpu.reg_map[ptr_box] +# offset_reg = cpu.reg_map[offset_box] +# free_reg = cpu.next_free_register +# basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, +# cpu.translate_support_code) +# assert itemsize == 1 +# self.addi(ptr_reg, ptr_reg, basesize) +# self.lbzx(free_reg, ptr_reg, offset_reg) +# result = op.result +# cpu.reg_map[result] = free_reg +# cpu.next_free_register += 1 +# +# def emit_strsetitem(self, op, cpu): +# args = op.getarglist() +# ptr_box = args[0] +# offset_box = args[1] +# value_box = args[2] +# +# ptr_reg = cpu.reg_map[ptr_box] +# offset_reg = cpu.reg_map[offset_box] +# value_reg = cpu.reg_map[value_box] +# basesize, itemsize, _ = 
symbolic.get_array_token(rstr.STR, +# cpu.translate_support_code) +# assert itemsize == 1 +# self.addi(ptr_reg, ptr_reg, basesize) +# self.stbx(value_reg, ptr_reg, offset_reg) +# +# def emit_call(self, op, cpu): +# call_addr = rffi.cast(lltype.Signed, op.getarg(0).value) +# args = op.getarglist()[1:] +# descr = op.getdescr() +# num_args = len(args) +# +# # pass first arguments in registers +# arg_reg = 3 +# for arg in args: +# if isinstance(arg, Box): +# try: +# self.mr(arg_reg, cpu.reg_map[arg]) +# except KeyError: +# self.lwz(arg_reg, 1, cpu.mem_map[arg]) +# elif isinstance(arg, Const): +# self.load_word(arg_reg, arg.value) +# else: +# assert 0, "%s not supported yet" % arg +# arg_reg += 1 +# if arg_reg == 11: +# break +# +# # if the function takes more than 8 arguments, +# # pass remaining arguments on stack +# if num_args > 8: +# remaining_args = args[8:] +# for i, arg in enumerate(remaining_args): +# if isinstance(arg, Box): +# #self.mr(0, cpu.reg_map[arg]) +# try: +# self.stw(cpu.reg_map[arg], 1, 8 + WORD * i) +# except KeyError: +# self.load_word(0, cpu.mem_map[arg]) +# self.lwzx(0, 1, 0) +# self.stw(0, 1, 8 + WORD * i) +# elif isinstance(arg, Const): +# self.load_word(0, arg.value) +# self.stw(0, 1, 8 + WORD * i) +# else: +# assert 0, "%s not supported yet" % arg +# +# self.load_word(0, call_addr) +# self.mtctr(0) +# self.bctrl() +# +# result = op.result +# cpu.reg_map[result] = 3 +# +# ############################ +# # unary integer operations # +# ############################ +# +# def emit_int_is_true(self, op, cpu, reg0, free_reg): +# self.addic(free_reg, reg0, -1) +# self.subfe(0, free_reg, reg0) +# self.mr(free_reg, 0) +# +# def emit_int_neg(self, op, cpu, reg0, free_reg): +# self.xor(free_reg, free_reg, free_reg) +# self.sub(free_reg, free_reg, reg0) +# +# def emit_int_invert(self, op, cpu, reg0, free_reg): +# self.li(free_reg, -1) +# self.xor(free_reg, free_reg, reg0) +# +# def emit_int_is_zero(self, op, cpu, reg0, free_reg): +# if IS_PPC_32: +# 
self.cntlzw(free_reg, reg0) +# self.srwi(free_reg, free_reg, 5) +# else: +# self.cntlzd(free_reg, reg0) +# self.srdi(free_reg, free_reg, 6) +# +# #****************************** +# # GUARD OPERATIONS * +# #****************************** +# +# def _guard_epilogue(self, op, failargs): +# fail_descr = op.getdescr() +# fail_index = self._get_identifier_from_descr(fail_descr) +# fail_descr.index = fail_index +# self.cpu.saved_descr[fail_index] = fail_descr +# numops = self.get_number_of_ops() +# self.beq(0) +# reglist = [] +# for failarg in failargs: +# if failarg is None: +# reglist.append(None) +# else: +# reglist.append(failarg) +# self.patch_list.append((numops, fail_index, op, reglist)) +# +# def _emit_guard(self, op, arglocs, save_exc=False, +# is_guard_not_invalidated=False): +# descr = op.getdescr() +# assert isinstance(descr, AbstractFailDescr) +# pos = self.get_relative_pos() +# self.b(0) # has to be patched later on +# self.pending_guards.append(GuardToken(descr, +# failargs=op.getfailargs(), +# faillocs=arglocs, +# offset=pos, +# is_invalidate=is_guard_not_invalidated, +# save_exc=save_exc)) +# +# def emit_guard_true(self, op, arglocs, regalloc): +# l0 = arglocs[0] +# failargs = arglocs[1:] +# #import pdb; pdb.set_trace() +# self.cmpi(l0.value, 0) +# #self._emit_guard(op, failargs) +# self._guard_epilogue(op, failargs) +# +# def emit_guard_false(self, op, cpu): +# arg0 = op.getarg(0) +# regnum = cpu.reg_map[arg0] +# self.cmpi(0, 1, regnum, 1) +# +# def emit_guard_no_overflow(self, op, cpu): +# free_reg = cpu.next_free_register +# self.mfxer(free_reg) +# self.rlwinm(free_reg, free_reg, 2, 31, 31) +# self.cmpi(0, 1, free_reg, 1) +# +# def emit_guard_overflow(self, op, cpu): +# free_reg = cpu.next_free_register +# self.mfxer(free_reg) +# self.rlwinm(free_reg, free_reg, 2, 31, 31) +# self.cmpi(0, 1, free_reg, 0) +# +# def emit_guard_value(self, op, cpu): +# free_reg = cpu.next_free_register +# args = op.getarglist() +# reg0 = cpu.reg_map[args[0]] +# const = 
args[1] +# self.load_word(free_reg, const.value) +# if IS_PPC_32: +# self.cmpw(0, free_reg, reg0) +# else: +# self.cmpd(0, free_reg, reg0) +# self.cror(3, 0, 1) +# self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 4, 31, 31) +# self.cmpi(0, 1, free_reg, 1) +# +# def emit_guard_nonnull(self, op, cpu): +# arg0 = op.getarg(0) +# regnum = cpu.reg_map[arg0] +# self.cmpi(0, 1, regnum, 0) +# +# def emit_guard_isnull(self, op, cpu): +# free_reg = cpu.next_free_register +# arg0 = op.getarg(0) +# regnum = cpu.reg_map[arg0] +# self.cmpi(0, 1, regnum, 0) +# self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 3, 31, 31) +# self.cmpi(0, 1, free_reg, 0) +# +# def emit_guard_class(self, op, cpu): +# field_addr_reg = cpu.reg_map[op.getarg(0)] +# class_addr = rffi.cast(lltype.Signed, op.getarg(1).value) +# offset = cpu.vtable_offset +# free_reg = cpu.get_next_register() +# class_reg = cpu.next_free_register +# self.load_word(free_reg, offset) +# self.load_word(class_reg, class_addr) +# if IS_PPC_32: +# self.lwz(free_reg, field_addr_reg, offset) +# self.cmpw(0, free_reg, class_reg) +# else: +# self.ld(free_reg, field_addr_reg, offset) +# self.cmpd(0, free_reg, class_reg) +# self.cror(3, 0, 1) +# self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 4, 31, 31) +# self.cmpi(0, 1, free_reg, 1) +# +# def emit_guard_nonnull_class(self, op, cpu): +# self.emit_guard_nonnull(op, cpu) +# self._guard_epilog(op, cpu) +# self.emit_guard_class(op, cpu) +# +# #************************************* +# # POINTER OPERATIONS * +# #************************************* +# +# def emit_ptr_eq(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.cmpw(0, reg0, reg1) +# else: +# self.cmpd(0, reg0, reg1) +# self.cror(3, 0, 1) +# self.crnot(3, 3) +# self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 4, 31, 31) +# +# def emit_ptr_ne(self, op, cpu, reg0, reg1, free_reg): +# if IS_PPC_32: +# self.cmpw(0, reg0, reg1) +# else: +# self.cmpd(0, reg0, reg1) +# self.cror(3, 0, 1) +# 
self.mfcr(free_reg) +# self.rlwinm(free_reg, free_reg, 4, 31, 31) +# +# #_____________________________________ +# +# def emit_finish(self, op, arglocs, regalloc): +# descr = op.getdescr() +# identifier = self._get_identifier_from_descr(descr) +# self.cpu.saved_descr[identifier] = descr +# args = op.getarglist() +# for index, arg in enumerate(arglocs): +# addr = self.fail_boxes_int.get_addr_for_num(index) +# self.store_reg(arg, addr) +# +# framesize = 256 + GPR_SAVE_AREA +# +# self._restore_nonvolatiles() +# +# if IS_PPC_32: +# self.lwz(0, 1, self.framesize + WORD) +# else: +# self.ld(0, 1, framesize + WORD) +# self.mtlr(0) +# self.addi(1, 1, framesize) +# self.load_imm(r.r3, identifier) +# self.blr() +# +# def emit_jump(self, op, arglocs, regalloc): +# descr = op.getdescr() +# assert isinstance(descr, LoopToken) +# if descr._ppc_bootstrap_code == 0: +# curpos = self.get_relative_pos() +# self.b(descr._ppc_loop_code - curpos) +# else: +# assert 0, "case not implemented yet" +# +#class BranchUpdater(PPCAssembler): +# def __init__(self): +# PPCAssembler.__init__(self) +# self.init_block_builder() +# +# def write_to_mem(self, addr): +# self.assemble() +# self.copy_to_raw_memory(addr) +# +# def assemble(self, dump=os.environ.has_key('PYPY_DEBUG')): +# insns = self.assemble0(dump) +# for i in insns: +# self.emit(i) +# +#def b(n): +# r = [] +# for i in range(32): +# r.append(n&1) +# n >>= 1 +# r.reverse() +# return ''.join(map(str, r)) +# +#from pypy.jit.backend.ppc.ppcgen.regname import * +# +#def main(): +# +# a = MyPPCAssembler() +# +# a.lwz(r5, r4, 12) +# a.lwz(r6, r4, 16) +# a.lwz(r7, r5, 8) +# a.lwz(r8, r6, 8) +# a.add(r3, r7, r8) +# a.load_word(r4, lookup("PyInt_FromLong")) +# a.mtctr(r4) +# a.bctr() +# +# f = a.assemble(True) +# print f(12,3) +# +# a = MyPPCAssembler() +# a.label("loop") +# a.mftbu(r3) +# a.mftbl(r4) +# a.mftbu(r5) +# a.cmpw(r5, r3) +# a.bne(-16) +# a.load_word(r5, lookup("PyLong_FromUnsignedLongLong")) +# a.mtctr(r5) +# a.bctr() +# +# tb = 
a.assemble(True) +# t0 = tb() +# print [tb() - t0 for i in range(10)] +# +#def make_operations(): +# def not_implemented(builder, trace_op, cpu, *rest_args): +# import pdb; pdb.set_trace() +# +# oplist = [None] * (rop._LAST + 1) +# for key, val in rop.__dict__.items(): +# if key.startswith("_"): +# continue +# opname = key.lower() +# methname = "emit_%s" % opname +# if hasattr(PPCBuilder, methname): +# oplist[val] = getattr(PPCBuilder, methname).im_func +# else: +# oplist[val] = not_implemented +# return oplist +# +#PPCBuilder.operations = make_operations() +# +#if __name__ == '__main__': +# main() def hi(w): return w >> 16 @@ -916,74 +1926,91 @@ def high(w): return (w >> 16) & 0x0000FFFF -class PPCBuilder(PPCAssembler): +class AssemblerPPC(OpAssembler): + def __init__(self, cpu, failargs_limit=1000): - PPCAssembler.__init__(self) self.cpu = cpu self.fail_boxes_int = values_array(lltype.Signed, failargs_limit) + self.mc = None def load_imm(self, rD, word): rD = rD.as_key() if word <= 32767 and word >= -32768: - self.li(rD, word) + self.mc.li(rD, word) elif IS_PPC_32 or (word <= 2147483647 and word >= -2147483648): - self.lis(rD, hi(word)) + self.mc.lis(rD, hi(word)) if word & 0xFFFF != 0: - self.ori(rD, rD, lo(word)) + self.mc.ori(rD, rD, lo(word)) else: - self.lis(rD, highest(word)) - self.ori(rD, rD, higher(word)) - self.sldi(rD, rD, 32) - self.oris(rD, rD, high(word)) - self.ori(rD, rD, lo(word)) + self.mc.lis(rD, highest(word)) + self.mc.ori(rD, rD, higher(word)) + self.mc.sldi(rD, rD, 32) + self.mc.oris(rD, rD, high(word)) + self.mc.ori(rD, rD, lo(word)) def load_from_addr(self, rD, addr): if IS_PPC_32: - self.addis(rD, 0, ha(addr)) - self.lwz(rD, rD, la(addr)) + self.mc.addis(rD, 0, ha(addr)) + self.mc.lwz(rD, rD, la(addr)) else: self.load_word(rD, addr) - self.ld(rD, rD, 0) + self.mc.ld(rD, rD, 0) def store_reg(self, source_reg, addr): self.load_imm(r.r0, addr) if IS_PPC_32: - self.stwx(source_reg.value, 0, 0) + self.mc.stwx(source_reg.value, 0, 0) else: - 
self.std(source_reg.value, 0, 0) + self.mc.std(source_reg.value, 0, 0) def _save_nonvolatiles(self): for i, reg in enumerate(NONVOLATILES): if IS_PPC_32: - self.stw(reg, 1, self.framesize - WORD * i) + self.mc.stw(reg, 1, self.framesize - WORD * i) else: - self.ld(reg, 1, self.framesize - WORD * i) + self.mc.ld(reg, 1, self.framesize - WORD * i) def _restore_nonvolatiles(self): for i, reg in enumerate(NONVOLATILES): if IS_PPC_32: - self.lwz(reg, 1, self.framesize - WORD * i) + self.mc.lwz(reg, 1, self.framesize - WORD * i) else: - self.ld(reg, 1, self.framesize - WORD * i) + self.mc.ld(reg, 1, self.framesize - WORD * i) + + # Fetches the identifier from a descr object. + # If it has no identifier, then an unused identifier + # is generated + # XXX could be overwritten later on, better approach? + def _get_identifier_from_descr(self, descr): + try: + identifier = descr.identifier + except AttributeError: + identifier = None + if identifier is not None: + return identifier + keys = self.cpu.saved_descr.keys() + if keys == []: + return 1 + return max(keys) + 1 def _make_prologue(self): if IS_PPC_32: - self.stwu(1, 1, -self.framesize) - self.mflr(0) - self.stw(0, 1, self.framesize + 4) + self.mc.stwu(1, 1, -self.framesize) + self.mc.mflr(0) + self.mc.stw(0, 1, self.framesize + 4) else: - self.stdu(1, 1, -self.framesize) - self.mflr(0) - self.std(0, 1, self.framesize + 4) + self.mc.stdu(1, 1, -self.framesize) + self.mc.mflr(0) + self.mc.std(0, 1, self.framesize + 4) self._save_nonvolatiles() def _make_epilogue(self): for op_index, fail_index, guard, reglist in self.patch_list: - curpos = self.get_relative_pos() + curpos = self.mc.get_rel_pos() offset = curpos - (4 * op_index) assert (1 << 15) > offset - self.beq(offset) - self.patch_op(op_index) + self.mc.beq(offset) + self.mc.patch_op(op_index) # store return parameters in memory used_mem_indices = [] @@ -994,8 +2021,8 @@ self.store_reg(reg, addr) used_mem_indices.append(index) - patch_op = self.get_number_of_ops() - 
patch_pos = self.get_relative_pos() + patch_op = self.mc.get_number_of_ops() + patch_pos = self.mc.get_rel_pos() descr = self.cpu.saved_descr[fail_index] descr.patch_op = patch_op descr.patch_pos = patch_pos @@ -1003,15 +2030,15 @@ self._restore_nonvolatiles() - self.lwz(0, 1, self.framesize + 4) + self.mc.lwz(0, 1, self.framesize + 4) if IS_PPC_32: - self.lwz(0, 1, self.framesize + WORD) # 36 + self.mc.lwz(0, 1, self.framesize + WORD) # 36 else: - self.ld(0, 1, self.framesize + WORD) # 36 - self.mtlr(0) - self.addi(1, 1, self.framesize) - self.li(r.r3.value, fail_index) - self.blr() + self.mc.ld(0, 1, self.framesize + WORD) # 36 + self.mc.mtlr(0) + self.mc.addi(1, 1, self.framesize) + self.mc.li(r.r3.value, fail_index) + self.mc.blr() def gen_bootstrap_code(self, nonfloatlocs, inputargs): for i in range(len(nonfloatlocs)): @@ -1033,11 +2060,15 @@ def assemble_loop(self, inputargs, operations, looptoken, log): self.framesize = 256 + GPR_SAVE_AREA self.patch_list = [] - self.startpos = self.get_relative_pos() + self.pending_guards = [] + self.mc = PPCBuilder() + self.startpos = self.mc.get_rel_pos() clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt + self.current_clt = clt + longevity = compute_vars_longevity(inputargs, operations) regalloc = Regalloc(longevity, assembler=self, frame_manager=PPCFrameManager()) @@ -1046,15 +2077,22 @@ nonfloatlocs = regalloc.prepare_loop(inputargs, operations, looptoken) self.gen_bootstrap_code(nonfloatlocs, inputargs) - looptoken._ppc_loop_code = self.get_relative_pos() + looptoken._ppc_loop_code = self.mc.get_rel_pos() looptoken._ppc_arglocs = [nonfloatlocs] looptoken._ppc_bootstrap_code = 0 self._walk_operations(operations, regalloc) self._make_epilogue() + + loop_start = self.mc.assemble() + looptoken.ppc_code = loop_start + self._teardown() - looptoken.ppc_code = self.assemble() - self._teardown() + def _teardown(self): + self.patch_list = None + self.pending_guards = None + self.current_clt 
= None + self.mc = None def _walk_operations(self, operations, regalloc): while regalloc.position() < len(operations) - 1: @@ -1073,739 +2111,9 @@ regalloc.possibly_free_vars_for_op(op) regalloc._check_invariants() - def _teardown(self): - self.patch_list = None - self.reset() - - # translate a trace operation to corresponding machine code - def build_op(self, trace_op, cpu): - opnum = trace_op.getopnum() - opname = trace_op.getopname() - op_method = self.oplist[opnum] - if trace_op.is_guard(): - op_method(self, trace_op, cpu) - self._guard_epilog(trace_op, cpu) - else: - if opname.startswith("int_") or opname.startswith("uint_")\ - or opname.startswith("ptr_"): - numargs = trace_op.numargs() - if numargs == 1: - free_reg, reg0 = self._unary_int_op_prolog(trace_op, cpu) - op_method(self, trace_op, cpu, reg0, free_reg) - self._int_op_epilog(trace_op, cpu, free_reg) - elif numargs == 2: - free_reg, reg0, reg1 = self._binary_int_op_prolog(trace_op, cpu) - op_method(self, trace_op, cpu, reg0, reg1, free_reg) - self._int_op_epilog(trace_op, cpu, free_reg) - else: - op_method(self, trace_op, cpu) - else: - op_method(self, trace_op, cpu) - - def _unary_int_op_prolog(self, op, cpu): - arg0 = op.getarg(0) - if isinstance(arg0, Box): - reg0 = cpu.reg_map[arg0] - else: - reg0 = cpu.get_next_register() - self.load_word(reg0, arg0.value) - free_reg = cpu.next_free_register - return free_reg, reg0 - - def _binary_int_op_prolog(self, op, cpu): - arg0 = op.getarg(0) - arg1 = op.getarg(1) - if isinstance(arg0, Box): - reg0 = cpu.reg_map[arg0] - else: - box = TempInt() - reg0 = cpu.rm.force_allocate_reg(box) - self.load_word(reg0, arg0.value) - if isinstance(arg1, Box): - reg1 = cpu.reg_map[arg1] - else: - box = TempInt() - reg1 = cpu.rm.force_allocate_reg(box) - boxed = cpu.rm.make_sure_var_in_reg(box) - self.load_word(reg1, arg1.value) - import pdb; pdb.set_trace() - free_reg = cpu.rm.force_allocate_reg(op.result) - return free_reg, reg0, reg1 - - def _int_op_epilog(self, op, cpu, 
result_reg): - result = op.result - cpu.reg_map[result] = result_reg - cpu.next_free_register += 1 - - # Fetches the identifier from a descr object. - # If it has no identifier, then an unused identifier - # is generated - # XXX could be overwritten later on, better approach? - def _get_identifier_from_descr(self, descr): - try: - identifier = descr.identifier - except AttributeError: - identifier = None - if identifier is not None: - return identifier - keys = self.cpu.saved_descr.keys() - if keys == []: - return 1 - return max(keys) + 1 - - # --------------------------------------- # - # CODE GENERATION # - # --------------------------------------- # - - def emit_int_add(self, op, arglocs, regalloc): - l0, l1, res = arglocs - if l0.is_imm(): - self.addi(res.value, l1.value, l0.value) - elif l1.is_imm(): - self.addi(res.value, l0.value, l1.value) - else: - self.add(res.value, l0.value, l1.value) - - def emit_int_add_ovf(self, op, cpu, reg0, reg1, free_reg): - self.addo(free_reg, reg0, reg1) - - def emit_int_sub(self, op, cpu, reg0, reg1, free_reg): - self.sub(free_reg, reg0, reg1) - - def emit_int_sub_ovf(self, op, cpu, reg0, reg1, free_reg): - self.subfo(free_reg, reg1, reg0) - - def emit_int_mul(self, op, cpu, reg0, reg1, free_reg): - # XXX need to care about factors whose product needs 64 bit - if IS_PPC_32: - self.mullw(free_reg, reg0, reg1) - else: - self.mulld(free_reg, reg0, reg1) - - def emit_int_mul_ovf(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.mullwo(free_reg, reg0, reg1) - else: - self.mulldo(free_reg, reg0, reg1) - - def emit_int_floordiv(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.divw(free_reg, reg0, reg1) - else: - self.divd(free_reg, reg0, reg1) - - def emit_int_mod(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.divw(free_reg, reg0, reg1) - # use shift left of log2 - self.mullw(free_reg, free_reg, reg1) - else: - self.divd(free_reg, reg0, reg1) - self.mulld(free_reg, free_reg, reg1) - 
self.subf(free_reg, free_reg, reg0) - - def emit_int_and(self, op, cpu, reg0, reg1, free_reg): - self.and_(free_reg, reg0, reg1) - - def emit_int_or(self, op, cpu, reg0, reg1, free_reg): - self.or_(free_reg, reg0, reg1) - - def emit_int_xor(self, op, cpu, reg0, reg1, free_reg): - self.xor(free_reg, reg0, reg1) - - def emit_int_lshift(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.slw(free_reg, reg0, reg1) - else: - self.sld(free_reg, reg0, reg1) - - def emit_int_rshift(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.sraw(free_reg, reg0, reg1) - else: - self.srad(free_reg, reg0, reg1) - - def emit_uint_rshift(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.srw(free_reg, reg0, reg1) - else: - self.srd(free_reg, reg0, reg1) - - def emit_uint_floordiv(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.divwu(free_reg, reg0, reg1) - else: - self.divdu(free_reg, reg0, reg1) - - # **************************************************** - # * C O M P A R I S O N S T U F F * - # **************************************************** - - emit_int_le = gen_emit_cmp_op(c.LE) - - def emit_int_eq(self, op, cpu, reg0, reg1, free_reg): - self.xor(free_reg, reg0, reg1) - if IS_PPC_32: - self.cntlzw(free_reg, free_reg) - self.srwi(free_reg, free_reg, 5) - else: - self.cntlzd(free_reg, free_reg) - self.srdi(free_reg, free_reg, 6) - - def emit_int_lt(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.cmpw(7, reg0, reg1) - else: - self.cmpd(7, reg0, reg1) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 29, 31, 31) - - def emit_int_ne(self, op, cpu, reg0, reg1, free_reg): - self.emit_int_eq(op, cpu, reg0, reg1, free_reg) - self.xori(free_reg, free_reg, 1) - - def emit_int_gt(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.cmpw(7, reg0, reg1) - else: - self.cmpd(7, reg0, reg1) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 30, 31, 31) - - def emit_int_ge(self, op, cpu, reg0, reg1, free_reg): - if 
IS_PPC_32: - self.cmpw(7, reg0, reg1) - else: - self.cmpd(7, reg0, reg1) - self.cror(31, 30, 29) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 0, 31, 31) - - def emit_uint_lt(self, op, cpu, reg0, reg1, free_reg): - self.subfc(free_reg, reg1, reg0) - self.subfe(free_reg, free_reg, free_reg) - self.neg(free_reg, free_reg) - - def emit_uint_le(self, op, cpu, reg0, reg1, free_reg): - self.subfc(free_reg, reg0, reg1) - self.li(free_reg, 0) - self.adde(free_reg, free_reg, free_reg) - - def emit_uint_gt(self, op, cpu, reg0, reg1, free_reg): - self.subfc(free_reg, reg0, reg1) - self.subfe(free_reg, free_reg, free_reg) - self.neg(free_reg, free_reg) - - def emit_uint_ge(self, op, cpu, reg0, reg1, free_reg): - self.subfc(free_reg, reg1, reg0) - self.li(free_reg, 0) - self.adde(free_reg, free_reg, free_reg) - - # ************************************************* - # FIELD AND ARRAY OPS * - # ************************************************* - - def emit_setfield_gc(self, op, cpu): - args = op.getarglist() - fptr = args[0] - value = args[1] - fdescr = op.getdescr() - offset = fdescr.offset - width = fdescr.get_field_size(0) - addr_reg = cpu.reg_map[fptr] - - if isinstance(value, Box): - value_reg = cpu.reg_map[args[1]] - elif isinstance(value, Const): - value_reg = cpu.get_next_register() - if isinstance(value, ConstInt): - self.load_word(value_reg, value.value) - elif isinstance(value, ConstPtr): - self.load_word(value_reg, rffi.cast(lltype.Signed, value.value)) - else: - assert 0, "%s not supported" % value - else: - assert 0, "%s not supported" % value - - if width == 8: - self.std(value_reg, addr_reg, offset) - elif width == 4: - self.stw(value_reg, addr_reg, offset) - elif width == 2: - self.sth(value_reg, addr_reg, offset) - elif width == 1: - self.stb(value_reg, addr_reg, offset) - else: - assert 0, "invalid width %s" % width - - def emit_setfield_raw(self, op, cpu): - self.emit_setfield_gc(op, cpu) - - def emit_getfield_gc(self, op, cpu): - args = 
op.getarglist() - fptr = args[0] - fdescr = op.getdescr() - offset = fdescr.offset - width = fdescr.get_field_size(0) - sign = fdescr.is_field_signed() - free_reg = cpu.next_free_register - field_addr_reg = cpu.reg_map[fptr] - if width == 8: - self.ld(free_reg, field_addr_reg, offset) - elif width == 4: - if IS_PPC_32 or not sign: - self.lwz(free_reg, field_addr_reg, offset) - else: - self.lwa(free_reg, field_addr_reg, offset) - elif width == 2: - if sign: - self.lha(free_reg, field_addr_reg, offset) - else: - self.lhz(free_reg, field_addr_reg, offset) - elif width == 1: - self.lbz(free_reg, field_addr_reg, offset) - if sign: - self.extsb(free_reg, free_reg) - else: - assert 0, "invalid width %s" % width - result = op.result - cpu.reg_map[result] = cpu.next_free_register - cpu.next_free_register += 1 - - def emit_getfield_raw(self, op, cpu): - self.emit_getfield_gc(op, cpu) - - def emit_getfield_raw_pure(self, op, cpu): - self.emit_getfield_gc(op, cpu) - - def emit_getfield_gc_pure(self, op, cpu): - self.emit_getfield_gc(op, cpu) - - def emit_arraylen_gc(self, op, cpu): - args = op.getarglist() - fptr = args[0] - free_reg = cpu.next_free_register - base_addr_reg = cpu.reg_map[fptr] - if IS_PPC_32: - self.lwz(free_reg, base_addr_reg, 0) - else: - self.ld(free_reg, base_addr_reg, 0) - result = op.result - cpu.reg_map[result] = cpu.next_free_register - cpu.next_free_register += 1 - - def emit_setarrayitem_gc(self, op, cpu): - args = op.getarglist() - fptr = args[0] - optr = args[1] - vptr = args[2] - fdescr = op.getdescr() - width = fdescr.get_item_size(0) - ofs = fdescr.get_base_size(0) - field_addr_reg = cpu.reg_map[fptr] - offset_reg = cpu.reg_map[optr] - value_reg = cpu.reg_map[vptr] - self.addi(field_addr_reg, field_addr_reg, ofs) - if width == 8: - self.sldi(offset_reg, offset_reg, 3) - self.stdx(value_reg, field_addr_reg, offset_reg) - elif width == 4: - if IS_PPC_32: - self.slwi(offset_reg, offset_reg, 2) - else: - self.sldi(offset_reg, offset_reg, 2) - 
self.stwx(value_reg, field_addr_reg, offset_reg) - elif width == 2: - if IS_PPC_32: - self.slwi(offset_reg, offset_reg, 1) - else: - self.sldi(offset_reg, offset_reg, 1) - self.sthx(value_reg, field_addr_reg, offset_reg) - elif width == 1: - self.stbx(value_reg, field_addr_reg, offset_reg) - else: - assert 0, "invalid width %s" % width - - def emit_setarrayitem_raw(self, op, cpu): - self.emit_setarrayitem_gc(op, cpu) - - def emit_getarrayitem_gc(self, op, cpu): - args = op.getarglist() - fptr = args[0] - optr = args[1] - fdescr = op.getdescr() - width = fdescr.get_item_size(0) - ofs = fdescr.get_base_size(0) - sign = fdescr.is_item_signed() - free_reg = cpu.next_free_register - field_addr_reg = cpu.reg_map[fptr] - offset_reg = cpu.reg_map[optr] - self.addi(field_addr_reg, field_addr_reg, ofs) - if width == 8: - self.sldi(offset_reg, offset_reg, 3) - self.ldx(free_reg, field_addr_reg, offset_reg) - elif width == 4: - if IS_PPC_32: - self.slwi(offset_reg, offset_reg, 2) - else: - self.sldi(offset_reg, offset_reg, 2) - if IS_PPC_32 or not sign: - self.lwzx(free_reg, field_addr_reg, offset_reg) - else: - self.lwax(free_reg, field_addr_reg, offset_reg) - elif width == 2: - if IS_PPC_32: - self.slwi(offset_reg, offset_reg, 1) - else: - self.sldi(offset_reg, offset_reg, 1) - if sign: - self.lhax(free_reg, field_addr_reg, offset_reg) - else: - self.lhzx(free_reg, field_addr_reg, offset_reg) - elif width == 1: - self.lbzx(free_reg, field_addr_reg, offset_reg) - if sign: - self.extsb(free_reg, free_reg) - else: - assert 0, "invalid width %s" % width - result = op.result - cpu.reg_map[result] = cpu.next_free_register - cpu.next_free_register += 1 - - def emit_getarrayitem_raw(self, op, cpu): - self.emit_getarrayitem_gc(op, cpu) - - def emit_getarrayitem_gc_pure(self, op, cpu): - self.emit_getarrayitem_gc(op, cpu) - - def emit_strlen(self, op, cpu): - args = op.getarglist() - base_box = args[0] - base_reg = cpu.reg_map[base_box] - free_reg = cpu.next_free_register - _, _, 
ofs_length = symbolic.get_array_token(rstr.STR, - cpu.translate_support_code) - if IS_PPC_32: - self.lwz(free_reg, base_reg, ofs_length) - else: - self.ld(free_reg, base_reg, ofs_length) - result = op.result - cpu.reg_map[result] = free_reg - cpu.next_free_register += 1 - - def emit_strgetitem(self, op, cpu): - args = op.getarglist() - ptr_box = args[0] - offset_box = args[1] - ptr_reg = cpu.reg_map[ptr_box] - offset_reg = cpu.reg_map[offset_box] - free_reg = cpu.next_free_register - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - cpu.translate_support_code) - assert itemsize == 1 - self.addi(ptr_reg, ptr_reg, basesize) - self.lbzx(free_reg, ptr_reg, offset_reg) - result = op.result - cpu.reg_map[result] = free_reg - cpu.next_free_register += 1 - - def emit_strsetitem(self, op, cpu): - args = op.getarglist() - ptr_box = args[0] - offset_box = args[1] - value_box = args[2] - - ptr_reg = cpu.reg_map[ptr_box] - offset_reg = cpu.reg_map[offset_box] - value_reg = cpu.reg_map[value_box] - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - cpu.translate_support_code) - assert itemsize == 1 - self.addi(ptr_reg, ptr_reg, basesize) - self.stbx(value_reg, ptr_reg, offset_reg) - - def emit_call(self, op, cpu): - call_addr = rffi.cast(lltype.Signed, op.getarg(0).value) - args = op.getarglist()[1:] - descr = op.getdescr() - num_args = len(args) - - # pass first arguments in registers - arg_reg = 3 - for arg in args: - if isinstance(arg, Box): - try: - self.mr(arg_reg, cpu.reg_map[arg]) - except KeyError: - self.lwz(arg_reg, 1, cpu.mem_map[arg]) - elif isinstance(arg, Const): - self.load_word(arg_reg, arg.value) - else: - assert 0, "%s not supported yet" % arg - arg_reg += 1 - if arg_reg == 11: - break - - # if the function takes more than 8 arguments, - # pass remaining arguments on stack - if num_args > 8: - remaining_args = args[8:] - for i, arg in enumerate(remaining_args): - if isinstance(arg, Box): - #self.mr(0, cpu.reg_map[arg]) - try: - 
self.stw(cpu.reg_map[arg], 1, 8 + WORD * i) - except KeyError: - self.load_word(0, cpu.mem_map[arg]) - self.lwzx(0, 1, 0) - self.stw(0, 1, 8 + WORD * i) - elif isinstance(arg, Const): - self.load_word(0, arg.value) - self.stw(0, 1, 8 + WORD * i) - else: - assert 0, "%s not supported yet" % arg - - self.load_word(0, call_addr) - self.mtctr(0) - self.bctrl() - - result = op.result - cpu.reg_map[result] = 3 - - ############################ - # unary integer operations # - ############################ - - def emit_int_is_true(self, op, cpu, reg0, free_reg): - self.addic(free_reg, reg0, -1) - self.subfe(0, free_reg, reg0) - self.mr(free_reg, 0) - - def emit_int_neg(self, op, cpu, reg0, free_reg): - self.xor(free_reg, free_reg, free_reg) - self.sub(free_reg, free_reg, reg0) - - def emit_int_invert(self, op, cpu, reg0, free_reg): - self.li(free_reg, -1) - self.xor(free_reg, free_reg, reg0) - - def emit_int_is_zero(self, op, cpu, reg0, free_reg): - if IS_PPC_32: - self.cntlzw(free_reg, reg0) - self.srwi(free_reg, free_reg, 5) - else: - self.cntlzd(free_reg, reg0) - self.srdi(free_reg, free_reg, 6) - - #****************************** - # GUARD OPERATIONS * - #****************************** - - def _guard_epilogue(self, op, failargs): - fail_descr = op.getdescr() - fail_index = self._get_identifier_from_descr(fail_descr) - fail_descr.index = fail_index - self.cpu.saved_descr[fail_index] = fail_descr - numops = self.get_number_of_ops() - self.beq(0) - reglist = [] - for failarg in failargs: - if failarg is None: - reglist.append(None) - else: - reglist.append(failarg) - self.patch_list.append((numops, fail_index, op, reglist)) - - def emit_guard_true(self, op, arglocs, regalloc): - l0 = arglocs[0] - failargs = arglocs[1:] - self.cmpi(l0.value, 0) - self._guard_epilogue(op, failargs) - - def emit_guard_false(self, op, cpu): - arg0 = op.getarg(0) - regnum = cpu.reg_map[arg0] - self.cmpi(0, 1, regnum, 1) - - def emit_guard_no_overflow(self, op, cpu): - free_reg = 
cpu.next_free_register - self.mfxer(free_reg) - self.rlwinm(free_reg, free_reg, 2, 31, 31) - self.cmpi(0, 1, free_reg, 1) - - def emit_guard_overflow(self, op, cpu): - free_reg = cpu.next_free_register - self.mfxer(free_reg) - self.rlwinm(free_reg, free_reg, 2, 31, 31) - self.cmpi(0, 1, free_reg, 0) - - def emit_guard_value(self, op, cpu): - free_reg = cpu.next_free_register - args = op.getarglist() - reg0 = cpu.reg_map[args[0]] - const = args[1] - self.load_word(free_reg, const.value) - if IS_PPC_32: - self.cmpw(0, free_reg, reg0) - else: - self.cmpd(0, free_reg, reg0) - self.cror(3, 0, 1) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 4, 31, 31) - self.cmpi(0, 1, free_reg, 1) - - def emit_guard_nonnull(self, op, cpu): - arg0 = op.getarg(0) - regnum = cpu.reg_map[arg0] - self.cmpi(0, 1, regnum, 0) - - def emit_guard_isnull(self, op, cpu): - free_reg = cpu.next_free_register - arg0 = op.getarg(0) - regnum = cpu.reg_map[arg0] - self.cmpi(0, 1, regnum, 0) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 3, 31, 31) - self.cmpi(0, 1, free_reg, 0) - - def emit_guard_class(self, op, cpu): - field_addr_reg = cpu.reg_map[op.getarg(0)] - class_addr = rffi.cast(lltype.Signed, op.getarg(1).value) - offset = cpu.vtable_offset - free_reg = cpu.get_next_register() - class_reg = cpu.next_free_register - self.load_word(free_reg, offset) - self.load_word(class_reg, class_addr) - if IS_PPC_32: - self.lwz(free_reg, field_addr_reg, offset) - self.cmpw(0, free_reg, class_reg) - else: - self.ld(free_reg, field_addr_reg, offset) - self.cmpd(0, free_reg, class_reg) - self.cror(3, 0, 1) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 4, 31, 31) - self.cmpi(0, 1, free_reg, 1) - - def emit_guard_nonnull_class(self, op, cpu): - self.emit_guard_nonnull(op, cpu) - self._guard_epilog(op, cpu) - self.emit_guard_class(op, cpu) - - #************************************* - # POINTER OPERATIONS * - #************************************* - - def emit_ptr_eq(self, op, cpu, reg0, 
reg1, free_reg): - if IS_PPC_32: - self.cmpw(0, reg0, reg1) - else: - self.cmpd(0, reg0, reg1) - self.cror(3, 0, 1) - self.crnot(3, 3) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 4, 31, 31) - - def emit_ptr_ne(self, op, cpu, reg0, reg1, free_reg): - if IS_PPC_32: - self.cmpw(0, reg0, reg1) - else: - self.cmpd(0, reg0, reg1) - self.cror(3, 0, 1) - self.mfcr(free_reg) - self.rlwinm(free_reg, free_reg, 4, 31, 31) - - #_____________________________________ - - def emit_finish(self, op, arglocs, regalloc): - descr = op.getdescr() - identifier = self._get_identifier_from_descr(descr) - self.cpu.saved_descr[identifier] = descr - args = op.getarglist() - for index, arg in enumerate(arglocs): - addr = self.fail_boxes_int.get_addr_for_num(index) - self.store_reg(arg, addr) - - framesize = 256 + GPR_SAVE_AREA - - self._restore_nonvolatiles() - - if IS_PPC_32: - self.lwz(0, 1, self.framesize + WORD) - else: - self.ld(0, 1, framesize + WORD) - self.mtlr(0) - self.addi(1, 1, framesize) - self.load_imm(r.r3, identifier) - self.blr() - - def emit_jump(self, op, arglocs, regalloc): - descr = op.getdescr() - assert isinstance(descr, LoopToken) - if descr._ppc_bootstrap_code == 0: - curpos = self.get_relative_pos() - self.b(descr._ppc_loop_code - curpos) - else: - assert 0, "case not implemented yet" - -class BranchUpdater(PPCAssembler): - def __init__(self): - PPCAssembler.__init__(self) - self.init_block_builder() - - def write_to_mem(self, addr): - self.assemble() - self.copy_to_raw_memory(addr) - - def assemble(self, dump=os.environ.has_key('PYPY_DEBUG')): - insns = self.assemble0(dump) - for i in insns: - self.emit(i) - -def b(n): - r = [] - for i in range(32): - r.append(n&1) - n >>= 1 - r.reverse() - return ''.join(map(str, r)) - -from pypy.jit.backend.ppc.ppcgen.regname import * - -def main(): - - a = MyPPCAssembler() - - a.lwz(r5, r4, 12) - a.lwz(r6, r4, 16) - a.lwz(r7, r5, 8) - a.lwz(r8, r6, 8) - a.add(r3, r7, r8) - a.load_word(r4, lookup("PyInt_FromLong")) - 
a.mtctr(r4) - a.bctr() - - f = a.assemble(True) - print f(12,3) - - a = MyPPCAssembler() - a.label("loop") - a.mftbu(r3) - a.mftbl(r4) - a.mftbu(r5) - a.cmpw(r5, r3) - a.bne(-16) - a.load_word(r5, lookup("PyLong_FromUnsignedLongLong")) - a.mtctr(r5) - a.bctr() - - tb = a.assemble(True) - t0 = tb() - print [tb() - t0 for i in range(10)] - def make_operations(): def not_implemented(builder, trace_op, cpu, *rest_args): - import pdb; pdb.set_trace() + raise NotImplementedError, trace_op oplist = [None] * (rop._LAST + 1) for key, val in rop.__dict__.items(): @@ -1813,15 +2121,11 @@ continue opname = key.lower() methname = "emit_%s" % opname - if hasattr(PPCBuilder, methname): - oplist[val] = getattr(PPCBuilder, methname).im_func + if hasattr(AssemblerPPC, methname): + #oplist[val] = getattr(PPCBuilder, methname).im_func + oplist[val] = getattr(AssemblerPPC, methname).im_func else: oplist[val] = not_implemented return oplist -PPCBuilder.operations = make_operations() - -if __name__ == '__main__': - main() - - +AssemblerPPC.operations = make_operations() diff --git a/pypy/jit/backend/ppc/ppcgen/regalloc.py b/pypy/jit/backend/ppc/ppcgen/regalloc.py --- a/pypy/jit/backend/ppc/ppcgen/regalloc.py +++ b/pypy/jit/backend/ppc/ppcgen/regalloc.py @@ -3,6 +3,7 @@ compute_loop_consts) from pypy.jit.backend.ppc.ppcgen.arch import (WORD, MY_COPY_OF_REGS) from pypy.jit.backend.ppc.ppcgen.jump import remap_frame_layout_mixed +from pypy.jit.backend.ppc.ppcgen.locations import imm from pypy.jit.backend.ppc.ppcgen.helper.regalloc import (_check_imm_arg, prepare_cmp_op) from pypy.jit.metainterp.history import (INT, REF, FLOAT, Const, ConstInt, @@ -201,6 +202,7 @@ def _prepare_guard(self, op, args=None): if args is None: args = [] + #args.append(imm(self.frame_manager.frame_depth)) for arg in op.getfailargs(): if arg: args.append(self.loc(arg)) diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py --- a/pypy/jit/backend/ppc/runner.py +++ 
b/pypy/jit/backend/ppc/runner.py @@ -11,7 +11,7 @@ from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU from pypy.jit.backend.x86 import regloc from pypy.jit.backend.x86.support import values_array -from pypy.jit.backend.ppc.ppcgen.ppc_assembler import PPCBuilder +from pypy.jit.backend.ppc.ppcgen.ppc_assembler import AssemblerPPC from pypy.jit.backend.ppc.ppcgen.arch import NONVOLATILES, GPR_SAVE_AREA, WORD from pypy.jit.backend.ppc.ppcgen.regalloc import PPCRegisterManager, PPCFrameManager import sys @@ -33,45 +33,52 @@ self.supports_floats = False self.total_compiled_loops = 0 self.total_compiled_bridges = 0 - self.asm = PPCBuilder(self) + self.asm = AssemblerPPC(self) def compile_loop(self, inputargs, operations, looptoken, log=False): self.saved_descr = {} self.asm.assemble_loop(inputargs, operations, looptoken, log) - def compile_bridge(self, descr, inputargs, operations, looptoken): - self.saved_descr = {} - self.patch_list = [] - self.reg_map = {} - self.fail_box_count = 0 + def compile_bridge(self, faildescr, inputargs, operations, + original_loop_token, log=False): + clt = original_loop_token.compiled_loop_token + clt.compiling_a_bridge() + self.asm.assemble_bridge(faildescr, inputargs, operations, + original_loop_token, log=log) - codebuilder = looptoken.codebuilder - # jump to the bridge - current_pos = codebuilder.get_relative_pos() - offset = current_pos - descr.patch_pos - codebuilder.b(offset) - codebuilder.patch_op(descr.patch_op) + #def compile_bridge(self, descr, inputargs, operations, looptoken): + # self.saved_descr = {} + # self.patch_list = [] + # self.reg_map = {} + # self.fail_box_count = 0 - # initialize registers from memory - self.next_free_register = 3 - use_index = 0 - for index, arg in enumerate(inputargs): - self.reg_map[arg] = self.next_free_register - addr = self.fail_boxes_int.get_addr_for_num( - descr.used_mem_indices[use_index]) - codebuilder.load_from(self.next_free_register, addr) - self.next_free_register += 1 - 
use_index += 1 - - self._walk_trace_ops(codebuilder, operations) - self._make_epilogue(codebuilder) + # codebuilder = looptoken.codebuilder + # # jump to the bridge + # current_pos = codebuilder.get_relative_pos() + # offset = current_pos - descr.patch_pos + # codebuilder.b(offset) + # codebuilder.patch_op(descr.patch_op) - f = codebuilder.assemble() - looptoken.ppc_code = f - looptoken.codebuilder = codebuilder + # # initialize registers from memory + # self.next_free_register = 3 + # use_index = 0 + # for index, arg in enumerate(inputargs): + # self.reg_map[arg] = self.next_free_register + # addr = self.fail_boxes_int.get_addr_for_num( + # descr.used_mem_indices[use_index]) + # codebuilder.load_from(self.next_free_register, addr) + # self.next_free_register += 1 + # use_index += 1 + # + # self._walk_trace_ops(codebuilder, operations) + # self._make_epilogue(codebuilder) - self.total_compiled_bridges += 1 - self.teardown() + # f = codebuilder.assemble() + # looptoken.ppc_code = f + # looptoken.codebuilder = codebuilder + + # self.total_compiled_bridges += 1 + # self.teardown() # set value in fail_boxes_int def set_future_value_int(self, index, value_int): From noreply at buildbot.pypy.org Fri Sep 16 11:01:36 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 16 Sep 2011 11:01:36 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: change the trace length limit to half its previous value. this seems to be Message-ID: <20110916090136.1F36E820B1@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47282:0f72d394deb4 Date: 2011-09-16 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/0f72d394deb4/ Log: change the trace length limit to half its previous value. this seems to be better now with the better heap caching, will do a benchmark run to confirm. 
diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -294,7 +294,7 @@ PARAMETERS = {'threshold': 1032, # just above 1024 'function_threshold': 1617, # slightly more than one above 'trace_eagerness': 200, - 'trace_limit': 12000, + 'trace_limit': 6000, 'inlining': 1, 'loop_longevity': 1000, 'retrace_limit': 5, From noreply at buildbot.pypy.org Fri Sep 16 11:49:34 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 16 Sep 2011 11:49:34 +0200 (CEST) Subject: [pypy-commit] pypy default: fix for ootype, llstrings are not supported as input arguments Message-ID: <20110916094934.42C1F820B1@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r47283:a03db4de5707 Date: 2011-09-16 11:49 +0200 http://bitbucket.org/pypy/pypy/changeset/a03db4de5707/ Log: fix for ootype, llstrings are not supported as input arguments diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py --- a/pypy/rpython/test/test_rlist.py +++ b/pypy/rpython/test/test_rlist.py @@ -1363,14 +1363,15 @@ def test_hints(self): from pypy.rlib.objectmodel import newlist from pypy.rpython.annlowlevel import hlstr - - def f(z): - z = hlstr(z) + + strings = ['abc', 'def'] + def f(i): + z = strings[i] x = newlist(sizehint=13) x += z return ''.join(x) - res = self.interpret(f, [self.string_to_ll('abc')]) + res = self.interpret(f, [0]) assert self.ll_to_string(res) == 'abc' class TestLLtype(BaseTestRlist, LLRtypeMixin): From noreply at buildbot.pypy.org Fri Sep 16 12:45:46 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 12:45:46 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: hg merge default Message-ID: <20110916104546.8C542820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47284:85905197df58 Date: 2011-09-16 09:36 +0200 http://bitbucket.org/pypy/pypy/changeset/85905197df58/ Log: hg merge default diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py 
--- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -124,8 +124,7 @@ try: res = greenlet.run(*args) finally: - if greenlet.parent is not _tls.main: - _continuation.permute(greenlet, greenlet.parent) + _continuation.permute(greenlet, greenlet.parent) return (res,) def _greenlet_throw(greenlet, exc, value, tb): @@ -133,5 +132,4 @@ try: raise exc, value, tb finally: - if greenlet.parent is not _tls.main: - _continuation.permute(greenlet, greenlet.parent) + _continuation.permute(greenlet, greenlet.parent) diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -269,7 +269,7 @@ cont = space.interp_w(W_Continulet, w_cont) if cont.sthread is not sthread: if cont.sthread is None: - raise geterror(space, "got a non-initialized continulet") + continue # ignore non-initialized continulets else: raise geterror(space, "inter-thread support is missing") elif sthread.is_empty_handle(cont.h): diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -661,6 +661,12 @@ assert res == "done" main() + def test_permute_noninitialized(self): + from _continuation import continulet, permute + permute(continulet.__new__(continulet)) # ignored + permute(continulet.__new__(continulet), # ignored + continulet.__new__(continulet)) + def test_bug_finish_with_already_finished_stacklet(self): from _continuation import continulet, error # make an already-finished continulet diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -347,7 +347,7 @@ SLOTS_STARTING_FROM = 3 -class BaseMapdictObject: # slightly evil to make it inherit from W_Root +class BaseMapdictObject: _mixin_ = 
True def _init_empty(self, map): From noreply at buildbot.pypy.org Fri Sep 16 12:45:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 12:45:47 +0200 (CEST) Subject: [pypy-commit] pypy default: More informative message Message-ID: <20110916104547.B5253820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47285:3d84bc1f61de Date: 2011-09-16 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/3d84bc1f61de/ Log: More informative message diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -626,8 +626,8 @@ func = getattr(graph, 'func', None) if func and getattr(func, '_gc_no_collect_', False): if self.collect_analyzer.analyze_direct_call(graph): - raise Exception("no_collect function can trigger collection: %s" - % func.__name__) + raise Exception("'no_collect' function can trigger collection:" + " %s" % func) if self.write_barrier_ptr: self.clean_sets = ( From noreply at buildbot.pypy.org Fri Sep 16 12:45:48 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 12:45:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20110916104548.DD864820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47286:1ffcf2cd905f Date: 2011-09-16 12:43 +0200 http://bitbucket.org/pypy/pypy/changeset/1ffcf2cd905f/ Log: Translation fix diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py --- a/pypy/rpython/memory/gctransform/asmgcroot.py +++ b/pypy/rpython/memory/gctransform/asmgcroot.py @@ -663,4 +663,5 @@ QSORT_CALLBACK_PTR], lltype.Void, sandboxsafe=True, + random_effects_on_gcobjs=False, # but has a callback _nowrapper=True) diff --git a/pypy/translator/backendopt/graphanalyze.py b/pypy/translator/backendopt/graphanalyze.py --- a/pypy/translator/backendopt/graphanalyze.py 
+++ b/pypy/translator/backendopt/graphanalyze.py @@ -3,6 +3,8 @@ from pypy.rpython.lltypesystem import lltype class GraphAnalyzer(object): + verbose = False + def __init__(self, translator): self.translator = translator self.analyzed_calls = {} @@ -71,12 +73,24 @@ if op.opname == "direct_call": graph = get_graph(op.args[0], self.translator) if graph is None: - return self.analyze_external_call(op, seen) - return self.analyze_direct_call(graph, seen) + x = self.analyze_external_call(op, seen) + if self.verbose and x: + print '\tanalyze_external_call %s: %r' % (op, x) + return x + x = self.analyze_direct_call(graph, seen) + if self.verbose and x: + print '\tanalyze_direct_call(%s): %r' % (graph, x) + return x elif op.opname == "indirect_call": - if op.args[-1].value is None: + graphs = op.args[-1].value + if graphs is None: + if self.verbose: + print '\t%s to unknown' % (op,) return self.top_result() - return self.analyze_indirect_call(op.args[-1].value, seen) + x = self.analyze_indirect_call(graphs, seen) + if self.verbose and x: + print '\tanalyze_indirect_call(%s): %r' % (graphs, x) + return x elif op.opname == "oosend": name = op.args[0].value TYPE = op.args[1].concretetype @@ -85,7 +99,10 @@ if graph is None: return self.analyze_external_method(op, TYPE, meth) return self.analyze_oosend(TYPE, name, seen) - return self.analyze_simple_operation(op, graphinfo) + x = self.analyze_simple_operation(op, graphinfo) + if self.verbose and x: + print '\t%s: %r' % (op, x) + return x def analyze_direct_call(self, graph, seen=None): if graph in self.analyzed_calls: From noreply at buildbot.pypy.org Fri Sep 16 12:45:50 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 12:45:50 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20110916104550.27BE9820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47287:0fdfb1b11f99 Date: 2011-09-16 12:44 +0200 http://bitbucket.org/pypy/pypy/changeset/0fdfb1b11f99/ Log: merge 
heads diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py --- a/pypy/rpython/test/test_rlist.py +++ b/pypy/rpython/test/test_rlist.py @@ -1363,14 +1363,15 @@ def test_hints(self): from pypy.rlib.objectmodel import newlist from pypy.rpython.annlowlevel import hlstr - - def f(z): - z = hlstr(z) + + strings = ['abc', 'def'] + def f(i): + z = strings[i] x = newlist(sizehint=13) x += z return ''.join(x) - res = self.interpret(f, [self.string_to_ll('abc')]) + res = self.interpret(f, [0]) assert self.ll_to_string(res) == 'abc' class TestLLtype(BaseTestRlist, LLRtypeMixin): From noreply at buildbot.pypy.org Fri Sep 16 12:46:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 12:46:58 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Translation fix. Message-ID: <20110916104658.72BBD820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47288:4139c7f2d817 Date: 2011-09-16 12:46 +0200 http://bitbucket.org/pypy/pypy/changeset/4139c7f2d817/ Log: Translation fix. 
diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py --- a/pypy/module/_continuation/interp_pickle.py +++ b/pypy/module/_continuation/interp_pickle.py @@ -1,10 +1,11 @@ from pypy.tool import stdlib_opcode as pythonopcode from pypy.rlib import jit +from pypy.interpreter.error import OperationError from pypy.interpreter.pyframe import PyFrame from pypy.module._continuation.interp_continuation import State, global_state from pypy.module._continuation.interp_continuation import build_sthread from pypy.module._continuation.interp_continuation import post_switch -from pypy.module._continuation.interp_continuation import get_result +from pypy.module._continuation.interp_continuation import get_result, geterror def getunpickle(space): @@ -33,10 +34,10 @@ return space.newtuple(args) def setstate(self, w_args): + space = self.space if self.sthread is not None: raise geterror(space, "continulet.__setstate__() on an already-" "initialized continulet") - space = self.space w_frame, w_dict = space.fixedview(w_args, expected_length=2) if not space.is_w(w_dict, space.w_None): self.setdict(space, w_dict) @@ -68,8 +69,9 @@ try: w_result = post_switch(sthread, h) operr = None - except OperationError, operr: - pass + except OperationError, e: + w_result = None + operr = e # while True: ec = sthread.ec @@ -86,8 +88,9 @@ try: w_result = frame.execute_frame(w_result, operr) operr = None - except OperationError, operr: - pass + except OperationError, e: + w_result = None + operr = e if exit_continulet is not None: self = exit_continulet break From noreply at buildbot.pypy.org Fri Sep 16 12:59:40 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 12:59:40 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Support pickling, at least good enough to have the single test Message-ID: <20110916105941.000CC820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47289:637e3d17c657 
Date: 2011-09-16 12:59 +0200 http://bitbucket.org/pypy/pypy/changeset/637e3d17c657/ Log: Support pickling, at least good enough to have the single test from test_stackless_pickling pass again. This is done by killing a lot of code and adding none, so unsure if there isn't something missing now. diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -21,9 +21,6 @@ self._frame = None self.is_zombie = False - def __getattr__(self, attr): - return getattr(self._frame, attr) - def __del__(self): self.is_zombie = True del self._frame @@ -74,9 +71,6 @@ return _getcurrent() getcurrent = staticmethod(getcurrent) - def __reduce__(self): - raise TypeError, 'pickling is not possible based upon continulets' - def _getcurrent(): "Returns the current coroutine (i.e. the one which called this function)." @@ -175,34 +169,6 @@ raise self.type, self.value, self.traceback # -# helpers for pickling -# - -_stackless_primitive_registry = {} - -def register_stackless_primitive(thang, retval_expr='None'): - import types - func = thang - if isinstance(thang, types.MethodType): - func = thang.im_func - code = func.func_code - _stackless_primitive_registry[code] = retval_expr - # It is not too nice to attach info via the code object, but - # I can't think of a better solution without a real transform. - -def rewrite_stackless_primitive(coro_state, alive, tempval): - flags, frame, thunk, parent = coro_state - while frame is not None: - retval_expr = _stackless_primitive_registry.get(frame.f_code) - if retval_expr: - # this tasklet needs to stop pickling here and return its value. 
- tempval = eval(retval_expr, globals(), frame.f_locals) - coro_state = flags, frame, thunk, parent - break - frame = frame.f_back - return coro_state, alive, tempval - -# # class channel(object): @@ -354,8 +320,6 @@ """ return self._channel_action(None, -1) - register_stackless_primitive(receive, retval_expr='receiver.tempval') - def send_exception(self, exp_type, msg): self.send(bomb(exp_type, exp_type(msg))) @@ -372,9 +336,7 @@ the runnables list. """ return self._channel_action(msg, 1) - - register_stackless_primitive(send) - + class tasklet(coroutine): """ A tasklet object represents a tiny task in a Python thread. @@ -480,39 +442,6 @@ raise RuntimeError, "The current tasklet cannot be removed." # not sure if I will revive this " Use t=tasklet().capture()" _scheduler_remove(self) - - def __reduce__(self): - one, two, coro_state = coroutine.__reduce__(self) - assert one is coroutine - assert two == () - # we want to get rid of the parent thing. - # for now, we just drop it - a, frame, c, d = coro_state - - # Removing all frames related to stackless.py. - # They point to stuff we don't want to be pickled. 
- - pickleframe = frame - while frame is not None: - if frame.f_code == schedule.func_code: - # Removing everything including and after the - # call to stackless.schedule() - pickleframe = frame.f_back - break - frame = frame.f_back - if d: - assert isinstance(d, coroutine) - coro_state = a, pickleframe, c, None - coro_state, alive, tempval = rewrite_stackless_primitive(coro_state, self.alive, self.tempval) - inst_dict = self.__dict__.copy() - inst_dict.pop('tempval', None) - return self.__class__, (), (coro_state, alive, tempval, inst_dict) - - def __setstate__(self, (coro_state, alive, tempval, inst_dict)): - coroutine.__setstate__(self, coro_state) - self.__dict__.update(inst_dict) - self.alive = alive - self.tempval = tempval def getmain(): """ From noreply at buildbot.pypy.org Fri Sep 16 13:43:29 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 16 Sep 2011 13:43:29 +0200 (CEST) Subject: [pypy-commit] pypy improve-heap-caching-tracing: close to-be-merged branch Message-ID: <20110916114329.550E7820B1@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-heap-caching-tracing Changeset: r47290:7793d23a0ac6 Date: 2011-09-16 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/7793d23a0ac6/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Fri Sep 16 13:43:30 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 16 Sep 2011 13:43:30 +0200 (CEST) Subject: [pypy-commit] pypy default: merge improve-heap-caching-tracing: Message-ID: <20110916114330.A578B820B1@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r47291:5fcc43295a0a Date: 2011-09-16 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/5fcc43295a0a/ Log: merge improve-heap-caching-tracing: - a much better heap cache during tracing, this makes the traces that are produced before optimization much shorter, thus improving warmup time, reducing GC pressure, etc. - change the maximum trace length to half its original value. 
Now that the traces are much shorted due to the better optimization, this seems to give much better results for translation. diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/heapcache.py @@ -0,0 +1,172 @@ +from pypy.jit.metainterp.history import ConstInt +from pypy.jit.metainterp.resoperation import rop + + +class HeapCache(object): + def __init__(self): + self.reset() + + def reset(self): + # contains boxes where the class is already known + self.known_class_boxes = {} + # store the boxes that contain newly allocated objects: + self.new_boxes = {} + # contains frame boxes that are not virtualizables + self.nonstandard_virtualizables = {} + # heap cache + # maps descrs to {from_box, to_box} dicts + self.heap_cache = {} + # heap array cache + # maps descrs to {index: {from_box: to_box}} dicts + self.heap_array_cache = {} + # cache the length of arrays + self.length_cache = {} + + def invalidate_caches(self, opnum, descr, argboxes): + if opnum == rop.SETFIELD_GC: + return + if opnum == rop.SETARRAYITEM_GC: + return + if opnum == rop.SETFIELD_RAW: + return + if opnum == rop.SETARRAYITEM_RAW: + return + if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: + return + if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: + return + if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: + effectinfo = descr.get_extra_info() + ef = effectinfo.extraeffect + if ef == effectinfo.EF_LOOPINVARIANT or \ + ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ + ef == effectinfo.EF_ELIDABLE_CAN_RAISE: + return + # A special case for ll_arraycopy, because it is so common, and its + # effects are so well defined. 
+ elif effectinfo.oopspecindex == effectinfo.OS_ARRAYCOPY: + # The destination box + if argboxes[2] in self.new_boxes: + # XXX: no descr here so we invalidate any of them, not just + # of the correct type + # XXX: in theory the indices of the copy could be looked at + # as well + for descr, cache in self.heap_array_cache.iteritems(): + for idx, cache in cache.iteritems(): + for frombox in cache.keys(): + if frombox not in self.new_boxes: + del cache[frombox] + return + + self.heap_cache.clear() + self.heap_array_cache.clear() + + def is_class_known(self, box): + return box in self.known_class_boxes + + def class_now_known(self, box): + self.known_class_boxes[box] = None + + def is_nonstandard_virtualizable(self, box): + return box in self.nonstandard_virtualizables + + def nonstandard_virtualizables_now_known(self, box): + self.nonstandard_virtualizables[box] = None + + def new(self, box): + self.new_boxes[box] = None + + def new_array(self, box, lengthbox): + self.new(box) + self.arraylen_now_known(box, lengthbox) + + def getfield(self, box, descr): + d = self.heap_cache.get(descr, None) + if d: + tobox = d.get(box, None) + if tobox: + return tobox + return None + + def getfield_now_known(self, box, descr, fieldbox): + self.heap_cache.setdefault(descr, {})[box] = fieldbox + + def setfield(self, box, descr, fieldbox): + d = self.heap_cache.get(descr, None) + new_d = self._do_write_with_aliasing(d, box, fieldbox) + self.heap_cache[descr] = new_d + + def _do_write_with_aliasing(self, d, box, fieldbox): + # slightly subtle logic here + # a write to an arbitrary box, all other boxes can alias this one + if not d or box not in self.new_boxes: + # therefore we throw away the cache + return {box: fieldbox} + # the object we are writing to is freshly allocated + # only remove some boxes from the cache + new_d = {} + for frombox, tobox in d.iteritems(): + # the other box is *also* freshly allocated + # therefore frombox and box *must* contain different objects + # thus we 
can keep it in the cache + if frombox in self.new_boxes: + new_d[frombox] = tobox + new_d[box] = fieldbox + return new_d + + def getarrayitem(self, box, descr, indexbox): + if not isinstance(indexbox, ConstInt): + return + index = indexbox.getint() + cache = self.heap_array_cache.get(descr, None) + if cache: + indexcache = cache.get(index, None) + if indexcache is not None: + return indexcache.get(box, None) + + def getarrayitem_now_known(self, box, descr, indexbox, valuebox): + if not isinstance(indexbox, ConstInt): + return + index = indexbox.getint() + cache = self.heap_array_cache.setdefault(descr, {}) + indexcache = cache.get(index, None) + if indexcache is not None: + indexcache[box] = valuebox + else: + cache[index] = {box: valuebox} + + def setarrayitem(self, box, descr, indexbox, valuebox): + if not isinstance(indexbox, ConstInt): + cache = self.heap_array_cache.get(descr, None) + if cache is not None: + cache.clear() + return + index = indexbox.getint() + cache = self.heap_array_cache.setdefault(descr, {}) + indexcache = cache.get(index, None) + cache[index] = self._do_write_with_aliasing(indexcache, box, valuebox) + + + def arraylen(self, box): + return self.length_cache.get(box, None) + + def arraylen_now_known(self, box, lengthbox): + self.length_cache[box] = lengthbox + + def _replace_box(self, d, oldbox, newbox): + new_d = {} + for frombox, tobox in d.iteritems(): + if frombox is oldbox: + frombox = newbox + if tobox is oldbox: + tobox = newbox + new_d[frombox] = tobox + return new_d + + def replace_box(self, oldbox, newbox): + for descr, d in self.heap_cache.iteritems(): + self.heap_cache[descr] = self._replace_box(d, oldbox, newbox) + for descr, d in self.heap_array_cache.iteritems(): + for index, cache in d.iteritems(): + d[index] = self._replace_box(cache, oldbox, newbox) + self.length_cache = self._replace_box(self.length_cache, oldbox, newbox) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- 
a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -17,6 +17,7 @@ from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG, ABORT_BRIDGE, \ ABORT_FORCE_QUASIIMMUT, ABORT_BAD_LOOP from pypy.jit.metainterp.jitexc import JitException, get_llexception +from pypy.jit.metainterp.heapcache import HeapCache from pypy.rlib.objectmodel import specialize from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr from pypy.jit.codewriter import heaptracker @@ -321,7 +322,7 @@ def _establish_nullity(self, box, orgpc): value = box.nonnull() if value: - if box not in self.metainterp.known_class_boxes: + if not self.metainterp.heapcache.is_class_known(box): self.generate_guard(rop.GUARD_NONNULL, box, resumepc=orgpc) else: if not isinstance(box, Const): @@ -366,14 +367,17 @@ @arguments("descr") def opimpl_new(self, sizedescr): - return self.execute_with_descr(rop.NEW, sizedescr) + resbox = self.execute_with_descr(rop.NEW, sizedescr) + self.metainterp.heapcache.new(resbox) + return resbox @arguments("descr") def opimpl_new_with_vtable(self, sizedescr): cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.known_class_boxes[resbox] = None + self.metainterp.heapcache.new(resbox) + self.metainterp.heapcache.class_now_known(resbox) return resbox ## @FixME #arguments("box") @@ -392,24 +396,26 @@ ## self.execute(rop.SUBCLASSOF, box1, box2) @arguments("descr", "box") - def opimpl_new_array(self, itemsizedescr, countbox): - return self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, countbox) + def opimpl_new_array(self, itemsizedescr, lengthbox): + resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox) + self.metainterp.heapcache.new_array(resbox, lengthbox) + return resbox @arguments("box", "descr", "box") def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox): - cache = self.metainterp.heap_array_cache.get(arraydescr, None) - if cache 
and isinstance(indexbox, ConstInt): - index = indexbox.getint() - frombox, tobox = cache.get(index, (None, None)) - if frombox is arraybox: - return tobox + tobox = self.metainterp.heapcache.getarrayitem( + arraybox, arraydescr, indexbox) + if tobox: + # sanity check: see whether the current array value + # corresponds to what the cache thinks the value is + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETARRAYITEM_GC, arraydescr, arraybox, indexbox) + assert resbox.constbox().same_constant(tobox.constbox()) + return tobox resbox = self.execute_with_descr(rop.GETARRAYITEM_GC, arraydescr, arraybox, indexbox) - if isinstance(indexbox, ConstInt): - if not cache: - cache = self.metainterp.heap_array_cache[arraydescr] = {} - index = indexbox.getint() - cache[index] = arraybox, resbox + self.metainterp.heapcache.getarrayitem_now_known( + arraybox, arraydescr, indexbox, resbox) return resbox @@ -439,13 +445,8 @@ indexbox, itembox): self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, indexbox, itembox) - if isinstance(indexbox, ConstInt): - cache = self.metainterp.heap_array_cache.setdefault(arraydescr, {}) - cache[indexbox.getint()] = arraybox, itembox - else: - cache = self.metainterp.heap_array_cache.get(arraydescr, None) - if cache: - cache.clear() + self.metainterp.heapcache.setarrayitem( + arraybox, arraydescr, indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -462,7 +463,12 @@ @arguments("box", "descr") def opimpl_arraylen_gc(self, arraybox, arraydescr): - return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox) + lengthbox = self.metainterp.heapcache.arraylen(arraybox) + if lengthbox is None: + lengthbox = self.execute_with_descr( + rop.ARRAYLEN_GC, arraydescr, arraybox) + self.metainterp.heapcache.arraylen_now_known(arraybox, lengthbox) + return lengthbox @arguments("orgpc", "box", "descr", "box") def opimpl_check_neg_index(self, 
orgpc, arraybox, arraydescr, indexbox): @@ -471,19 +477,17 @@ negbox = self.implement_guard_value(orgpc, negbox) if negbox.getint(): # the index is < 0; add the array length to it - lenbox = self.metainterp.execute_and_record( - rop.ARRAYLEN_GC, arraydescr, arraybox) + lengthbox = self.opimpl_arraylen_gc(arraybox, arraydescr) indexbox = self.metainterp.execute_and_record( - rop.INT_ADD, None, indexbox, lenbox) + rop.INT_ADD, None, indexbox, lengthbox) return indexbox @arguments("descr", "descr", "descr", "descr", "box") def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr, sizebox): - sbox = self.metainterp.execute_and_record(rop.NEW, structdescr) + sbox = self.opimpl_new(structdescr) self._opimpl_setfield_gc_any(sbox, lengthdescr, sizebox) - abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr, - sizebox) + abox = self.opimpl_new_array(arraydescr, sizebox) self._opimpl_setfield_gc_any(sbox, itemsdescr, abox) return sbox @@ -540,11 +544,15 @@ @specialize.arg(1) def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr): - frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None)) - if frombox is box: + tobox = self.metainterp.heapcache.getfield(box, fielddescr) + if tobox is not None: + # sanity check: see whether the current struct value + # corresponds to what the cache thinks the value is + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC, fielddescr, box) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) - self.metainterp.heap_cache[fielddescr] = (box, resbox) + self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) return resbox @arguments("orgpc", "box", "descr") @@ -565,11 +573,11 @@ @arguments("box", "descr", "box") def _opimpl_setfield_gc_any(self, box, fielddescr, valuebox): - frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None)) - if frombox is box and tobox is valuebox: + tobox = 
self.metainterp.heapcache.getfield(box, fielddescr) + if tobox is valuebox: return self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heap_cache[fielddescr] = (box, valuebox) + self.metainterp.heapcache.setfield(box, fielddescr, valuebox) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @@ -633,7 +641,7 @@ standard_box = self.metainterp.virtualizable_boxes[-1] if standard_box is box: return False - if box in self.metainterp.nonstandard_virtualizables: + if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, box, standard_box) @@ -642,7 +650,7 @@ if isstandard: self.metainterp.replace_box(box, standard_box) else: - self.metainterp.nonstandard_virtualizables[box] = None + self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) return not isstandard def _get_virtualizable_field_index(self, fielddescr): @@ -727,7 +735,7 @@ def opimpl_arraylen_vable(self, pc, box, fdescr, adescr): if self._nonstandard_virtualizable(pc, box): arraybox = self._opimpl_getfield_gc_any(box, fdescr) - return self.execute_with_descr(rop.ARRAYLEN_GC, adescr, arraybox) + return self.opimpl_arraylen_gc(arraybox, adescr) vinfo = self.metainterp.jitdriver_sd.virtualizable_info virtualizable_box = self.metainterp.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) @@ -884,9 +892,9 @@ @arguments("orgpc", "box") def opimpl_guard_class(self, orgpc, box): clsbox = self.cls_of_box(box) - if box not in self.metainterp.known_class_boxes: + if not self.metainterp.heapcache.is_class_known(box): self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc) - self.metainterp.known_class_boxes[box] = None + self.metainterp.heapcache.class_now_known(box) return clsbox @arguments("int", "orgpc") @@ -1492,16 +1500,7 @@ self.last_exc_value_box = None 
self.retracing_loop_from = None self.call_pure_results = args_dict_box() - # contains boxes where the class is already known - self.known_class_boxes = {} - # contains frame boxes that are not virtualizables - self.nonstandard_virtualizables = {} - # heap cache - # maps descrs to (from_box, to_box) tuples - self.heap_cache = {} - # heap array cache - # maps descrs to {index: (from_box, to_box)} dicts - self.heap_array_cache = {} + self.heapcache = HeapCache() def perform_call(self, jitcode, boxes, greenkey=None): # causes the metainterp to enter the given subfunction @@ -1677,29 +1676,11 @@ # record the operation profiler = self.staticdata.profiler profiler.count_ops(opnum, RECORDED_OPS) - self._invalidate_caches(opnum, descr) + self.heapcache.invalidate_caches(opnum, descr, argboxes) op = self.history.record(opnum, argboxes, resbox, descr) self.attach_debug_info(op) return resbox - def _invalidate_caches(self, opnum, descr): - if opnum == rop.SETFIELD_GC: - return - if opnum == rop.SETARRAYITEM_GC: - return - if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST: - return - if opnum == rop.CALL: - effectinfo = descr.get_extra_info() - ef = effectinfo.extraeffect - if ef == effectinfo.EF_LOOPINVARIANT or \ - ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \ - ef == effectinfo.EF_ELIDABLE_CAN_RAISE: - return - if self.heap_cache: - self.heap_cache.clear() - if self.heap_array_cache: - self.heap_array_cache.clear() def attach_debug_info(self, op): if (not we_are_translated() and op is not None @@ -1862,10 +1843,7 @@ duplicates[box] = None def reached_loop_header(self, greenboxes, redboxes, resumedescr): - self.known_class_boxes = {} - self.nonstandard_virtualizables = {} # XXX maybe not needed? 
- self.heap_cache = {} - self.heap_array_cache = {} + self.heapcache.reset() duplicates = {} self.remove_consts_and_duplicates(redboxes, len(redboxes), @@ -2373,17 +2351,7 @@ for i in range(len(boxes)): if boxes[i] is oldbox: boxes[i] = newbox - for descr, (frombox, tobox) in self.heap_cache.iteritems(): - change = False - if frombox is oldbox: - change = True - frombox = newbox - if tobox is oldbox: - change = True - tobox = newbox - if change: - self.heap_cache[descr] = frombox, tobox - # XXX what about self.heap_array_cache? + self.heapcache.replace_box(oldbox, newbox) def find_biggest_function(self): start_stack = [] diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -0,0 +1,328 @@ +from pypy.jit.metainterp.heapcache import HeapCache +from pypy.jit.metainterp.resoperation import rop +from pypy.jit.metainterp.history import ConstInt + +box1 = object() +box2 = object() +box3 = object() +box4 = object() +lengthbox1 = object() +lengthbox2 = object() +descr1 = object() +descr2 = object() +descr3 = object() + +index1 = ConstInt(0) +index2 = ConstInt(1) + + +class FakeEffektinfo(object): + EF_ELIDABLE_CANNOT_RAISE = 0 #elidable function (and cannot raise) + EF_LOOPINVARIANT = 1 #special: call it only once per loop + EF_CANNOT_RAISE = 2 #a function which cannot raise + EF_ELIDABLE_CAN_RAISE = 3 #elidable function (but can raise) + EF_CAN_RAISE = 4 #normal function (can raise) + EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables + EF_RANDOM_EFFECTS = 6 #can do whatever + + OS_ARRAYCOPY = 0 + + def __init__(self, extraeffect, oopspecindex): + self.extraeffect = extraeffect + self.oopspecindex = oopspecindex + +class FakeCallDescr(object): + def __init__(self, extraeffect, oopspecindex=None): + self.extraeffect = extraeffect + self.oopspecindex = oopspecindex + + def get_extra_info(self): + return 
FakeEffektinfo(self.extraeffect, self.oopspecindex) + +class TestHeapCache(object): + def test_known_class_box(self): + h = HeapCache() + assert not h.is_class_known(1) + assert not h.is_class_known(2) + h.class_now_known(1) + assert h.is_class_known(1) + assert not h.is_class_known(2) + + h.reset() + assert not h.is_class_known(1) + assert not h.is_class_known(2) + + def test_nonstandard_virtualizable(self): + h = HeapCache() + assert not h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + h.nonstandard_virtualizables_now_known(1) + assert h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + + h.reset() + assert not h.is_nonstandard_virtualizable(1) + assert not h.is_nonstandard_virtualizable(2) + + + def test_heapcache_fields(self): + h = HeapCache() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is None + h.setfield(box1, descr2, box3) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is box3 + h.setfield(box1, descr1, box3) + assert h.getfield(box1, descr1) is box3 + assert h.getfield(box1, descr2) is box3 + h.setfield(box3, descr1, box1) + assert h.getfield(box3, descr1) is box1 + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is box3 + + h.reset() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box3, descr1) is None + + def test_heapcache_read_fields_multiple(self): + h = HeapCache() + h.getfield_now_known(box1, descr1, box2) + h.getfield_now_known(box3, descr1, box4) + assert h.getfield(box1, descr1) is box2 + assert h.getfield(box1, descr2) is None + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box3, descr2) is None + + h.reset() + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert 
h.getfield(box3, descr1) is None + assert h.getfield(box3, descr2) is None + + def test_heapcache_write_fields_multiple(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.new(box3) + h.setfield(box1, descr1, box2) + assert h.getfield(box1, descr1) is box2 + h.setfield(box3, descr1, box4) + assert h.getfield(box3, descr1) is box4 + assert h.getfield(box1, descr1) is box2 # box1 and box3 cannot alias + h.setfield(box1, descr1, box3) + assert h.getfield(box1, descr1) is box3 + + + def test_heapcache_arrays(self): + h = HeapCache() + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + assert h.getarrayitem(box1, descr2, index2) is None + h.setarrayitem(box1, descr1, index2, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box1, descr2, index1, box3) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, 
descr2, index2) is None + + h.setarrayitem(box1, descr1, index1, box3) + assert h.getarrayitem(box1, descr1, index1) is box3 + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.setarrayitem(box3, descr1, index1, box1) + assert h.getarrayitem(box3, descr1, index1) is box1 + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is box3 + assert h.getarrayitem(box1, descr1, index2) is box4 + assert h.getarrayitem(box1, descr2, index2) is None + + h.reset() + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box3, descr1, index1) is None + + def test_heapcache_array_nonconst_index(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr1, index2, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + h.setarrayitem(box1, descr1, box2, box3) + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + + def test_heapcache_read_fields_multiple_array(self): + h = HeapCache() + h.getarrayitem_now_known(box1, descr1, index1, box2) + h.getarrayitem_now_known(box3, descr1, index1, box4) + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box3, descr2, index1) is None + + h.reset() + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.getarrayitem(box3, descr1, index1) is None + assert h.getarrayitem(box3, descr2, index1) is None + + def test_heapcache_write_fields_multiple_array(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is 
box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias + + h = HeapCache() + h.new(box1) + h.new(box3) + h.setarrayitem(box1, descr1, index1, box2) + assert h.getarrayitem(box1, descr1, index1) is box2 + h.setarrayitem(box3, descr1, index1, box4) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is box2 # box1 and box3 cannot alias + h.setarrayitem(box1, descr1, index1, box3) + assert h.getarrayitem(box3, descr1, index1) is box4 + assert h.getarrayitem(box1, descr1, index1) is box3 # box1 and box3 cannot alias + + def test_length_cache(self): + h = HeapCache() + h.new_array(box1, lengthbox1) + assert h.arraylen(box1) is lengthbox1 + + assert h.arraylen(box2) is None + h.arraylen_now_known(box2, lengthbox2) + assert h.arraylen(box2) is lengthbox2 + + + def test_invalidate_cache(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr1, index2, box4) + h.invalidate_caches(rop.INT_ADD, None, []) + h.invalidate_caches(rop.INT_ADD_OVF, None, []) + h.invalidate_caches(rop.SETFIELD_RAW, None, []) + h.invalidate_caches(rop.SETARRAYITEM_RAW, None, []) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, descr1, index2) is box4 + + h.invalidate_caches( + rop.CALL, FakeCallDescr(FakeEffektinfo.EF_ELIDABLE_CANNOT_RAISE), []) + assert h.getfield(box1, descr1) is box2 + assert h.getarrayitem(box1, descr1, index1) is box2 + assert h.getarrayitem(box1, 
descr1, index2) is box4 + + h.invalidate_caches( + rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), []) + + h.invalidate_caches( + rop.CALL, FakeCallDescr(FakeEffektinfo.EF_RANDOM_EFFECTS), []) + assert h.getfield(box1, descr1) is None + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr1, index2) is None + + + def test_replace_box(self): + h = HeapCache() + h.setfield(box1, descr1, box2) + h.setfield(box1, descr2, box3) + h.setfield(box2, descr3, box3) + h.replace_box(box1, box4) + assert h.getfield(box1, descr1) is None + assert h.getfield(box1, descr2) is None + assert h.getfield(box4, descr1) is box2 + assert h.getfield(box4, descr2) is box3 + assert h.getfield(box2, descr3) is box3 + + def test_replace_box_array(self): + h = HeapCache() + h.setarrayitem(box1, descr1, index1, box2) + h.setarrayitem(box1, descr2, index1, box3) + h.arraylen_now_known(box1, lengthbox1) + h.setarrayitem(box2, descr1, index2, box1) + h.setarrayitem(box3, descr2, index2, box1) + h.setarrayitem(box2, descr3, index2, box3) + h.replace_box(box1, box4) + assert h.getarrayitem(box1, descr1, index1) is None + assert h.getarrayitem(box1, descr2, index1) is None + assert h.arraylen(box1) is None + assert h.arraylen(box4) is lengthbox1 + assert h.getarrayitem(box4, descr1, index1) is box2 + assert h.getarrayitem(box4, descr2, index1) is box3 + assert h.getarrayitem(box2, descr1, index2) is box4 + assert h.getarrayitem(box3, descr2, index2) is box4 + assert h.getarrayitem(box2, descr3, index2) is box3 + + h.replace_box(lengthbox1, lengthbox2) + assert h.arraylen(box4) is lengthbox2 + + def test_ll_arraycopy(self): + h = HeapCache() + h.new_array(box1, lengthbox1) + h.setarrayitem(box1, descr1, index1, box2) + h.new_array(box2, lengthbox1) + # Just need the destination box for this call + h.invalidate_caches( + rop.CALL, + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), + [None, None, box2, None, None] + ) + 
assert h.getarrayitem(box1, descr1, index1) is box2 + h.invalidate_caches( + rop.CALL, + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), + [None, None, box3, None, None] + ) + assert h.getarrayitem(box1, descr1, index1) is None + + h.setarrayitem(box4, descr1, index1, box2) + assert h.getarrayitem(box4, descr1, index1) is box2 + h.invalidate_caches( + rop.CALL, + FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY), + [None, None, box2, None, None] + ) + assert h.getarrayitem(box4, descr1, index1) is None diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -257,6 +257,28 @@ self.check_operations_history(setarrayitem_gc=2, setfield_gc=2, getarrayitem_gc=0, getfield_gc=2) + def test_promote_changes_array_cache(self): + a1 = [0, 0] + a2 = [0, 0] + def fn(n): + if n > 0: + a = a1 + else: + a = a2 + a[0] = n + jit.hint(n, promote=True) + x1 = a[0] + jit.hint(x1, promote=True) + a[n - n] = n + 1 + return a[0] + x1 + res = self.interp_operations(fn, [7]) + assert res == 7 + 7 + 1 + self.check_operations_history(getarrayitem_gc=0, guard_value=1) + res = self.interp_operations(fn, [-7]) + assert res == -7 - 7 + 1 + self.check_operations_history(getarrayitem_gc=0, guard_value=1) + + def test_list_caching(self): a1 = [0, 0] a2 = [0, 0] @@ -357,7 +379,7 @@ assert res == f(10, 1, 1) self.check_history(getarrayitem_gc=0, getfield_gc=0) - def test_heap_caching_pure(self): + def test_heap_caching_array_pure(self): class A(object): pass p1 = A() @@ -405,3 +427,149 @@ assert res == -7 + 7 self.check_operations_history(getfield_gc=0) return + + def test_heap_caching_multiple_objects(self): + class Gbl(object): + pass + g = Gbl() + class A(object): + pass + a1 = A() + g.a1 = a1 + a1.x = 7 + a2 = A() + g.a2 = a2 + a2.x = 7 + def gn(a1, a2): + return a1.x + a2.x + def fn(n): + if n 
< 0: + a1 = A() + g.a1 = a1 + a1.x = n + a2 = A() + g.a2 = a2 + a2.x = n - 1 + else: + a1 = g.a1 + a2 = g.a2 + return a1.x + a2.x + gn(a1, a2) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(setfield_gc=4, getfield_gc=0) + res = self.interp_operations(fn, [7]) + assert res == 4 * 7 + self.check_operations_history(getfield_gc=4) + + def test_heap_caching_multiple_tuples(self): + class Gbl(object): + pass + g = Gbl() + def gn(a1, a2): + return a1[0] + a2[0] + def fn(n): + a1 = (n, ) + g.a = a1 + a2 = (n - 1, ) + g.a = a2 + jit.promote(n) + return a1[0] + a2[0] + gn(a1, a2) + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getfield_gc_pure=0) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getfield_gc_pure=0) + + def test_heap_caching_multiple_arrays(self): + class Gbl(object): + pass + g = Gbl() + def fn(n): + a1 = [n, n, n] + g.a = a1 + a1[0] = n + a2 = [n, n, n] + g.a = a2 + a2[0] = n - 1 + return a1[0] + a2[0] + a1[0] + a2[0] + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getarrayitem_gc=0) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getarrayitem_gc=0) + + def test_heap_caching_multiple_arrays_getarrayitem(self): + class Gbl(object): + pass + g = Gbl() + g.a1 = [7, 8, 9] + g.a2 = [8, 9, 10, 11] + + def fn(i): + if i < 0: + g.a1 = [7, 8, 9] + g.a2 = [7, 8, 9, 10] + jit.promote(i) + a1 = g.a1 + a1[i + 1] = 15 # make lists mutable + a2 = g.a2 + a2[i + 1] = 19 + return a1[i] + a2[i] + a1[i] + a2[i] + res = self.interp_operations(fn, [0]) + assert res == 2 * 7 + 2 * 8 + self.check_operations_history(getarrayitem_gc=2) + + + def test_heap_caching_multiple_lists(self): + class Gbl(object): + pass + g = Gbl() + g.l = [] + def fn(n): + if n < -100: + g.l.append(1) + a1 = [n, n, n] + g.l = a1 + 
a1[0] = n + a2 = [n, n, n] + g.l = a2 + a2[0] = n - 1 + return a1[0] + a2[0] + a1[0] + a2[0] + res = self.interp_operations(fn, [7]) + assert res == 2 * 7 + 2 * 6 + self.check_operations_history(getarrayitem_gc=0, getfield_gc=0) + res = self.interp_operations(fn, [-7]) + assert res == 2 * -7 + 2 * -8 + self.check_operations_history(getarrayitem_gc=0, getfield_gc=0) + + def test_length_caching(self): + class Gbl(object): + pass + g = Gbl() + g.a = [0] * 7 + def fn(n): + a = g.a + res = len(a) + len(a) + a1 = [0] * n + g.a = a1 + return len(a1) + res + res = self.interp_operations(fn, [7]) + assert res == 7 * 3 + self.check_operations_history(arraylen_gc=1) + + def test_arraycopy(self): + class Gbl(object): + pass + g = Gbl() + g.a = [0] * 7 + def fn(n): + assert n >= 0 + a = g.a + x = [0] * n + x[2] = 21 + return len(a[:n]) + x[2] + res = self.interp_operations(fn, [3]) + assert res == 24 + self.check_operations_history(getarrayitem_gc=0) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -294,7 +294,7 @@ PARAMETERS = {'threshold': 1032, # just above 1024 'function_threshold': 1617, # slightly more than one above 'trace_eagerness': 200, - 'trace_limit': 12000, + 'trace_limit': 6000, 'inlining': 1, 'loop_longevity': 1000, 'retrace_limit': 5, From noreply at buildbot.pypy.org Fri Sep 16 13:46:06 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 16 Sep 2011 13:46:06 +0200 (CEST) Subject: [pypy-commit] pypy default: one of the more obscure commits: change the thresholds slightly to be prime Message-ID: <20110916114606.496E5820B1@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r47292:6dc305ab7c65 Date: 2011-09-16 13:45 +0200 http://bitbucket.org/pypy/pypy/changeset/6dc305ab7c65/ Log: one of the more obscure commits: change the thresholds slightly to be prime numbers. 
the reason for this is that if there are nested loops that have combined iteration times of exactly the threshold you get bad traces. probably not important in practice, but seemed fun to do. diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -291,8 +291,8 @@ class JitHintError(Exception): """Inconsistency in the JIT hints.""" -PARAMETERS = {'threshold': 1032, # just above 1024 - 'function_threshold': 1617, # slightly more than one above +PARAMETERS = {'threshold': 1039, # just above 1024, prime + 'function_threshold': 1619, # slightly more than one above, also prime 'trace_eagerness': 200, 'trace_limit': 6000, 'inlining': 1, From noreply at buildbot.pypy.org Fri Sep 16 15:48:47 2011 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Sep 2011 15:48:47 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Run yatiblog Message-ID: <20110916134847.D6D17820B1@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r258:64ff6a750de4 Date: 2011-09-16 15:48 +0200 http://bitbucket.org/pypy/pypy.org/changeset/64ff6a750de4/ Log: Run yatiblog diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -247,4 +247,4 @@ - + \ No newline at end of file diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -1,7 +1,7 @@ - PyPy :: Current people of PyPy + PyPy :: People of PyPy @@ -43,7 +43,7 @@
-

Current people of PyPy

+

People of PyPy

Armin Rigo

image/people/arigo.png @@ -147,6 +147,16 @@ and pondering what comes next. Other than that he continues to care for testing and some PyPy co-ordination bits behind the scene.

+
+

Samuele Pedroni

+

Samuele Pedroni got involved with PyPy almost at its inception in the +spring of 2003. One of the design contributors to PyPy, his help has +ranged from infrastructure and processes, through building out +RPython… optimizing the Python interpreter, to compressing resume +data in the last incarnation of the JIT compiler. Tempted away into the +application side of the software equation, these days he contributes +some words and wisdom to PyPy's paper writing.

+

Many more people

PyPy is and has always been an effort of many volunteers. Consult the LICENSE diff --git a/sponsor.html b/sponsor.html --- a/sponsor.html +++ b/sponsor.html @@ -32,7 +32,7 @@

diff --git a/source/_layouts/py3k.genshi b/source/_layouts/py3k.genshi new file mode 100644 --- /dev/null +++ b/source/_layouts/py3k.genshi @@ -0,0 +1,19 @@ +--- +layout: site +license: MIT +--- + +
+
+

${Markup(title)}

+${Markup(content)} +
+ +
diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -1,5 +1,5 @@ --- -layout: page +layout: py3k title: (UNRELEASED DRAFT) Call for donations - PyPy to support Python3! --- From noreply at buildbot.pypy.org Tue Sep 20 21:00:55 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Sep 2011 21:00:55 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: a test for the ll_join_chars @jit.look_inside_iff, as well as improvements to jit.isvirtual Message-ID: <20110920190055.4C38B820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47371:760a2b769dc4 Date: 2011-09-20 15:00 -0400 http://bitbucket.org/pypy/pypy/changeset/760a2b769dc4/ Log: a test for the ll_join_chars @jit.look_inside_iff, as well as improvements to jit.isvirtual diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -14,6 +14,9 @@ # escaped the trace or not, its presences in the mapping shows that it # was allocated inside the trace self.new_boxes = {} + # Tracks which boxes should be marked as escaped when the key box + # escapes. 
+ self.dependencies = {} # contains frame boxes that are not virtualizables self.nonstandard_virtualizables = {} # heap cache @@ -31,12 +34,26 @@ def mark_escaped(self, opnum, argboxes): idx = 0 - for box in argboxes: - # setfield_gc and setarrayitem_gc don't escape their first argument - if not (idx == 0 and opnum in [rop.SETFIELD_GC, rop.SETARRAYITEM_GC]): - if box in self.new_boxes: - self.new_boxes[box] = False - idx += 1 + if opnum == rop.SETFIELD_GC: + assert len(argboxes) == 2 + box, valuebox = argboxes + if self.is_unescaped(box) and self.is_unescaped(valuebox): + self.dependencies.setdefault(box, []).append(valuebox) + else: + self._escape(valuebox) + else: + for box in argboxes: + # setarrayitem_gc don't escape their first argument + if not (idx == 0 and opnum in [rop.SETARRAYITEM_GC, rop.GETFIELD_GC]): + self._escape(box) + idx += 1 + + def _escape(self, box): + if box in self.new_boxes: + self.new_boxes[box] = False + if box in self.dependencies: + for dep in self.dependencies.pop(box): + self._escape(dep) def clear_caches(self, opnum, descr, argboxes): if opnum == rop.SETFIELD_GC: diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -337,6 +337,24 @@ h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2]) assert not h.is_unescaped(box2) + def test_unescaped_testing(self): + h = HeapCache() + h.new(box1) + h.new(box2) + assert h.is_unescaped(box1) + assert h.is_unescaped(box2) + # Putting a virtual inside of another virtual doesn't escape it. + h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2]) + assert h.is_unescaped(box2) + # Reading a field from a virtual doesn't escape it. + h.invalidate_caches(rop.GETFIELD_GC, None, [box1]) + assert h.is_unescaped(box1) + # Escaping a virtual transitively escapes anything inside of it. 
+ assert not h.is_unescaped(box3) + h.invalidate_caches(rop.SETFIELD_GC, None, [box3, box1]) + assert not h.is_unescaped(box1) + assert not h.is_unescaped(box2) + def test_unescaped_array(self): h = HeapCache() h.new_array(box1, lengthbox1) diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -536,3 +536,27 @@ self.check_loops(call_pure=0, call=1, newunicode=0, unicodegetitem=0, unicodesetitem=0, copyunicodecontent=0) + + def test_join_chars(self): + jitdriver = JitDriver(reds=['a', 'b', 'c', 'i'], greens=[]) + def f(a, b, c): + i = 0 + while i < 10: + jitdriver.jit_merge_point(a=a, b=b, c=c, i=i) + x = [] + if a: + x.append("a") + if b: + x.append("b") + if c: + x.append("c") + i += len("".join(x)) + return i + res = self.meta_interp(f, [1, 1, 1]) + assert res == f(True, True, True) + # The "".join should be unrolled, since the length of x is known since + # it is virtual, ensure there are no calls to ll_join_chars, or + # allocations. + self.check_loops({ + "guard_true": 5, "int_is_true": 3, "int_lt": 2, "int_add": 2, "jump": 2, + }, everywhere=True) From noreply at buildbot.pypy.org Tue Sep 20 21:00:56 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Sep 2011 21:00:56 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: add a docstring suggested by cfbolz. Message-ID: <20110920190056.710B582211@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47372:66dfd0624949 Date: 2011-09-20 15:00 -0400 http://bitbucket.org/pypy/pypy/changeset/66dfd0624949/ Log: add a docstring suggested by cfbolz. diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -164,6 +164,8 @@ While tracing, returns whether or not the value is currently known to be constant. This is not perfect, values can become constant later. Mostly for use with @look_inside_iff. 
+ + This is for advanced usage only. """ # I hate the annotator so much. if NonConstant(False): @@ -176,6 +178,8 @@ """ Returns if this value is virtual, while tracing, it's relatively conservative and will miss some cases. + + This is for advanced usage only. """ if NonConstant(False): return True From noreply at buildbot.pypy.org Tue Sep 20 21:05:44 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Sep 2011 21:05:44 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: clearer comment. Message-ID: <20110920190544.8D161820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47373:2035354ec085 Date: 2011-09-20 15:05 -0400 http://bitbucket.org/pypy/pypy/changeset/2035354ec085/ Log: clearer comment. diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -11,8 +11,9 @@ self.known_class_boxes = {} # store the boxes that contain newly allocated objects, this maps the # boxes to a bool, the bool indicates whether or not the object has - # escaped the trace or not, its presences in the mapping shows that it - # was allocated inside the trace + # escaped the trace or not (True means the box never escaped, False + # means it did escape), its presences in the mapping shows that it was + # allocated inside the trace self.new_boxes = {} # Tracks which boxes should be marked as escaped when the key box # escapes. 
From noreply at buildbot.pypy.org Tue Sep 20 21:15:16 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Sep 2011 21:15:16 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: rewrite, hopefully makes the logic more clear Message-ID: <20110920191516.8E280820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47374:5b130d326e27 Date: 2011-09-20 15:15 -0400 http://bitbucket.org/pypy/pypy/changeset/5b130d326e27/ Log: rewrite, hopefully makes the logic more clear diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -42,10 +42,11 @@ self.dependencies.setdefault(box, []).append(valuebox) else: self._escape(valuebox) - else: + # GETFIELD_GC doesn't escape it's argument + elif opnum != rop.GETFIELD_GC: for box in argboxes: - # setarrayitem_gc don't escape their first argument - if not (idx == 0 and opnum in [rop.SETARRAYITEM_GC, rop.GETFIELD_GC]): + # setarrayitem_gc don't escape its first argument + if not (idx == 0 and opnum in [rop.SETARRAYITEM_GC]): self._escape(box) idx += 1 From noreply at buildbot.pypy.org Tue Sep 20 21:16:14 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Tue, 20 Sep 2011 21:16:14 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: change the mod test to a simpler int32 sum test while fixing. Message-ID: <20110920191614.AFEDC820CF@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47375:6f8397e5f89a Date: 2011-09-20 19:15 +0000 http://bitbucket.org/pypy/pypy/changeset/6f8397e5f89a/ Log: change the mod test to a simpler int32 sum test while fixing. 
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -2,7 +2,7 @@ from pypy.module.micronumpy import interp_ufuncs, signature from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace, FloatObject, IntObject) -from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_Int64Dtype, W_UInt64Dtype +from pypy.module.micronumpy.interp_dtype import W_Int32Dtype, W_Float64Dtype, W_Int64Dtype, W_UInt64Dtype from pypy.module.micronumpy.interp_numarray import (BaseArray, SingleDimArray, SingleDimSlice, scalar_w) from pypy.rlib.nonconst import NonConstant @@ -16,6 +16,7 @@ cls.float64_dtype = cls.space.fromcache(W_Float64Dtype) cls.int64_dtype = cls.space.fromcache(W_Int64Dtype) cls.uint64_dtype = cls.space.fromcache(W_UInt64Dtype) + cls.int32_dtype = cls.space.fromcache(W_Int32Dtype) def test_add(self): def f(i): @@ -304,22 +305,22 @@ 'int_lt': 1, 'guard_true': 1, 'jump': 1}) assert result == 11.0 - def test_uint64_mod(self): + def test_int32_sum(self): space = self.space float64_dtype = self.float64_dtype - uint64_dtype = self.uint64_dtype + int32_dtype = self.int32_dtype def f(n): if NonConstant(False): dtype = float64_dtype else: - dtype = uint64_dtype + dtype = int32_dtype ar = SingleDimArray(n, dtype=dtype) i = 0 while i < n: - ar.get_concrete().setitem(i, uint64_dtype.box(7)) + ar.get_concrete().setitem(i, int32_dtype.box(7)) i += 1 - v = ar.descr_mod(space, ar).descr_sum(space) + v = ar.descr_add(space, ar).descr_sum(space) assert isinstance(v, IntObject) return v.intval From noreply at buildbot.pypy.org Tue Sep 20 21:37:50 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 20 Sep 2011 21:37:50 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: fix translation Message-ID: <20110920193750.BDF7C820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: 
r47376:f5fd6aac22b8 Date: 2011-09-20 15:37 -0400 http://bitbucket.org/pypy/pypy/changeset/f5fd6aac22b8/ Log: fix translation diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -54,8 +54,9 @@ if box in self.new_boxes: self.new_boxes[box] = False if box in self.dependencies: - for dep in self.dependencies.pop(box): + for dep in self.dependencies[box]: self._escape(dep) + del self.dependencies[box] def clear_caches(self, opnum, descr, argboxes): if opnum == rop.SETFIELD_GC: From noreply at buildbot.pypy.org Tue Sep 20 23:03:01 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Sep 2011 23:03:01 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: fix again Message-ID: <20110920210301.CDF6182299@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r268:b1b1d87a8848 Date: 2011-09-20 23:02 +0200 http://bitbucket.org/pypy/pypy.org/changeset/b1b1d87a8848/ Log: fix again diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -67,7 +67,43 @@ Krekel and Jacob Hallen and they will - in close collaboration - with Conservancy and the core developers, select the best developers for the Python 3 porting job among well known PyPy contributors.

-

If you want to see PyPy support Python 3 and Python 2, donate here:

Should we not receive enough donations to complete all stages by 1st March 2012 +

If you want to see PyPy support Python 3 and Python 2, donate here:

+
+ + + + +
+ +
+ + + + + + + + + + + + + +
$ + + +
+

Should we not receive enough donations to complete all stages by 1st March 2012 at the latest, we will try our best to make PyPy support Python 3 anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -31,7 +31,7 @@ If you want to see PyPy support Python 3 and Python 2, donate here: -.. raw: html +.. raw:: html

From noreply at buildbot.pypy.org Tue Sep 20 23:00:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Sep 2011 23:00:15 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: regen html and fix rest Message-ID: <20110920210015.B465182212@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r267:637f12287c0c Date: 2011-09-20 23:00 +0200 http://bitbucket.org/pypy/pypy.org/changeset/637f12287c0c/ Log: regen html and fix rest diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -67,11 +67,7 @@ Krekel and Jacob Hallen and they will - in close collaboration - with Conservancy and the core developers, select the best developers for the Python 3 porting job among well known PyPy contributors.

-

If you want to see PyPy support Python 3 and Python 2, donate here:

-
-XXX insert links to Paypal and Google Chekcout links categorized such -that things will get accounted properly on the Conservancy side
-

Should we not receive enough donations to complete all stages by 1st March 2012 +

If you want to see PyPy support Python 3 and Python 2, donate here:

Should we not receive enough donations to complete all stages by 1st March 2012 at the latest, we will try our best to make PyPy support Python 3 anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a @@ -79,7 +75,7 @@ regardless of their use, be spent in a way that benefits the general public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase.

-

Note For donations higher than $XXX we can arrange for an invoice +

Note For donations higher than $1,000, we can arrange for an invoice and a different payment method to avoid the high Paypal fees. Please contact pypy at sfconservancy.org if you want to know details on how to donate via other means.

diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -30,44 +30,46 @@ the Python 3 porting job among well known PyPy contributors. If you want to see PyPy support Python 3 and Python 2, donate here: + .. raw: html - - - - - - - -
- - - - - - - - - - - - - -
$ - - -
-
+ +
+ + + + + + + + + + + + + +
$ + + +
+
Should we not receive enough donations to complete all stages by 1st March 2012 at the latest, we will try our best to make PyPy support Python 3 anyway. We From noreply at buildbot.pypy.org Tue Sep 20 22:57:14 2011 From: noreply at buildbot.pypy.org (bkuhn) Date: Tue, 20 Sep 2011 22:57:14 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Added donation links and recommended cap for them. Message-ID: <20110920205714.E6C89820CF@wyvern.cs.uni-duesseldorf.de> Author: "Bradley M. Kuhn" Branch: extradoc Changeset: r266:c894edc21cfb Date: 2011-09-20 16:56 -0400 http://bitbucket.org/pypy/pypy.org/changeset/c894edc21cfb/ Log: Added donation links and recommended cap for them. diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -30,9 +30,44 @@ the Python 3 porting job among well known PyPy contributors. If you want to see PyPy support Python 3 and Python 2, donate here: +.. raw: html +
+ + + + +
- XXX insert links to Paypal and Google Chekcout links categorized such - that things will get accounted properly on the Conservancy side + +
+ + + + + + + + + + + + + +
$ + + +
+
Should we not receive enough donations to complete all stages by 1st March 2012 at the latest, we will try our best to make PyPy support Python 3 anyway. We @@ -43,7 +78,7 @@ public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase. -**Note** For donations higher than $XXX we can arrange for an invoice +**Note** For donations higher than $1,000, we can arrange for an invoice and a different payment method to avoid the high Paypal fees. Please contact pypy at sfconservancy.org if you want to know details on how to donate via other means. From noreply at buildbot.pypy.org Wed Sep 21 00:16:35 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Sep 2011 00:16:35 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: merged default in Message-ID: <20110920221635.56C93820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47377:e7518e0ec519 Date: 2011-09-20 17:51 -0400 http://bitbucket.org/pypy/pypy/changeset/e7518e0ec519/ Log: merged default in diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,4 @@ import sys -from pypy.interpreter.miscutils import Stack from pypy.interpreter.error import OperationError from pypy.rlib.rarithmetic import LONG_BIT from pypy.rlib.unroll import unrolling_iterable diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -2,154 +2,6 @@ Miscellaneous utilities. 
""" -import types - -from pypy.rlib.rarithmetic import r_uint - -class RootStack: - pass - -class Stack(RootStack): - """Utility class implementing a stack.""" - - _annspecialcase_ = "specialize:ctr_location" # polymorphic - - def __init__(self): - self.items = [] - - def clone(self): - s = self.__class__() - for item in self.items: - try: - item = item.clone() - except AttributeError: - pass - s.push(item) - return s - - def push(self, item): - self.items.append(item) - - def pop(self): - return self.items.pop() - - def drop(self, n): - if n > 0: - del self.items[-n:] - - def top(self, position=0): - """'position' is 0 for the top of the stack, 1 for the item below, - and so on. It must not be negative.""" - if position < 0: - raise ValueError, 'negative stack position' - if position >= len(self.items): - raise IndexError, 'not enough entries in stack' - return self.items[~position] - - def set_top(self, value, position=0): - """'position' is 0 for the top of the stack, 1 for the item below, - and so on. 
It must not be negative.""" - if position < 0: - raise ValueError, 'negative stack position' - if position >= len(self.items): - raise IndexError, 'not enough entries in stack' - self.items[~position] = value - - def depth(self): - return len(self.items) - - def empty(self): - return len(self.items) == 0 - - -class FixedStack(RootStack): - _annspecialcase_ = "specialize:ctr_location" # polymorphic - - # unfortunately, we have to re-do everything - def __init__(self): - pass - - def setup(self, stacksize): - self.ptr = r_uint(0) # we point after the last element - self.items = [None] * stacksize - - def clone(self): - # this is only needed if we support flow space - s = self.__class__() - s.setup(len(self.items)) - for item in self.items[:self.ptr]: - try: - item = item.clone() - except AttributeError: - pass - s.push(item) - return s - - def push(self, item): - ptr = self.ptr - self.items[ptr] = item - self.ptr = ptr + 1 - - def pop(self): - ptr = self.ptr - 1 - ret = self.items[ptr] # you get OverflowError if the stack is empty - self.items[ptr] = None - self.ptr = ptr - return ret - - def drop(self, n): - while n > 0: - n -= 1 - self.ptr -= 1 - self.items[self.ptr] = None - - def top(self, position=0): - # for a fixed stack, we assume correct indices - return self.items[self.ptr + ~position] - - def set_top(self, value, position=0): - # for a fixed stack, we assume correct indices - self.items[self.ptr + ~position] = value - - def depth(self): - return self.ptr - - def empty(self): - return not self.ptr - - -class InitializedClass(type): - """NOT_RPYTHON. 
A meta-class that allows a class to initialize itself (or - its subclasses) by calling __initclass__() as a class method.""" - def __init__(self, name, bases, dict): - super(InitializedClass, self).__init__(name, bases, dict) - for basecls in self.__mro__: - raw = basecls.__dict__.get('__initclass__') - if isinstance(raw, types.FunctionType): - raw(self) # call it as a class method - - -class RwDictProxy(object): - """NOT_RPYTHON. A dict-like class standing for 'cls.__dict__', to work - around the fact that the latter is a read-only proxy for new-style - classes.""" - - def __init__(self, cls): - self.cls = cls - - def __getitem__(self, attr): - return self.cls.__dict__[attr] - - def __setitem__(self, attr, value): - setattr(self.cls, attr, value) - - def __contains__(self, value): - return value in self.cls.__dict__ - - def items(self): - return self.cls.__dict__.items() - - class ThreadLocals: """Pseudo thread-local storage, for 'space.threadlocals'. This is not really thread-local at all; the intention is that the PyPy diff --git a/pypy/rlib/parsing/codebuilder.py b/pypy/rlib/parsing/codebuilder.py --- a/pypy/rlib/parsing/codebuilder.py +++ b/pypy/rlib/parsing/codebuilder.py @@ -1,3 +1,5 @@ +import contextlib + class Codebuilder(object): def __init__(self): self.blocks = [] @@ -27,10 +29,12 @@ assert blockstarter.endswith(":") self.emit(blockstarter) self.blocks.append(blockstarter) - def BlockEnder(): - yield None - self.end_block(blockstarter) - return BlockEnder() + + @contextlib.contextmanager + def block(self, blockstarter): + self.start_block(blockstarter) + yield None + self.end_block(blockstarter) def end_block(self, starterpart=""): block = self.blocks.pop() diff --git a/pypy/rlib/parsing/deterministic.py b/pypy/rlib/parsing/deterministic.py --- a/pypy/rlib/parsing/deterministic.py +++ b/pypy/rlib/parsing/deterministic.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import py try: @@ -228,11 +229,11 @@ above = set() for state, nextstates in 
state_to_chars.iteritems(): above.add(state) - for _ in result.start_block("if state == %s:" % (state, )): - for _ in result.start_block("if i < len(input):"): + with result.block("if state == %s:" % (state, )): + with result.block("if i < len(input):"): result.emit("char = input[i]") result.emit("i += 1") - for _ in result.start_block("else:"): + with result.block("else:"): if state in self.final_states: result.emit("return True") else: @@ -248,7 +249,7 @@ for i, (a, num) in enumerate(compressed): if num < 5: for charord in range(ord(a), ord(a) + num): - for _ in result.start_block( + with result.block( "%sif char == %r:" % ( elif_prefix, chr(charord))): result.emit("state = %s" % (nextstate, )) @@ -256,23 +257,23 @@ if not elif_prefix: elif_prefix = "el" else: - for _ in result.start_block( + with result.block( "%sif %r <= char <= %r:" % ( elif_prefix, a, chr(ord(a) + num - 1))): result.emit("state = %s""" % (nextstate, )) result.emit(continue_prefix) if not elif_prefix: elif_prefix = "el" - for _ in result.start_block("else:"): + with result.block("else:"): result.emit("break") #print state_to_chars.keys() for state in range(self.num_states): if state in state_to_chars: continue - for _ in result.start_block("if state == %s:" % (state, )): - for _ in result.start_block("if i == len(input):"): + with result.block("if state == %s:" % (state, )): + with result.block("if i == len(input):"): result.emit("return True") - for _ in result.start_block("else:"): + with result.block("else:"): result.emit("break") result.emit("break") result.end_block("while") @@ -303,14 +304,14 @@ above = set() for state, nextstates in state_to_chars_sorted: above.add(state) - for _ in result.start_block("if state == %s:" % (state, )): + with result.block("if state == %s:" % (state, )): if state in self.final_states: result.emit("runner.last_matched_index = i - 1") result.emit("runner.last_matched_state = state") - for _ in result.start_block("try:"): + with result.block("try:"): 
result.emit("char = input[i]") result.emit("i += 1") - for _ in result.start_block("except IndexError:"): + with result.block("except IndexError:"): result.emit("runner.state = %s" % (state, )) if state in self.final_states: result.emit("return i") @@ -327,21 +328,21 @@ for i, (a, num) in enumerate(compressed): if num < 3: for charord in range(ord(a), ord(a) + num): - for _ in result.start_block("%sif char == %r:" + with result.block("%sif char == %r:" % (elif_prefix, chr(charord))): result.emit("state = %s" % (nextstate, )) result.emit(continue_prefix) if not elif_prefix: elif_prefix = "el" else: - for _ in result.start_block( + with result.block( "%sif %r <= char <= %r:" % ( elif_prefix, a, chr(ord(a) + num - 1))): result.emit("state = %s" % (nextstate, )) result.emit(continue_prefix) if not elif_prefix: elif_prefix = "el" - for _ in result.start_block("else:"): + with result.block("else:"): result.emit("break") #print state_to_chars.keys() for state in range(self.num_states): diff --git a/pypy/rlib/parsing/makepackrat.py b/pypy/rlib/parsing/makepackrat.py --- a/pypy/rlib/parsing/makepackrat.py +++ b/pypy/rlib/parsing/makepackrat.py @@ -1,3 +1,4 @@ +from __future__ import with_statement import py import sys from pypy.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor @@ -321,27 +322,27 @@ else: self.emit("_key = self._pos") self.emit("_status = self.%s.get(_key, None)" % (dictname, )) - for _ in self.start_block("if _status is None:"): + with self.block("if _status is None:"): self.emit("_status = self.%s[_key] = Status()" % ( dictname, )) - for _ in self.start_block("else:"): + with self.block("else:"): self.emit("_statusstatus = _status.status") - for _ in self.start_block("if _statusstatus == _status.NORMAL:"): + with self.block("if _statusstatus == _status.NORMAL:"): self.emit("self._pos = _status.pos") self.emit("return _status") - for _ in self.start_block("elif _statusstatus == _status.ERROR:"): + with self.block("elif _statusstatus == 
_status.ERROR:"): self.emit("raise BacktrackException(_status.error)") if self.have_call: - for _ in self.start_block( + with self.block( "elif (_statusstatus == _status.INPROGRESS or\n" " _statusstatus == _status.LEFTRECURSION):"): self.emit("_status.status = _status.LEFTRECURSION") - for _ in self.start_block("if _status.result is not None:"): + with self.block("if _status.result is not None:"): self.emit("self._pos = _status.pos") self.emit("return _status") - for _ in self.start_block("else:"): + with self.block("else:"): self.emit("raise BacktrackException(None)") - for _ in self.start_block( + with self.block( "elif _statusstatus == _status.SOMESOLUTIONS:"): self.emit("_status.status = _status.INPROGRESS") self.emit("_startingpos = self._pos") @@ -352,10 +353,10 @@ def memoize_footer(self, name, args): dictname = "_dict_%s" % (name, ) if self.have_call: - for _ in self.start_block( + with self.block( "if _status.status == _status.LEFTRECURSION:"): - for _ in self.start_block("if _status.result is not None:"): - for _ in self.start_block("if _status.pos >= self._pos:"): + with self.block("if _status.result is not None:"): + with self.block("if _status.pos >= self._pos:"): self.emit("_status.status = _status.NORMAL") self.emit("self._pos = _status.pos") self.emit("return _status") @@ -373,7 +374,7 @@ self.emit("_status.error = _error") self.emit("return _status") self.end_block("try") - for _ in self.start_block("except BacktrackException, _exc:"): + with self.block("except BacktrackException, _exc:"): self.emit("_status.pos = -1") self.emit("_status.result = None") self.combine_error('_exc.error') @@ -394,7 +395,7 @@ self.start_block("class Parser(object):") for elt in t.children: self.dispatch(elt) - for _ in self.start_block("def __init__(self, inputstream):"): + with self.block("def __init__(self, inputstream):"): for line in self.initcode: self.emit(line) self.emit("self._pos = 0") @@ -405,7 +406,7 @@ def emit_regex_code(self): for regex, matcher in 
self.matchers.iteritems(): - for _ in self.start_block( + with self.block( "def _regex%s(self):" % (abs(hash(regex)), )): c = self.choice_point() self.emit("_runner = self._Runner(self._inputstream, self._pos)") @@ -423,8 +424,8 @@ self.emit("self._pos = _upto") self.emit("return _result") - for _ in self.start_block("class _Runner(object):"): - for _ in self.start_block("def __init__(self, text, pos):"): + with self.block("class _Runner(object):"): + with self.block("def __init__(self, text, pos):"): self.emit("self.text = text") self.emit("self.pos = pos") self.emit("self.last_matched_state = -1") @@ -444,7 +445,7 @@ otherargs = t.children[1].children argswithself = ", ".join(["self"] + otherargs) argswithoutself = ", ".join(otherargs) - for _ in self.start_block("def %s(%s):" % (name, argswithself)): + with self.block("def %s(%s):" % (name, argswithself)): self.emit("return self._%s(%s).result" % (name, argswithoutself)) self.start_block("def _%s(%s):" % (name, argswithself, )) self.namecount = 0 @@ -465,10 +466,10 @@ self.start_block("while 1:") for i, p in enumerate(possibilities): c = self.choice_point() - for _ in self.start_block("try:"): + with self.block("try:"): self.dispatch(p) self.emit("break") - for _ in self.start_block("except BacktrackException, _exc:"): + with self.block("except BacktrackException, _exc:"): self.combine_error('_exc.error') self.revert(c) if i == len(possibilities) - 1: @@ -484,9 +485,9 @@ def visit_maybe(self, t): c = self.choice_point() - for _ in self.start_block("try:"): + with self.block("try:"): self.dispatch(t.children[0]) - for _ in self.start_block("except BacktrackException:"): + with self.block("except BacktrackException:"): self.revert(c) def visit_repetition(self, t): @@ -496,12 +497,12 @@ if t.children[0] == '+': self.dispatch(t.children[1]) self.emit("%s.append(_result)" % (name, )) - for _ in self.start_block("while 1:"): + with self.block("while 1:"): c = self.choice_point() - for _ in self.start_block("try:"): + 
with self.block("try:"): self.dispatch(t.children[1]) self.emit("%s.append(_result)" % (name, )) - for _ in self.start_block("except BacktrackException, _exc:"): + with self.block("except BacktrackException, _exc:"): self.combine_error('_exc.error') self.revert(c) self.emit("break") @@ -525,12 +526,12 @@ self.namecount += 1 child = t.children[0] self.emit("%s = _result" % (resultname, )) - for _ in self.start_block("try:"): + with self.block("try:"): self.dispatch(child) - for _ in self.start_block("except BacktrackException:"): + with self.block("except BacktrackException:"): self.revert(c) self.emit("_result = %s" % (resultname, )) - for _ in self.start_block("else:"): + with self.block("else:"): # heuristic to get nice error messages sometimes if isinstance(child, Symbol) and child.symbol == "QUOTE": @@ -559,21 +560,21 @@ def visit_if(self, t): if len(t.children) == 2: self.dispatch(t.children[0]) - for _ in self.start_block("if not (%s):" % ( + with self.block("if not (%s):" % ( t.children[-1].additional_info[1:-1], )): self.emit("raise BacktrackException(") self.emit(" self._ErrorInformation(") self.emit(" _startingpos, ['condition not met']))") - + def visit_choose(self, t): - for _ in self.start_block("for %s in (%s):" % ( + with self.block("for %s in (%s):" % ( t.children[0], t.children[1].additional_info[1:-1], )): - for _ in self.start_block("try:"): + with self.block("try:"): self.dispatch(t.children[2]) self.emit("break") - for _ in self.start_block("except BacktrackException, _exc:"): + with self.block("except BacktrackException, _exc:"): self.combine_error('_exc.error') - for _ in self.start_block("else:"): + with self.block("else:"): self.emit("raise BacktrackException(_error)") def visit_call(self, t): From noreply at buildbot.pypy.org Wed Sep 21 00:16:36 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Sep 2011 00:16:36 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: fix this test. 
Message-ID: <20110920221636.7B12D820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47378:5cb7011f542a Date: 2011-09-20 18:16 -0400 http://bitbucket.org/pypy/pypy/changeset/5cb7011f542a/ Log: fix this test. diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -337,7 +337,9 @@ assert loop.match_by_id('append', """ i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) - call(ConstClass(_ll_list_resize_ge__listPtr_Signed), p8, i15, descr=) + # Will be killed by the backend + i17 = arraylen_gc(p7, descr=) + call(ConstClass(_ll_list_resize_ge), p8, i15, descr=) guard_no_exception(descr=...) p17 = getfield_gc(p8, descr=) p19 = new_with_vtable(ConstClass(W_IntObject)) From noreply at buildbot.pypy.org Wed Sep 21 00:19:05 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Sep 2011 00:19:05 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: fix this test on 32-bit. Message-ID: <20110920221905.CFA58820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47379:571efabef39b Date: 2011-09-20 18:18 -0400 http://bitbucket.org/pypy/pypy/changeset/571efabef39b/ Log: fix this test on 32-bit. diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -301,11 +301,17 @@ loop, = log.loops_by_id("struct") # This could, of course stand some improvement, to remove all these # arithmatic ops, but we've removed all the core overhead. + if sys.maxint == 31 ** 2: + extra = """ + i8 = int_lt(i4, -2147483648) + guard_false(i8, descr=...) + """ + else: + extra = "" assert loop.match_by_id("struct", """ guard_not_invalidated(descr=...) 
# struct.pack - i8 = int_lt(i4, -2147483648) - guard_false(i8, descr=...) + %(32_bit_only)s i11 = int_and(i4, 255) i13 = int_rshift(i4, 8) i14 = int_and(i13, 255) @@ -323,4 +329,4 @@ guard_false(i28, descr=...) i30 = int_lshift(i20, 24) i31 = int_or(i26, i30) - """) \ No newline at end of file + """ % {"32_bit_only": extra}) \ No newline at end of file From noreply at buildbot.pypy.org Wed Sep 21 01:17:07 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Sep 2011 01:17:07 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: fix this. Message-ID: <20110920231707.C9CE5820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47380:b49e0e2066a7 Date: 2011-09-20 19:16 -0400 http://bitbucket.org/pypy/pypy/changeset/b49e0e2066a7/ Log: fix this. diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -299,15 +299,15 @@ assert log.result == main() loop, = log.loops_by_id("struct") - # This could, of course stand some improvement, to remove all these - # arithmatic ops, but we've removed all the core overhead. - if sys.maxint == 31 ** 2: + if sys.maxint == 2 ** 63 - 1: extra = """ i8 = int_lt(i4, -2147483648) guard_false(i8, descr=...) """ else: extra = "" + # This could, of course stand some improvement, to remove all these + # arithmatic ops, but we've removed all the core overhead. assert loop.match_by_id("struct", """ guard_not_invalidated(descr=...) 
# struct.pack From noreply at buildbot.pypy.org Wed Sep 21 01:19:14 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Sep 2011 01:19:14 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in unroll-if-alt: Message-ID: <20110920231914.3D155820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r47381:c2297d1b6a6b Date: 2011-09-20 19:19 -0400 http://bitbucket.org/pypy/pypy/changeset/c2297d1b6a6b/ Log: Merged in unroll-if-alt: This adds @jit.look_inside_iff(lambda *args) which allows for conditional unrolling, as well as jit.isconstant and jit.isvirtual for use in these predicates. It also makes use of these to optimize struct.pack, struct.unpack, and str.__mod__. There's still plenty of room for improvements though. Thanks to cfbolz and fijal for review. diff --git a/pypy/annotation/annrpython.py b/pypy/annotation/annrpython.py --- a/pypy/annotation/annrpython.py +++ b/pypy/annotation/annrpython.py @@ -149,7 +149,7 @@ desc = olddesc.bind_self(classdef) args = self.bookkeeper.build_args("simple_call", args_s[:]) desc.consider_call_site(self.bookkeeper, desc.getcallfamily(), [desc], - args, annmodel.s_ImpossibleValue) + args, annmodel.s_ImpossibleValue, None) result = [] def schedule(graph, inputcells): result.append((graph, inputcells)) diff --git a/pypy/annotation/bookkeeper.py b/pypy/annotation/bookkeeper.py --- a/pypy/annotation/bookkeeper.py +++ b/pypy/annotation/bookkeeper.py @@ -209,8 +209,8 @@ self.consider_call_site(call_op) for pbc, args_s in self.emulated_pbc_calls.itervalues(): - self.consider_call_site_for_pbc(pbc, 'simple_call', - args_s, s_ImpossibleValue) + self.consider_call_site_for_pbc(pbc, 'simple_call', + args_s, s_ImpossibleValue, None) self.emulated_pbc_calls = {} finally: self.leave() @@ -257,18 +257,18 @@ args_s = [lltype_to_annotation(adtmeth.ll_ptrtype)] + args_s if isinstance(s_callable, SomePBC): s_result = binding(call_op.result, s_ImpossibleValue) - self.consider_call_site_for_pbc(s_callable, - 
call_op.opname, - args_s, s_result) + self.consider_call_site_for_pbc(s_callable, call_op.opname, args_s, + s_result, call_op) - def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result): + def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result, + call_op): descs = list(s_callable.descriptions) if not descs: return family = descs[0].getcallfamily() args = self.build_args(opname, args_s) s_callable.getKind().consider_call_site(self, family, descs, args, - s_result) + s_result, call_op) def getuniqueclassdef(self, cls): """Get the ClassDef associated with the given user cls. @@ -656,6 +656,7 @@ whence = None else: whence = emulated # callback case + op = None s_previous_result = s_ImpossibleValue def schedule(graph, inputcells): @@ -663,7 +664,7 @@ results = [] for desc in descs: - results.append(desc.pycall(schedule, args, s_previous_result)) + results.append(desc.pycall(schedule, args, s_previous_result, op)) s_result = unionof(*results) return s_result diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -255,7 +255,11 @@ raise TypeError, "signature mismatch: %s" % e.getmsg(self.name) return inputcells - def specialize(self, inputcells): + def specialize(self, inputcells, op=None): + if (op is None and + getattr(self.bookkeeper, "position_key", None) is not None): + _, block, i = self.bookkeeper.position_key + op = block.operations[i] if self.specializer is None: # get the specializer based on the tag of the 'pyobj' # (if any), according to the current policy @@ -269,11 +273,14 @@ enforceargs = Sig(*enforceargs) self.pyobj._annenforceargs_ = enforceargs enforceargs(self, inputcells) # can modify inputcells in-place - return self.specializer(self, inputcells) + if getattr(self.pyobj, '_annspecialcase_', '').endswith("call_location"): + return self.specializer(self, inputcells, op) + else: + return self.specializer(self, 
inputcells) - def pycall(self, schedule, args, s_previous_result): + def pycall(self, schedule, args, s_previous_result, op=None): inputcells = self.parse_arguments(args) - result = self.specialize(inputcells) + result = self.specialize(inputcells, op) if isinstance(result, FunctionGraph): graph = result # common case # if that graph has a different signature, we need to re-parse @@ -296,17 +303,17 @@ None, # selfclassdef name) - def consider_call_site(bookkeeper, family, descs, args, s_result): + def consider_call_site(bookkeeper, family, descs, args, s_result, op): shape = rawshape(args) - row = FunctionDesc.row_to_consider(descs, args) + row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) consider_call_site = staticmethod(consider_call_site) - def variant_for_call_site(bookkeeper, family, descs, args): + def variant_for_call_site(bookkeeper, family, descs, args, op): shape = rawshape(args) bookkeeper.enter(None) try: - row = FunctionDesc.row_to_consider(descs, args) + row = FunctionDesc.row_to_consider(descs, args, op) finally: bookkeeper.leave() index = family.calltable_lookup_row(shape, row) @@ -316,7 +323,7 @@ def rowkey(self): return self - def row_to_consider(descs, args): + def row_to_consider(descs, args, op): # see comments in CallFamily from pypy.annotation.model import s_ImpossibleValue row = {} @@ -324,7 +331,7 @@ def enlist(graph, ignore): row[desc.rowkey()] = graph return s_ImpossibleValue # meaningless - desc.pycall(enlist, args, s_ImpossibleValue) + desc.pycall(enlist, args, s_ImpossibleValue, op) return row row_to_consider = staticmethod(row_to_consider) @@ -521,7 +528,7 @@ "specialization" % (self.name,)) return self.getclassdef(None) - def pycall(self, schedule, args, s_previous_result): + def pycall(self, schedule, args, s_previous_result, op=None): from pypy.annotation.model import SomeInstance, SomeImpossibleValue if self.specialize: if self.specialize == 'specialize:ctr_location': @@ -664,7 +671,7 @@ 
cdesc = cdesc.basedesc return s_result # common case - def consider_call_site(bookkeeper, family, descs, args, s_result): + def consider_call_site(bookkeeper, family, descs, args, s_result, op): from pypy.annotation.model import SomeInstance, SomePBC, s_None if len(descs) == 1: # call to a single class, look at the result annotation @@ -709,7 +716,7 @@ initdescs[0].mergecallfamilies(*initdescs[1:]) initfamily = initdescs[0].getcallfamily() MethodDesc.consider_call_site(bookkeeper, initfamily, initdescs, - args, s_None) + args, s_None, op) consider_call_site = staticmethod(consider_call_site) def getallbases(self): @@ -782,13 +789,13 @@ def getuniquegraph(self): return self.funcdesc.getuniquegraph() - def pycall(self, schedule, args, s_previous_result): + def pycall(self, schedule, args, s_previous_result, op=None): from pypy.annotation.model import SomeInstance if self.selfclassdef is None: raise Exception("calling %r" % (self,)) s_instance = SomeInstance(self.selfclassdef, flags = self.flags) args = args.prepend(s_instance) - return self.funcdesc.pycall(schedule, args, s_previous_result) + return self.funcdesc.pycall(schedule, args, s_previous_result, op) def bind_under(self, classdef, name): self.bookkeeper.warning("rebinding an already bound %r" % (self,)) @@ -801,10 +808,10 @@ self.name, flags) - def consider_call_site(bookkeeper, family, descs, args, s_result): + def consider_call_site(bookkeeper, family, descs, args, s_result, op): shape = rawshape(args, nextra=1) # account for the extra 'self' funcdescs = [methoddesc.funcdesc for methoddesc in descs] - row = FunctionDesc.row_to_consider(descs, args) + row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) consider_call_site = staticmethod(consider_call_site) @@ -956,16 +963,16 @@ return '' % (self.funcdesc, self.frozendesc) - def pycall(self, schedule, args, s_previous_result): + def pycall(self, schedule, args, s_previous_result, op=None): from pypy.annotation.model import 
SomePBC s_self = SomePBC([self.frozendesc]) args = args.prepend(s_self) - return self.funcdesc.pycall(schedule, args, s_previous_result) + return self.funcdesc.pycall(schedule, args, s_previous_result, op) - def consider_call_site(bookkeeper, family, descs, args, s_result): + def consider_call_site(bookkeeper, family, descs, args, s_result, op): shape = rawshape(args, nextra=1) # account for the extra 'self' funcdescs = [mofdesc.funcdesc for mofdesc in descs] - row = FunctionDesc.row_to_consider(descs, args) + row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) consider_call_site = staticmethod(consider_call_site) diff --git a/pypy/annotation/policy.py b/pypy/annotation/policy.py --- a/pypy/annotation/policy.py +++ b/pypy/annotation/policy.py @@ -1,7 +1,7 @@ # base annotation policy for specialization from pypy.annotation.specialize import default_specialize as default from pypy.annotation.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype -from pypy.annotation.specialize import memo +from pypy.annotation.specialize import memo, specialize_call_location # for some reason, model must be imported first, # or we create a cycle. 
from pypy.annotation import model as annmodel @@ -75,6 +75,7 @@ specialize__arg = staticmethod(specialize_argvalue) # specialize:arg(N) specialize__argtype = staticmethod(specialize_argtype) # specialize:argtype(N) specialize__arglistitemtype = staticmethod(specialize_arglistitemtype) + specialize__call_location = staticmethod(specialize_call_location) def specialize__ll(pol, *args): from pypy.rpython.annlowlevel import LowLevelAnnotatorPolicy diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py --- a/pypy/annotation/specialize.py +++ b/pypy/annotation/specialize.py @@ -370,3 +370,7 @@ else: key = s.listdef.listitem.s_value.knowntype return maybe_star_args(funcdesc, key, args_s) + +def specialize_call_location(funcdesc, args_s, op): + assert op is not None + return maybe_star_args(funcdesc, op, args_s) diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py --- a/pypy/annotation/test/test_annrpython.py +++ b/pypy/annotation/test/test_annrpython.py @@ -1099,8 +1099,8 @@ allocdesc = a.bookkeeper.getdesc(alloc) s_C1 = a.bookkeeper.immutablevalue(C1) s_C2 = a.bookkeeper.immutablevalue(C2) - graph1 = allocdesc.specialize([s_C1]) - graph2 = allocdesc.specialize([s_C2]) + graph1 = allocdesc.specialize([s_C1], None) + graph2 = allocdesc.specialize([s_C2], None) assert a.binding(graph1.getreturnvar()).classdef == C1df assert a.binding(graph2.getreturnvar()).classdef == C2df assert graph1 in a.translator.graphs @@ -1135,8 +1135,8 @@ allocdesc = a.bookkeeper.getdesc(alloc) s_C1 = a.bookkeeper.immutablevalue(C1) s_C2 = a.bookkeeper.immutablevalue(C2) - graph1 = allocdesc.specialize([s_C1, s_C2]) - graph2 = allocdesc.specialize([s_C2, s_C2]) + graph1 = allocdesc.specialize([s_C1, s_C2], None) + graph2 = allocdesc.specialize([s_C2, s_C2], None) assert a.binding(graph1.getreturnvar()).classdef == C1df assert a.binding(graph2.getreturnvar()).classdef == C2df assert graph1 in a.translator.graphs @@ -1194,6 +1194,19 @@ 
assert len(executedesc._cache[(0, 'star', 2)].startblock.inputargs) == 4 assert len(executedesc._cache[(1, 'star', 3)].startblock.inputargs) == 5 + def test_specialize_call_location(self): + def g(a): + return a + g._annspecialcase_ = "specialize:call_location" + def f(x): + return g(x) + f._annspecialcase_ = "specialize:argtype(0)" + def h(y): + w = f(y) + return int(f(str(y))) + w + a = self.RPythonAnnotator() + assert a.build_types(h, [int]) == annmodel.SomeInteger() + def test_assert_list_doesnt_lose_info(self): class T(object): pass diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -125,6 +125,7 @@ ### Manipulation ### + @jit.look_inside_iff(lambda self: not self._dont_jit) def unpack(self): # slowish "Return a ([w1,w2...], {'kw':w3...}) pair." kwds_w = {} @@ -245,6 +246,8 @@ ### Parsing for function calls ### + # XXX: this should be @jit.look_inside_iff, but we need key word arguments, + # and it doesn't support them for now. 
def _match_signature(self, w_firstarg, scope_w, signature, defaults_w=None, blindargs=0): """Parse args and kwargs according to the signature of a code object, diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -496,6 +496,16 @@ u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) u.chars[index] = unichr(newvalue) + def bh_copystrcontent(self, src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst) + rstr.copy_string_contents(src, dst, srcstart, dststart, length) + + def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): + src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src) + dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst) + rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + def bh_call_i(self, func, calldescr, args_i, args_r, args_f): assert isinstance(calldescr, BaseIntCallDescr) if not we_are_translated(): diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -78,7 +78,7 @@ Optionally, return a ``ops_offset`` dictionary. See the docstring of ``compiled_loop`` for more informations about it. 
""" - raise NotImplementedError + raise NotImplementedError def dump_loop_token(self, looptoken): """Print a disassembled version of looptoken to stdout""" @@ -298,6 +298,10 @@ raise NotImplementedError def bh_unicodesetitem(self, string, index, newvalue): raise NotImplementedError + def bh_copystrcontent(self, src, dst, srcstart, dststart, length): + raise NotImplementedError + def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): + raise NotImplementedError def force(self, force_token): raise NotImplementedError diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -1158,6 +1158,12 @@ return SpaceOperation('%s_assert_green' % kind, args, None) elif oopspec_name == 'jit.current_trace_length': return SpaceOperation('current_trace_length', [], op.result) + elif oopspec_name == 'jit.isconstant': + kind = getkind(args[0].concretetype) + return SpaceOperation('%s_isconstant' % kind, args, op.result) + elif oopspec_name == 'jit.isvirtual': + kind = getkind(args[0].concretetype) + return SpaceOperation('%s_isvirtual' % kind, args, op.result) else: raise AssertionError("missing support for %r" % oopspec_name) @@ -1415,6 +1421,14 @@ else: assert 0, "args[0].concretetype must be STR or UNICODE" # + if oopspec_name == 'stroruni.copy_contents': + if SoU.TO == rstr.STR: + new_op = 'copystrcontent' + elif SoU.TO == rstr.UNICODE: + new_op = 'copyunicodecontent' + else: + assert 0 + return SpaceOperation(new_op, args, op.result) if oopspec_name == "stroruni.equal": for otherindex, othername, argtypes, resulttype in [ (EffectInfo.OS_STREQ_SLICE_CHECKNULL, diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -835,6 +835,18 @@ def bhimpl_current_trace_length(): return -1 + @arguments("i", returns="i") + def bhimpl_int_isconstant(x): + return False + 
+ @arguments("r", returns="i") + def bhimpl_ref_isconstant(x): + return False + + @arguments("r", returns="i") + def bhimpl_ref_isvirtual(x): + return False + # ---------- # the main hints and recursive calls @@ -1224,6 +1236,9 @@ @arguments("cpu", "r", "i", "i") def bhimpl_strsetitem(cpu, string, index, newchr): cpu.bh_strsetitem(string, index, newchr) + @arguments("cpu", "r", "r", "i", "i", "i") + def bhimpl_copystrcontent(cpu, src, dst, srcstart, dststart, length): + cpu.bh_copystrcontent(src, dst, srcstart, dststart, length) @arguments("cpu", "i", returns="r") def bhimpl_newunicode(cpu, length): @@ -1237,6 +1252,9 @@ @arguments("cpu", "r", "i", "i") def bhimpl_unicodesetitem(cpu, unicode, index, newchr): cpu.bh_unicodesetitem(unicode, index, newchr) + @arguments("cpu", "r", "r", "i", "i", "i") + def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length): + cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length) @arguments(returns=(longlong.is_64_bit and "i" or "f")) def bhimpl_ll_read_timestamp(): diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py --- a/pypy/jit/metainterp/heapcache.py +++ b/pypy/jit/metainterp/heapcache.py @@ -11,9 +11,13 @@ self.known_class_boxes = {} # store the boxes that contain newly allocated objects, this maps the # boxes to a bool, the bool indicates whether or not the object has - # escaped the trace or not, its presences in the mapping shows that it - # was allocated inside the trace + # escaped the trace or not (True means the box never escaped, False + # means it did escape), its presences in the mapping shows that it was + # allocated inside the trace self.new_boxes = {} + # Tracks which boxes should be marked as escaped when the key box + # escapes. 
+ self.dependencies = {} # contains frame boxes that are not virtualizables self.nonstandard_virtualizables = {} # heap cache @@ -31,12 +35,28 @@ def mark_escaped(self, opnum, argboxes): idx = 0 - for box in argboxes: - # setfield_gc and setarrayitem_gc don't escape their first argument - if not (idx == 0 and opnum in [rop.SETFIELD_GC, rop.SETARRAYITEM_GC]): - if box in self.new_boxes: - self.new_boxes[box] = False - idx += 1 + if opnum == rop.SETFIELD_GC: + assert len(argboxes) == 2 + box, valuebox = argboxes + if self.is_unescaped(box) and self.is_unescaped(valuebox): + self.dependencies.setdefault(box, []).append(valuebox) + else: + self._escape(valuebox) + # GETFIELD_GC doesn't escape it's argument + elif opnum != rop.GETFIELD_GC: + for box in argboxes: + # setarrayitem_gc don't escape its first argument + if not (idx == 0 and opnum in [rop.SETARRAYITEM_GC]): + self._escape(box) + idx += 1 + + def _escape(self, box): + if box in self.new_boxes: + self.new_boxes[box] = False + if box in self.dependencies: + for dep in self.dependencies[box]: + self._escape(dep) + del self.dependencies[box] def clear_caches(self, opnum, descr, argboxes): if opnum == rop.SETFIELD_GC: diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -71,7 +71,7 @@ guards.append(op) elif self.level == LEVEL_KNOWNCLASS: op = ResOperation(rop.GUARD_NONNULL, [box], None) - guards.append(op) + guards.append(op) op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) guards.append(op) else: @@ -112,7 +112,7 @@ self.lenbound.bound.intersect(other.lenbound.bound) else: self.lenbound = other.lenbound.clone() - + def force_box(self): return self.box @@ -146,7 +146,7 @@ assert isinstance(constbox, Const) self.box = constbox self.level = LEVEL_CONSTANT - + if isinstance(constbox, ConstInt): val = constbox.getint() self.intbound = 
IntBound(val, val) @@ -378,7 +378,7 @@ new.set_optimizations(optimizations) new.quasi_immutable_deps = self.quasi_immutable_deps return new - + def produce_potential_short_preamble_ops(self, sb): raise NotImplementedError('This is implemented in unroll.UnrollableOptimizer') @@ -505,9 +505,9 @@ if op.returns_bool_result(): self.bool_boxes[self.getvalue(op.result)] = None self._emit_operation(op) - + @specialize.argtype(0) - def _emit_operation(self, op): + def _emit_operation(self, op): for i in range(op.numargs()): arg = op.getarg(i) try: @@ -568,7 +568,7 @@ arg = value.get_key_box() args[i] = arg args[n] = ConstInt(op.getopnum()) - args[n+1] = op.getdescr() + args[n + 1] = op.getdescr() return args @specialize.argtype(0) @@ -616,7 +616,7 @@ def remember_emitting_pure(self, op): pass - + def constant_fold(self, op): argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] @@ -658,9 +658,9 @@ arrayvalue = self.getvalue(op.getarg(0)) arrayvalue.make_len_gt(MODE_UNICODE, op.getdescr(), indexvalue.box.getint()) self.optimize_default(op) - - + + dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', default=Optimizer.optimize_default) diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -19,7 +19,7 @@ def new(self): return OptRewrite() - + def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) @@ -231,6 +231,17 @@ else: self.make_constant(op.result, result) return + + args = self.optimizer.make_args_key(op) + oldop = self.optimizer.pure_operations.get(args, None) + if oldop is not None and oldop.getdescr() is op.getdescr(): + assert oldop.getopnum() == op.getopnum() + self.make_equal_to(op.result, self.getvalue(oldop.result)) + return + else: + self.optimizer.pure_operations[args] = op + self.optimizer.remember_emitting_pure(op) + 
# replace CALL_PURE with just CALL args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, @@ -351,7 +362,7 @@ # expects a compile-time constant assert isinstance(arg, Const) key = make_hashable_int(arg.getint()) - + resvalue = self.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -4711,6 +4711,35 @@ """ self.optimize_loop(ops, expected) + def test_empty_copystrunicontent(self): + ops = """ + [p0, p1, i0, i2, i3] + i4 = int_eq(i3, 0) + guard_true(i4) [] + copystrcontent(p0, p1, i0, i2, i3) + jump(p0, p1, i0, i2, i3) + """ + expected = """ + [p0, p1, i0, i2, i3] + i4 = int_eq(i3, 0) + guard_true(i4) [] + jump(p0, p1, i0, i2, 0) + """ + self.optimize_strunicode_loop(ops, expected) + + def test_empty_copystrunicontent_virtual(self): + ops = """ + [p0] + p1 = newstr(23) + copystrcontent(p0, p1, 0, 0, 0) + jump(p0) + """ + expected = """ + [p0] + jump(p0) + """ + self.optimize_strunicode_loop(ops, expected) + def test_forced_virtuals_aliasing(self): ops = """ [i0, i1] @@ -4739,6 +4768,7 @@ self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -102,9 +102,9 @@ print "Short Preamble:" short = loop.preamble.token.short_preamble[0] print short.inputargs - print '\n'.join([str(o) for o in short.operations]) + print '\n'.join([str(o) for o in short.operations]) print - + assert expected != "crash!", "should have raised an exception" 
self.assert_equal(loop, expected) if expected_preamble: @@ -113,7 +113,7 @@ if expected_short: self.assert_equal(short, expected_short, text_right='expected short preamble') - + return loop class OptimizeOptTest(BaseTestWithUnroll): @@ -866,10 +866,10 @@ setfield_gc(p3sub, i1, descr=valuedescr) setfield_gc(p1, p3sub, descr=nextdescr) # XXX: We get two extra operations here because the setfield - # above is the result of forcing p1 and thus not + # above is the result of forcing p1 and thus not # registered with the heap optimizer. I've makred tests # below with VIRTUALHEAP if they suffer from this issue - p3sub2 = getfield_gc(p1, descr=nextdescr) + p3sub2 = getfield_gc(p1, descr=nextdescr) guard_nonnull_class(p3sub2, ConstClass(node_vtable2)) [] jump(i1, p1, p3sub2) """ @@ -1411,7 +1411,7 @@ guard_isnull(p18) [p0, p8] p31 = new(descr=ssize) p35 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p35, p31, descr=valuedescr) + setfield_gc(p35, p31, descr=valuedescr) jump(p0, p35) """ expected = """ @@ -1426,7 +1426,7 @@ guard_isnull(p18) [p0, p8] p31 = new(descr=ssize) p35 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p35, p31, descr=valuedescr) + setfield_gc(p35, p31, descr=valuedescr) jump(p0, p35, p19, p18) """ expected = """ @@ -1435,7 +1435,7 @@ jump(p0, NULL) """ self.optimize_loop(ops, expected) - + def test_varray_1(self): ops = """ [i1] @@ -2181,7 +2181,7 @@ jump(p1) """ self.optimize_loop(ops, expected) - + def test_duplicate_getarrayitem_2(self): ops = """ [p1, i0] @@ -2199,7 +2199,7 @@ jump(p1, i7, i6) """ self.optimize_loop(ops, expected) - + def test_duplicate_getarrayitem_after_setarrayitem_1(self): ops = """ [p1, p2] @@ -2812,14 +2812,14 @@ guard_no_overflow() [] i3b = int_is_true(i3) guard_true(i3b) [] - setfield_gc(p1, i1, descr=valuedescr) + setfield_gc(p1, i1, descr=valuedescr) escape(i3) escape(i3) jump(i1, p1, i3) """ expected = """ [i1, p1, i3] - setfield_gc(p1, i1, descr=valuedescr) + setfield_gc(p1, i1, descr=valuedescr) 
escape(i3) escape(i3) jump(i1, p1, i3) @@ -2830,7 +2830,7 @@ ops = """ [p8, p11, i24] p26 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p26, i24, descr=adescr) + setfield_gc(p26, i24, descr=adescr) i34 = getfield_gc_pure(p11, descr=valuedescr) i35 = getfield_gc_pure(p26, descr=adescr) i36 = int_add_ovf(i34, i35) @@ -2839,10 +2839,10 @@ """ expected = """ [p8, p11, i26] - jump(p8, p11, i26) - """ - self.optimize_loop(ops, expected) - + jump(p8, p11, i26) + """ + self.optimize_loop(ops, expected) + def test_ovf_guard_in_short_preamble2(self): ops = """ [p8, p11, p12] @@ -3191,13 +3191,18 @@ jump(p1, i4, i3) ''' expected = ''' + [p1, i4, i3, i5] + setfield_gc(p1, i5, descr=valuedescr) + jump(p1, i3, i5, i5) + ''' + preamble = ''' [p1, i1, i4] setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i3, descr=valuedescr) - jump(p1, i4, i3) + jump(p1, i4, i3, i3) ''' - self.optimize_loop(ops, expected, expected) + self.optimize_loop(ops, expected, preamble) def test_call_pure_invalidates_heap_knowledge(self): # CALL_PURE should still force the setfield_gc() to occur before it @@ -3209,21 +3214,20 @@ jump(p1, i4, i3) ''' expected = ''' + [p1, i4, i3, i5] + setfield_gc(p1, i4, descr=valuedescr) + jump(p1, i3, i5, i5) + ''' + preamble = ''' [p1, i1, i4] setfield_gc(p1, i1, descr=valuedescr) i3 = call(p1, descr=plaincalldescr) setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i4, i3) + jump(p1, i4, i3, i3) ''' - self.optimize_loop(ops, expected, expected) + self.optimize_loop(ops, expected, preamble) def test_call_pure_constant_folding(self): - # CALL_PURE is not marked as is_always_pure(), because it is wrong - # to call the function arbitrary many times at arbitrary points in - # time. Check that it is either constant-folded (and replaced by - # the result of the call, recorded as the first arg), or turned into - # a regular CALL. - # XXX can this test be improved with unrolling? 
arg_consts = [ConstInt(i) for i in (123456, 4, 5, 6)] call_pure_results = {tuple(arg_consts): ConstInt(42)} ops = ''' @@ -3239,14 +3243,13 @@ escape(i1) escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - jump(i0, i4) + jump(i0, i4, i4) ''' expected = ''' - [i0, i2] + [i0, i4, i5] escape(42) - escape(i2) - i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - jump(i0, i4) + escape(i4) + jump(i0, i5, i5) ''' self.optimize_loop(ops, expected, preamble, call_pure_results) @@ -3270,18 +3273,43 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - jump(i0, i4) + jump(i0, i4, i4) ''' expected = ''' - [i0, i2] + [i0, i2, i3] escape(42) escape(i2) - i4 = call(123456, 4, i0, 6, descr=plaincalldescr) - guard_no_exception() [] - jump(i0, i4) + jump(i0, i3, i3) ''' self.optimize_loop(ops, expected, preamble, call_pure_results) + def test_call_pure_returning_virtual(self): + # XXX: This kind of loop invaraint call_pure will be forced + # both in the preamble and in the peeled loop + ops = ''' + [p1, i1, i2] + p2 = call_pure(0, p1, i1, i2, descr=strslicedescr) + escape(p2) + jump(p1, i1, i2) + ''' + preamble = ''' + [p1, i1, i2] + i6 = int_sub(i2, i1) + p2 = newstr(i6) + copystrcontent(p1, p2, i1, 0, i6) + escape(p2) + jump(p1, i1, i2, i6) + ''' + expected = ''' + [p1, i1, i2, i6] + p2 = newstr(i6) + copystrcontent(p1, p2, i1, 0, i6) + escape(p2) + jump(p1, i1, i2, i6) + ''' + self.optimize_loop(ops, expected, preamble) + + # ---------- def test_vref_nonvirtual_nonescape(self): @@ -5150,14 +5178,14 @@ [i0, i1, i10, i11, i2, i3, i4] escape(i2) escape(i3) - escape(i4) + escape(i4) i24 = int_mul_ovf(i10, i11) guard_no_overflow() [] i23 = int_sub_ovf(i10, i11) guard_no_overflow() [] i22 = int_add_ovf(i10, i11) guard_no_overflow() [] - jump(i0, i1, i10, i11, i2, i3, i4) + jump(i0, i1, i10, i11, i2, i3, i4) """ self.optimize_loop(ops, expected) @@ -5366,6 +5394,8 @@ """ self.optimize_strunicode_loop(ops, expected, expected) + # XXX Should 
some of the call's below now be call_pure? + def test_str_concat_1(self): ops = """ [p1, p2] @@ -5699,14 +5729,14 @@ ops = """ [p0, i0] i1 = unicodegetitem(p0, i0) - i10 = unicodegetitem(p0, i0) + i10 = unicodegetitem(p0, i0) i2 = int_lt(i1, 0) guard_false(i2) [] jump(p0, i0) """ expected = """ [p0, i0] - i1 = unicodegetitem(p0, i0) + i1 = unicodegetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -5865,7 +5895,7 @@ """ preamble = """ [p1, i1, i2, p3] - guard_nonnull(p3) [] + guard_nonnull(p3) [] i4 = int_sub(i2, i1) i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr) escape(i0) @@ -6474,7 +6504,7 @@ setfield_gc(p3, i1, descr=adescr) setfield_gc(p3, i2, descr=bdescr) i5 = int_gt(ii, 42) - guard_true(i5) [] + guard_true(i5) [] jump(p0, p1, p3, ii2, ii, i1, i2) """ self.optimize_loop(ops, expected) @@ -6500,7 +6530,7 @@ p1 = getfield_gc(p0, descr=nextdescr) guard_nonnull_class(p1, ConstClass(node_vtable)) [] p2 = getfield_gc(p1, descr=nextdescr) - guard_nonnull_class(p2, ConstClass(node_vtable)) [] + guard_nonnull_class(p2, ConstClass(node_vtable)) [] jump(p0) """ expected = """ @@ -6514,11 +6544,11 @@ guard_class(p1, ConstClass(node_vtable)) [] p2 = getfield_gc(p1, descr=nextdescr) guard_nonnull(p2) [] - guard_class(p2, ConstClass(node_vtable)) [] + guard_class(p2, ConstClass(node_vtable)) [] jump(p0) """ self.optimize_loop(ops, expected, expected_short=short) - + def test_forced_virtual_pure_getfield(self): ops = """ [p0] @@ -6582,7 +6612,7 @@ jump(p1, i2) """ self.optimize_loop(ops, expected) - + def test_loopinvariant_strlen(self): ops = """ [p9] @@ -6715,7 +6745,7 @@ [p0, p1] p2 = new_with_vtable(ConstClass(node_vtable)) p3 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p2, p3, descr=nextdescr) + setfield_gc(p2, p3, descr=nextdescr) jump(p2, p3) """ expected = """ @@ -6734,7 +6764,7 @@ jump(p2, i2) """ expected = """ - [p1] + [p1] p2 = getarrayitem_gc(p1, 7, descr=) i1 = arraylen_gc(p1) jump(p2) @@ -6775,8 +6805,8 @@ 
jump(p0, p2, p1) """ self.optimize_loop(ops, expected, expected_short=short) - - + + def test_loopinvariant_constant_strgetitem(self): ops = """ [p0] @@ -6830,11 +6860,11 @@ expected = """ [p0, i22, p1] call(i22, descr=nonwritedescr) - i3 = unicodelen(p1) # Should be killed by backend + i3 = unicodelen(p1) # Should be killed by backend jump(p0, i22, p1) """ self.optimize_loop(ops, expected, expected_short=short) - + def test_propagate_virtual_arryalen(self): ops = """ [p0] @@ -6903,7 +6933,7 @@ [p0, p1, p10, p11] i1 = arraylen_gc(p10, descr=arraydescr) getarrayitem_gc(p11, 1, descr=arraydescr) - call(i1, descr=nonwritedescr) + call(i1, descr=nonwritedescr) jump(p1, p0, p11, p10) """ self.optimize_loop(ops, expected) @@ -6912,20 +6942,20 @@ ops = """ [p5] i10 = getfield_gc(p5, descr=valuedescr) - call(i10, descr=nonwritedescr) + call(i10, descr=nonwritedescr) setfield_gc(p5, 1, descr=valuedescr) jump(p5) """ preamble = """ [p5] i10 = getfield_gc(p5, descr=valuedescr) - call(i10, descr=nonwritedescr) + call(i10, descr=nonwritedescr) setfield_gc(p5, 1, descr=valuedescr) jump(p5) """ expected = """ [p5] - call(1, descr=nonwritedescr) + call(1, descr=nonwritedescr) jump(p5) """ self.optimize_loop(ops, expected, preamble) @@ -6963,7 +6993,7 @@ [p9] call_assembler(0, descr=asmdescr) i18 = getfield_gc(p9, descr=valuedescr) - guard_value(i18, 0) [] + guard_value(i18, 0) [] jump(p9) """ self.optimize_loop(ops, expected) @@ -6992,17 +7022,17 @@ i10 = getfield_gc(p5, descr=valuedescr) i11 = getfield_gc(p6, descr=nextdescr) call(i10, i11, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, descr=nextdescr) jump(p5, p6) """ expected = """ [p5, p6, i10, i11] call(i10, i11, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, descr=nextdescr) jump(p5, p6, i10, i10) """ self.optimize_loop(ops, expected) - + def test_cached_pure_func_of_equal_fields(self): ops = """ [p5, p6] @@ -7011,18 +7041,18 @@ i12 = int_add(i10, 7) 
i13 = int_add(i11, 7) call(i12, i13, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, descr=nextdescr) jump(p5, p6) """ expected = """ [p5, p6, i14, i12, i10] i13 = int_add(i14, 7) call(i12, i13, descr=nonwritedescr) - setfield_gc(p6, i10, descr=nextdescr) + setfield_gc(p6, i10, descr=nextdescr) jump(p5, p6, i10, i12, i10) """ self.optimize_loop(ops, expected) - + def test_forced_counter(self): # XXX: VIRTUALHEAP (see above) py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") @@ -7165,7 +7195,7 @@ expected = """ [p1, p2, i2, i1] call(i2, descr=nonwritedescr) - setfield_gc(p2, i1, descr=nextdescr) + setfield_gc(p2, i1, descr=nextdescr) jump(p1, p2, i2, i1) """ self.optimize_loop(ops, expected) @@ -7185,11 +7215,11 @@ expected = """ [p1, p2, i2, i1] call(i2, descr=nonwritedescr) - setfield_gc(p2, i1, descr=valuedescr) + setfield_gc(p2, i1, descr=valuedescr) jump(p1, p2, i2, i1) """ self.optimize_loop(ops, expected) - + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass - + diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -226,8 +226,9 @@ if op and op.result: preamble_value = preamble_optimizer.getvalue(op.result) value = self.optimizer.getvalue(op.result) - imp = ValueImporter(self, preamble_value, op) - self.optimizer.importable_values[value] = imp + if not value.is_virtual(): + imp = ValueImporter(self, preamble_value, op) + self.optimizer.importable_values[value] = imp newresult = self.optimizer.getvalue(op.result).get_key_box() if newresult is not op.result: self.short_boxes.alias(newresult, op.result) diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -296,7 +296,7 @@ def copy_str_content(optimizer, 
srcbox, targetbox, - srcoffsetbox, offsetbox, lengthbox, mode): + srcoffsetbox, offsetbox, lengthbox, mode, need_next_offset=True): if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): M = 5 else: @@ -313,7 +313,10 @@ None)) offsetbox = _int_add(optimizer, offsetbox, CONST_1) else: - nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox) + if need_next_offset: + nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox) + else: + nextoffsetbox = None op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox], None) @@ -450,6 +453,30 @@ lengthbox = value.getstrlen(self.optimizer, mode) self.make_equal_to(op.result, self.getvalue(lengthbox)) + def optimize_COPYSTRCONTENT(self, op): + self._optimize_COPYSTRCONTENT(op, mode_string) + def optimize_COPYUNICODECONTENT(self, op): + self._optimize_COPYSTRCONTENT(op, mode_unicode) + + def _optimize_COPYSTRCONTENT(self, op, mode): + # args: src dst srcstart dststart length + src = self.getvalue(op.getarg(0)) + dst = self.getvalue(op.getarg(1)) + srcstart = self.getvalue(op.getarg(2)) + dststart = self.getvalue(op.getarg(3)) + length = self.getvalue(op.getarg(4)) + + if length.is_constant() and length.box.getint() == 0: + return + copy_str_content(self.optimizer, + src.force_box(), + dst.force_box(), + srcstart.force_box(), + dststart.force_box(), + length.force_box(), + mode, need_next_offset=False + ) + def optimize_CALL(self, op): # dispatch based on 'oopspecindex' to a method that handles # specifically the given oopspec call. 
For non-oopspec calls, diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -210,7 +210,8 @@ self.metainterp.clear_exception() resbox = self.execute(rop.%s, b1, b2) self.make_result_of_lastop(resbox) # same as execute_varargs() - self.metainterp.handle_possible_overflow_error() + if not isinstance(resbox, Const): + self.metainterp.handle_possible_overflow_error() return resbox ''' % (_opimpl, _opimpl.upper())).compile() @@ -401,23 +402,25 @@ self.metainterp.heapcache.new_array(resbox, lengthbox) return resbox - @arguments("box", "descr", "box") - def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox): + @specialize.arg(1) + def _do_getarrayitem_gc_any(self, op, arraybox, arraydescr, indexbox): tobox = self.metainterp.heapcache.getarrayitem( arraybox, arraydescr, indexbox) if tobox: # sanity check: see whether the current array value # corresponds to what the cache thinks the value is - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETARRAYITEM_GC, arraydescr, arraybox, indexbox) + resbox = executor.execute(self.metainterp.cpu, self.metainterp, op, + arraydescr, arraybox, indexbox) assert resbox.constbox().same_constant(tobox.constbox()) return tobox - resbox = self.execute_with_descr(rop.GETARRAYITEM_GC, - arraydescr, arraybox, indexbox) + resbox = self.execute_with_descr(op, arraydescr, arraybox, indexbox) self.metainterp.heapcache.getarrayitem_now_known( arraybox, arraydescr, indexbox, resbox) return resbox + @arguments("box", "descr", "box") + def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox): + return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC, arraybox, arraydescr, indexbox) opimpl_getarrayitem_gc_i = _opimpl_getarrayitem_gc_any opimpl_getarrayitem_gc_r = _opimpl_getarrayitem_gc_any @@ -433,8 +436,7 @@ @arguments("box", "descr", "box") def _opimpl_getarrayitem_gc_pure_any(self, arraybox, 
arraydescr, indexbox): - return self.execute_with_descr(rop.GETARRAYITEM_GC_PURE, - arraydescr, arraybox, indexbox) + return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_PURE, arraybox, arraydescr, indexbox) opimpl_getarrayitem_gc_pure_i = _opimpl_getarrayitem_gc_pure_any opimpl_getarrayitem_gc_pure_r = _opimpl_getarrayitem_gc_pure_any @@ -866,6 +868,14 @@ def opimpl_newunicode(self, lengthbox): return self.execute(rop.NEWUNICODE, lengthbox) + @arguments("box", "box", "box", "box", "box") + def opimpl_copystrcontent(self, srcbox, dstbox, srcstartbox, dststartbox, lengthbox): + return self.execute(rop.COPYSTRCONTENT, srcbox, dstbox, srcstartbox, dststartbox, lengthbox) + + @arguments("box", "box", "box", "box", "box") + def opimpl_copyunicodecontent(self, srcbox, dstbox, srcstartbox, dststartbox, lengthbox): + return self.execute(rop.COPYUNICODECONTENT, srcbox, dstbox, srcstartbox, dststartbox, lengthbox) + ## @FixME #arguments("descr", "varargs") ## def opimpl_residual_oosend_canraise(self, methdescr, varargs): ## return self.execute_varargs(rop.OOSEND, varargs, descr=methdescr, @@ -1060,6 +1070,18 @@ return ConstInt(trace_length) @arguments("box") + def _opimpl_isconstant(self, box): + return ConstInt(isinstance(box, Const)) + + opimpl_int_isconstant = opimpl_ref_isconstant = _opimpl_isconstant + + @arguments("box") + def _opimpl_isvirtual(self, box): + return ConstInt(self.metainterp.heapcache.is_unescaped(box)) + + opimpl_ref_isvirtual = _opimpl_isvirtual + + @arguments("box") def opimpl_virtual_ref(self, box): # Details on the content of metainterp.virtualref_boxes: # @@ -1673,6 +1695,10 @@ def _record_helper_nonpure_varargs(self, opnum, resbox, descr, argboxes): assert resbox is None or isinstance(resbox, Box) + if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST and + self.last_exc_value_box is None and + self._all_constants_varargs(argboxes)): + return resbox.constbox() # record the operation profiler = self.staticdata.profiler profiler.count_ops(opnum, 
RECORDED_OPS) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1,23 +1,25 @@ +import sys + import py -import sys -from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside -from pypy.rlib.jit import loop_invariant, elidable, promote -from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed -from pypy.rlib.jit import unroll_safe, current_trace_length + +from pypy import conftest +from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy from pypy.jit.metainterp import pyjitpl, history +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst +from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper +from pypy.jit.metainterp.warmspot import get_stats from pypy.jit.metainterp.warmstate import set_future_value -from pypy.jit.metainterp.warmspot import get_stats -from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy import conftest +from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, + loop_invariant, elidable, promote, jit_debug, assert_green, + AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff, + isconstant, isvirtual) from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT -from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst + class BasicTests: - def test_basic(self): def f(x, y): return x + y @@ -99,14 +101,14 @@ myjitdriver.jit_merge_point(x=x, y=y, res=res) res += x * x x += 1 - res += x * x + res += x * x y -= 1 return res res = self.meta_interp(f, [6, 7]) assert res == 1323 self.check_loop_count(1) 
self.check_loops(int_mul=1) - + def test_loop_variant_mul_ovf(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): @@ -1372,7 +1374,7 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_value=3) + self.check_loops(guard_class=0, guard_value=3) self.check_loops(guard_class=0, guard_value=6, everywhere=True) def test_merge_guardnonnull_guardclass(self): @@ -2118,7 +2120,7 @@ return sa res = self.meta_interp(f, [32, 7]) assert res == f(32, 7) - + def test_caching_setarrayitem_fixed(self): myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) def f(n, a): @@ -2138,7 +2140,7 @@ return sa res = self.meta_interp(f, [32, 7]) assert res == f(32, 7) - + def test_caching_setarrayitem_var(self): myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'b', 'node']) def f(n, a, b): @@ -2668,7 +2670,7 @@ myjitdriver.set_param('threshold', 3) myjitdriver.set_param('trace_eagerness', 1) myjitdriver.set_param('retrace_limit', 5) - myjitdriver.set_param('function_threshold', -1) + myjitdriver.set_param('function_threshold', -1) pc = sa = i = 0 while pc < len(bytecode): myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i) @@ -2693,12 +2695,12 @@ def g(n1, n2): for i in range(10): f(n1) - for i in range(10): + for i in range(10): f(n2) nn = [10, 3] assert self.meta_interp(g, nn) == g(*nn) - + # The attempts of retracing first loop will end up retracing the # second and thus fail 5 times, saturating the retrace_count. Instead a # bridge back to the preamble of the first loop is produced. A guard in @@ -2709,7 +2711,7 @@ self.check_tree_loop_count(2 + 3) # FIXME: Add a gloabl retrace counter and test that we are not trying more than 5 times. 
- + def g(n): for i in range(n): for j in range(10): @@ -2945,15 +2947,15 @@ a = [0, 1, 2, 3, 4] while i < n: myjitdriver.jit_merge_point(sa=sa, n=n, a=a, i=i) - if i < n/2: + if i < n / 2: sa += a[4] - elif i == n/2: + elif i == n / 2: a.pop() i += 1 res = self.meta_interp(f, [32]) assert res == f(32) self.check_loops(arraylen_gc=2) - + class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): @@ -3173,7 +3175,7 @@ res = self.meta_interp(f, [32]) assert res == f(32) self.check_tree_loop_count(3) - + def test_two_loopinvariant_arrays3(self): from pypy.rpython.lltypesystem import lltype, llmemory, rffi myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i', 'a']) @@ -3197,7 +3199,7 @@ res = self.meta_interp(f, [32]) assert res == f(32) self.check_tree_loop_count(2) - + def test_two_loopinvariant_arrays_boxed(self): class A(object): def __init__(self, a): @@ -3222,7 +3224,7 @@ res = self.meta_interp(f, [32]) assert res == f(32) self.check_loops(arraylen_gc=2, everywhere=True) - + def test_release_gil_flush_heap_cache(self): if sys.platform == "win32": py.test.skip("needs 'time'") @@ -3298,5 +3300,114 @@ self.meta_interp(main, [10]) + def test_look_inside_iff_const(self): + @look_inside_iff(lambda arg: isconstant(arg)) + def f(arg): + s = 0 + while arg > 0: + s += arg + arg -= 1 + return s + + driver = JitDriver(greens = ['code'], reds = ['n', 'arg', 's']) + + def main(code, n, arg): + s = 0 + while n > 0: + driver.jit_merge_point(code=code, n=n, arg=arg, s=s) + if code == 0: + s += f(arg) + else: + s += f(1) + n -= 1 + return s + + res = self.meta_interp(main, [0, 10, 2], enable_opts='') + assert res == main(0, 10, 2) + self.check_loops(call=1) + res = self.meta_interp(main, [1, 10, 2], enable_opts='') + assert res == main(1, 10, 2) + self.check_loops(call=0) + + def test_look_inside_iff_virtual(self): + # There's no good reason for this to be look_inside_iff, but it's a test! 
+ @look_inside_iff(lambda arg, n: isvirtual(arg)) + def f(arg, n): + if n == 100: + for i in xrange(n): + n += i + return arg.x + class A(object): + def __init__(self, x): + self.x = x + driver = JitDriver(greens=['n'], reds=['i', 'a']) + def main(n): + i = 0 + a = A(3) + while i < 20: + driver.jit_merge_point(i=i, n=n, a=a) + if n == 0: + i += f(a, n) + else: + i += f(A(2), n) + res = self.meta_interp(main, [0], enable_opts='') + assert res == main(0) + self.check_loops(call=1, getfield_gc=0) + res = self.meta_interp(main, [1], enable_opts='') + assert res == main(1) + self.check_loops(call=0, getfield_gc=0) + + def test_reuse_elidable_result(self): + driver = JitDriver(reds=['n', 's'], greens = []) + def main(n): + s = 0 + while n > 0: + driver.jit_merge_point(s=s, n=n) + s += len(str(n)) + len(str(n)) + n -= 1 + return s + res = self.meta_interp(main, [10]) + assert res == main(10) + self.check_loops({ + 'call': 1, 'guard_no_exception': 1, 'guard_true': 1, 'int_add': 2, + 'int_gt': 1, 'int_sub': 1, 'strlen': 1, 'jump': 1, + }) + + def test_look_inside_iff_const_getarrayitem_gc_pure(self): + driver = JitDriver(greens=['unroll'], reds=['s', 'n']) + + class A(object): + _immutable_fields_ = ["x[*]"] + def __init__(self, x): + self.x = [x] + + @look_inside_iff(lambda x: isconstant(x)) + def f(x): + i = 0 + for c in x: + i += 1 + return i + + def main(unroll, n): + s = 0 + while n > 0: + driver.jit_merge_point(s=s, n=n, unroll=unroll) + if unroll: + x = A("xx") + else: + x = A("x" * n) + s += f(x.x[0]) + n -= 1 + return s + + res = self.meta_interp(main, [0, 10]) + assert res == main(0, 10) + # 2 calls, one for f() and one for char_mul + self.check_loops(call=2) + res = self.meta_interp(main, [1, 10]) + assert res == main(1, 10) + self.check_loops(call=0) + + class TestLLtype(BaseLLtypeTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ 
b/pypy/jit/metainterp/test/test_dict.py @@ -153,11 +153,7 @@ res = self.meta_interp(f, [100], listops=True) assert res == f(50) - # XXX: ideally there would be 7 calls here, but repeated CALL_PURE with - # the same arguments are not folded, because we have conflicting - # definitions of pure, once strhash can be appropriately folded - # this should be decreased to seven. - self.check_loops({"call": 8, "guard_false": 1, "guard_no_exception": 6, + self.check_loops({"call": 7, "guard_false": 1, "guard_no_exception": 6, "guard_true": 1, "int_and": 1, "int_gt": 1, "int_is_true": 1, "int_sub": 1, "jump": 1, "new_with_vtable": 1, "setfield_gc": 1}) diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py --- a/pypy/jit/metainterp/test/test_heapcache.py +++ b/pypy/jit/metainterp/test/test_heapcache.py @@ -337,6 +337,24 @@ h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2]) assert not h.is_unescaped(box2) + def test_unescaped_testing(self): + h = HeapCache() + h.new(box1) + h.new(box2) + assert h.is_unescaped(box1) + assert h.is_unescaped(box2) + # Putting a virtual inside of another virtual doesn't escape it. + h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2]) + assert h.is_unescaped(box2) + # Reading a field from a virtual doesn't escape it. + h.invalidate_caches(rop.GETFIELD_GC, None, [box1]) + assert h.is_unescaped(box1) + # Escaping a virtual transitively escapes anything inside of it. 
+ assert not h.is_unescaped(box3) + h.invalidate_caches(rop.SETFIELD_GC, None, [box3, box1]) + assert not h.is_unescaped(box1) + assert not h.is_unescaped(box2) + def test_unescaped_array(self): h = HeapCache() h.new_array(box1, lengthbox1) diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -34,7 +34,7 @@ l = [x + 1] n -= 1 return l[0] - + res = self.meta_interp(f, [10], listops=True) assert res == f(10) self.check_all_virtualized() @@ -60,7 +60,7 @@ def test_ll_fixed_setitem_fast(self): jitdriver = JitDriver(greens = [], reds = ['n', 'l']) - + def f(n): l = [1, 2, 3] @@ -116,7 +116,7 @@ assert res == f(10) py.test.skip("'[non-null] * n' gives a residual call so far") self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) - + def test_arraycopy_simpleoptimize(self): def f(): l = [1, 2, 3, 4] @@ -208,6 +208,26 @@ assert res == f(15) self.check_loops(guard_exception=0) + def test_virtual_resize(self): + jitdriver = JitDriver(greens = [], reds = ['n', 's']) + def f(n): + s = 0 + while n > 0: + jitdriver.jit_merge_point(n=n, s=s) + lst = [] + lst += [1] + n -= len(lst) + s += lst[0] + lst.pop() + lst.append(1) + s /= lst.pop() + return s + res = self.meta_interp(f, [15], listops=True) + assert res == f(15) + self.check_loops({"int_add": 1, "int_sub": 1, "int_gt": 1, + "guard_true": 1, "jump": 1}) + + class TestOOtype(ListTests, OOJitMixin): pass @@ -236,8 +256,6 @@ return a * b res = self.meta_interp(f, [37]) assert res == f(37) - # There is the one actual field on a, plus 2 getfield's from the list - # itself, 1 to get the length (which is then incremented and passed to - # the resize func), and then a read of the items field to actually - # perform the setarrayitem on - self.check_loops(getfield_gc=5, everywhere=True) + # There is the one actual field on a, plus several fields on the list + # itself + 
self.check_loops(getfield_gc=10, everywhere=True) diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -5,7 +5,6 @@ class ListTests(object): def test_basic_list(self): - py.test.skip("not yet") myjitdriver = JitDriver(greens = [], reds = ['n', 'lst']) def f(n): lst = [] @@ -34,7 +33,7 @@ return m res = self.interp_operations(f, [11], listops=True) assert res == 49 - self.check_operations_history(call=5) + self.check_operations_history(call=3) def test_list_of_voids(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'lst']) @@ -93,7 +92,7 @@ return x res = self.meta_interp(f, [-2], listops=True) assert res == 41 - self.check_loops(call=1, guard_value=0) + self.check_loops(call=0, guard_value=0) # we don't support resizable lists on ootype #class TestOOtype(ListTests, OOJitMixin): diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -27,7 +27,7 @@ return i res = self.meta_interp(f, [10, True, _str('h')], listops=True) assert res == 5 - self.check_loops(**{self.CALL: 1, self.CALL_PURE: 0}) + self.check_loops(**{self.CALL: 1, self.CALL_PURE: 0, 'everywhere': True}) def test_eq_folded(self): _str = self._str @@ -327,7 +327,7 @@ def test_str_slice_len_surviving(self): _str = self._str longstring = _str("Unrolling Trouble") - mydriver = JitDriver(reds = ['i', 'a', 'sa'], greens = []) + mydriver = JitDriver(reds = ['i', 'a', 'sa'], greens = []) def f(a): i = sa = a while i < len(longstring): @@ -343,7 +343,7 @@ fillers = _str("abcdefghijklmnopqrstuvwxyz") data = _str("ABCDEFGHIJKLMNOPQRSTUVWXYZ") - mydriver = JitDriver(reds = ['line', 'noise', 'res'], greens = []) + mydriver = JitDriver(reds = ['line', 'noise', 'res'], greens = []) def f(): line = data noise = fillers @@ -370,7 +370,7 @@ def 
__init__(self, value): self.value = value mydriver = JitDriver(reds = ['ratio', 'line', 'noise', 'res'], - greens = []) + greens = []) def f(): line = Str(data) noise = Str(fillers) @@ -408,7 +408,7 @@ return len(sa) assert self.meta_interp(f, [16]) == f(16) - def test_loop_invariant_string_slize(self): + def test_loop_invariant_string_slice(self): _str = self._str mydriver = JitDriver(reds = ['i', 'n', 'sa', 's', 's1'], greens = []) def f(n, c): @@ -425,7 +425,7 @@ return sa assert self.meta_interp(f, [16, 'a']) == f(16, 'a') - def test_loop_invariant_string_slize_boxed(self): + def test_loop_invariant_string_slice_boxed(self): class Str(object): def __init__(self, value): self.value = value @@ -445,7 +445,7 @@ return sa assert self.meta_interp(f, [16, 'a']) == f(16, 'a') - def test_loop_invariant_string_slize_in_array(self): + def test_loop_invariant_string_slice_in_array(self): _str = self._str mydriver = JitDriver(reds = ['i', 'n', 'sa', 's', 's1'], greens = []) def f(n, c): @@ -513,7 +513,7 @@ m -= 1 return 42 self.meta_interp(f, [6, 7]) - self.check_loops(call=3, # str(), _str(), escape() + self.check_loops(call=1, # escape() newunicode=1, unicodegetitem=0, unicodesetitem=1, copyunicodecontent=1) @@ -536,3 +536,27 @@ self.check_loops(call_pure=0, call=1, newunicode=0, unicodegetitem=0, unicodesetitem=0, copyunicodecontent=0) + + def test_join_chars(self): + jitdriver = JitDriver(reds=['a', 'b', 'c', 'i'], greens=[]) + def f(a, b, c): + i = 0 + while i < 10: + jitdriver.jit_merge_point(a=a, b=b, c=c, i=i) + x = [] + if a: + x.append("a") + if b: + x.append("b") + if c: + x.append("c") + i += len("".join(x)) + return i + res = self.meta_interp(f, [1, 1, 1]) + assert res == f(True, True, True) + # The "".join should be unrolled, since the length of x is known since + # it is virtual, ensure there are no calls to ll_join_chars, or + # allocations. 
+ self.check_loops({ + "guard_true": 5, "int_is_true": 3, "int_lt": 2, "int_add": 2, "jump": 2, + }, everywhere=True) diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py --- a/pypy/jit/metainterp/test/test_tracingopts.py +++ b/pypy/jit/metainterp/test/test_tracingopts.py @@ -1,7 +1,10 @@ +import sys + +from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.rlib import jit +from pypy.rlib.rarithmetic import ovfcheck + import py -import sys -from pypy.rlib import jit -from pypy.jit.metainterp.test.support import LLJitMixin class TestLLtype(LLJitMixin): @@ -573,3 +576,18 @@ res = self.interp_operations(fn, [3]) assert res == 24 self.check_operations_history(getarrayitem_gc=0) + + def test_fold_int_add_ovf(self): + def fn(n): + jit.promote(n) + try: + n = ovfcheck(n + 1) + except OverflowError: + return 12 + else: + return n + res = self.interp_operations(fn, [3]) + assert res == 4 + self.check_operations_history(int_add_ovf=0) + res = self.interp_operations(fn, [sys.maxint]) + assert res == 12 \ No newline at end of file diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -40,7 +40,7 @@ config.objspace.usemodules.array = False config.objspace.usemodules._weakref = True config.objspace.usemodules._sre = False -config.objspace.usemodules._lsprof = True +config.objspace.usemodules._lsprof = False # config.objspace.usemodules._ffi = True config.objspace.usemodules.micronumpy = False @@ -77,7 +77,7 @@ def read_code(): from pypy.module.marshal.interp_marshal import dumps - + filename = 'pypyjit_demo.py' source = readfile(filename) ec = space.getexecutioncontext() diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -16,7 +16,7 @@ if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 
'operator', 'posix', '_socket', '_sre', '_lsprof', '_weakref', - '__pypy__', 'cStringIO', '_collections']: + '__pypy__', 'cStringIO', '_collections', 'struct']: return True return False diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -337,7 +337,9 @@ assert loop.match_by_id('append', """ i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) - call(ConstClass(_ll_list_resize_ge__listPtr_Signed), p8, i15, descr=) + # Will be killed by the backend + i17 = arraylen_gc(p7, descr=) + call(ConstClass(_ll_list_resize_ge), p8, i15, descr=) guard_no_exception(descr=...) p17 = getfield_gc(p8, descr=) p19 = new_with_vtable(ConstClass(W_IntObject)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -40,10 +40,10 @@ log = self.run(fn, [1000]) assert log.result == 300 loop, = log.loops_by_filename(self.filepath) - # check that the call to ll_dict_lookup is not a call_may_force + # check that the call to ll_dict_lookup is not a call_may_force, the + # gc_id call is hoisted out of the loop, the id of a value obviously + # can't change ;) assert loop.match_by_id("getitem", """ - i25 = call(ConstClass(_ll_1_gc_identityhash__objectPtr), p6, descr=...) - ... i28 = call(ConstClass(ll_dict_lookup__dicttablePtr_objectPtr_Signed), p18, p6, i25, descr=...) ... p33 = call(ConstClass(ll_get_value__dicttablePtr_Signed), p18, i28, descr=...) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -92,7 +92,7 @@ """) - def test_cached_pure_func_of_equal_fields(self): + def test_cached_pure_func_of_equal_fields(self): def main(n): class A(object): def __init__(self, val): @@ -285,3 +285,48 @@ loop, = log.loops_by_id("globalread", is_entry_bridge=True) assert len(loop.ops_by_id("globalread")) == 0 + + def test_struct_module(self): + def main(): + import struct + i = 1 + while i < 1000: + x = struct.unpack("i", struct.pack("i", i))[0] # ID: struct + i += x / i + return i + + log = self.run(main) + assert log.result == main() + + loop, = log.loops_by_id("struct") + if sys.maxint == 2 ** 63 - 1: + extra = """ + i8 = int_lt(i4, -2147483648) + guard_false(i8, descr=...) + """ + else: + extra = "" + # This could, of course stand some improvement, to remove all these + # arithmatic ops, but we've removed all the core overhead. + assert loop.match_by_id("struct", """ + guard_not_invalidated(descr=...) + # struct.pack + %(32_bit_only)s + i11 = int_and(i4, 255) + i13 = int_rshift(i4, 8) + i14 = int_and(i13, 255) + i16 = int_rshift(i13, 8) + i17 = int_and(i16, 255) + i19 = int_rshift(i16, 8) + i20 = int_and(i19, 255) + + # struct.unpack + i22 = int_lshift(i14, 8) + i23 = int_or(i11, i22) + i25 = int_lshift(i17, 16) + i26 = int_or(i23, i25) + i28 = int_ge(i20, 128) + guard_false(i28, descr=...) 
+ i30 = int_lshift(i20, 24) + i31 = int_or(i26, i30) + """ % {"32_bit_only": extra}) \ No newline at end of file diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,5 +1,6 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + class TestString(BaseTestPyPyC): def test_lookup_default_encoding(self): def main(n): @@ -107,3 +108,52 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, i58, i7, descr=) """) + + def test_str_mod(self): + def main(n): + s = 0 + while n > 0: + s += len('%d %d' % (n, n)) + n -= 1 + return s + + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_gt(i4, 0) + guard_true(i7, descr=...) + guard_not_invalidated(descr=...) + p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + guard_no_exception(descr=...) + i10 = strlen(p9) + i11 = int_is_true(i10) + guard_true(i11, descr=...) + i13 = strgetitem(p9, 0) + i15 = int_eq(i13, 45) + guard_false(i15, descr=...) + i17 = int_sub(0, i10) + i19 = int_gt(i10, 23) + guard_false(i19, descr=...) + p21 = newstr(23) + copystrcontent(p9, p21, 0, 0, i10) + i25 = int_add(1, i10) + i26 = int_gt(i25, 23) + guard_false(i26, descr=...) + strsetitem(p21, i10, 32) + i29 = int_add(i10, 1) + i30 = int_add(i10, i25) + i31 = int_gt(i30, 23) + guard_false(i31, descr=...) + copystrcontent(p9, p21, 0, i25, i10) + i33 = int_eq(i30, 23) + guard_false(i33, descr=...) + p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + guard_no_exception(descr=...) + i37 = strlen(p35) + i38 = int_add_ovf(i5, i37) + guard_no_overflow(descr=...) 
+ i40 = int_sub(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i40, i38, descr=) + """) \ No newline at end of file diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -1,9 +1,9 @@ -from pypy.interpreter.error import OperationError - +from pypy.rlib import jit from pypy.rlib.objectmodel import specialize from pypy.rlib.rstruct.error import StructError +from pypy.rlib.rstruct.formatiterator import FormatIterator from pypy.rlib.rstruct.standardfmttable import PACK_ACCEPTS_BROKEN_INPUT -from pypy.rlib.rstruct.formatiterator import FormatIterator +from pypy.interpreter.error import OperationError class PackFormatIterator(FormatIterator): @@ -14,15 +14,20 @@ self.args_index = 0 self.result = [] # list of characters + # This *should* be always unroll safe, the only way to get here is by + # unroll the interpret function, which means the fmt is const, and thus + # this should be const (in theory ;) + @jit.unroll_safe + @specialize.arg(1) def operate(self, fmtdesc, repetitions): if fmtdesc.needcount: fmtdesc.pack(self, repetitions) else: for i in range(repetitions): fmtdesc.pack(self) - operate._annspecialcase_ = 'specialize:arg(1)' _operate_is_specialized_ = True + @jit.unroll_safe def align(self, mask): pad = (-len(self.result)) & mask for i in range(pad): @@ -130,13 +135,15 @@ self.inputpos = 0 self.result_w = [] # list of wrapped objects + # See above comment on operate. 
+ @jit.unroll_safe + @specialize.arg(1) def operate(self, fmtdesc, repetitions): if fmtdesc.needcount: fmtdesc.unpack(self, repetitions) else: for i in range(repetitions): fmtdesc.unpack(self) - operate._annspecialcase_ = 'specialize:arg(1)' _operate_is_specialized_ = True def align(self, mask): @@ -154,7 +161,6 @@ self.inputpos = end return s + @specialize.argtype(1) def appendobj(self, value): self.result_w.append(self.space.wrap(value)) - appendobj._annspecialcase_ = 'specialize:argtype(1)' - diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -3,6 +3,7 @@ from pypy.rlib.rstruct.error import StructError from pypy.rlib.rstruct.formatiterator import CalcSizeFormatIterator + @unwrap_spec(format=str) def calcsize(space, format): fmtiter = CalcSizeFormatIterator() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -250,7 +250,8 @@ def repr__Bytearray(space, w_bytearray): s = w_bytearray.data - buf = StringBuilder(50) + # Good default if there are no replacements. + buf = StringBuilder(len("bytearray(b'')") + len(s)) buf.append("bytearray(b'") diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -1,13 +1,15 @@ """ String formatting routines. 
""" -from pypy.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.objspace.std.unicodetype import unicode_from_object +from pypy.rlib import jit from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rfloat import formatd, DTSF_ALT, isnan, isinf -from pypy.interpreter.error import OperationError +from pypy.rlib.rstring import StringBuilder, UnicodeBuilder +from pypy.rlib.unroll import unrolling_iterable from pypy.tool.sourcetools import func_with_new_name -from pypy.rlib.rstring import StringBuilder, UnicodeBuilder -from pypy.objspace.std.unicodetype import unicode_from_object + class BaseStringFormatter(object): def __init__(self, space, values_w, w_valuedict): @@ -173,6 +175,9 @@ raise OperationError(space.w_ValueError, space.wrap("incomplete format")) + # Only shows up if we've already started inlining format(), so just + # unconditionally unroll this. + @jit.unroll_safe def getmappingkey(self): # return the mapping key in a '%(key)s' specifier fmt = self.fmt @@ -233,6 +238,8 @@ return w_value + # Same as getmappingkey + @jit.unroll_safe def peel_flags(self): self.f_ljust = False self.f_sign = False @@ -255,6 +262,8 @@ break self.forward() + # Same as getmappingkey + @jit.unroll_safe def peel_num(self): space = self.space c = self.peekchr() @@ -276,6 +285,7 @@ c = self.peekchr() return result + @jit.look_inside_iff(lambda self: jit.isconstant(self.fmt)) def format(self): lgt = len(self.fmt) + 4 * len(self.values_w) + 10 if do_unicode: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -132,7 +132,10 @@ cache[selector] = attr return attr - @jit.unroll_safe + @jit.look_inside_iff(lambda self, obj, selector, w_value: + jit.isconstant(self) and + jit.isconstant(selector[0]) and + jit.isconstant(selector[1])) def add_attr(self, obj, selector, w_value): # grumble, jit needs this attr = self._get_new_attr(selector[0], 
selector[1]) diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -3,9 +3,10 @@ import string from pypy.interpreter.error import OperationError -from pypy.rlib import rstring, runicode, rlocale, rarithmetic, rfloat +from pypy.rlib import rstring, runicode, rlocale, rarithmetic, rfloat, jit from pypy.rlib.objectmodel import specialize from pypy.rlib.rfloat import copysign, formatd +from pypy.tool import sourcetools @specialize.argtype(1) @@ -36,314 +37,321 @@ ANS_MANUAL = 3 -class TemplateFormatter(object): +def make_template_formatting_class(): + class TemplateFormatter(object): - _annspecialcase_ = "specialize:ctr_location" + parser_list_w = None - parser_list_w = None + def __init__(self, space, is_unicode, template): + self.space = space + self.is_unicode = is_unicode + self.empty = u"" if is_unicode else "" + self.template = template - def __init__(self, space, is_unicode, template): - self.space = space - self.is_unicode = is_unicode - self.empty = u"" if is_unicode else "" - self.template = template + def build(self, args): + self.args, self.kwargs = args.unpack() + self.auto_numbering = 0 + self.auto_numbering_state = ANS_INIT + return self._build_string(0, len(self.template), 2) - def build(self, args): - self.args, self.kwargs = args.unpack() - self.auto_numbering = 0 - self.auto_numbering_state = ANS_INIT - return self._build_string(0, len(self.template), 2) + def _build_string(self, start, end, level): + space = self.space + if self.is_unicode: + out = rstring.UnicodeBuilder() + else: + out = rstring.StringBuilder() + if not level: + raise OperationError(space.w_ValueError, + space.wrap("Recursion depth exceeded")) + level -= 1 + s = self.template + return self._do_build_string(start, end, level, out, s) - def _build_string(self, start, end, level): - space = self.space - if self.is_unicode: - out = rstring.UnicodeBuilder() - else: - out = 
rstring.StringBuilder() - if not level: - raise OperationError(space.w_ValueError, - space.wrap("Recursion depth exceeded")) - level -= 1 - s = self.template - last_literal = i = start - while i < end: - c = s[i] - i += 1 - if c == "{" or c == "}": - at_end = i == end - # Find escaped "{" and "}" - markup_follows = True - if c == "}": - if at_end or s[i] != "}": - raise OperationError(space.w_ValueError, - space.wrap("Single '}'")) - i += 1 - markup_follows = False - if c == "{": - if at_end: - raise OperationError(space.w_ValueError, - space.wrap("Single '{'")) - if s[i] == "{": + @jit.look_inside_iff(lambda self, start, end, level, out, s: jit.isconstant(s)) + def _do_build_string(self, start, end, level, out, s): + space = self.space + last_literal = i = start + while i < end: + c = s[i] + i += 1 + if c == "{" or c == "}": + at_end = i == end + # Find escaped "{" and "}" + markup_follows = True + if c == "}": + if at_end or s[i] != "}": + raise OperationError(space.w_ValueError, + space.wrap("Single '}'")) i += 1 markup_follows = False - # Attach literal data - out.append_slice(s, last_literal, i - 1) - if not markup_follows: + if c == "{": + if at_end: + raise OperationError(space.w_ValueError, + space.wrap("Single '{'")) + if s[i] == "{": + i += 1 + markup_follows = False + # Attach literal data + out.append_slice(s, last_literal, i - 1) + if not markup_follows: + last_literal = i + continue + nested = 1 + field_start = i + recursive = False + while i < end: + c = s[i] + if c == "{": + recursive = True + nested += 1 + elif c == "}": + nested -= 1 + if not nested: + break + i += 1 + if nested: + raise OperationError(space.w_ValueError, + space.wrap("Unmatched '{'")) + rendered = self._render_field(field_start, i, recursive, level) + out.append(rendered) + i += 1 last_literal = i - continue - nested = 1 - field_start = i - recursive = False - while i < end: - c = s[i] - if c == "{": - recursive = True - nested += 1 - elif c == "}": - nested -= 1 - if not nested: 
- break - i += 1 - if nested: - raise OperationError(space.w_ValueError, - space.wrap("Unmatched '{'")) - rendered = self._render_field(field_start, i, recursive, level) - out.append(rendered) + + out.append_slice(s, last_literal, end) + return out.build() + + def _parse_field(self, start, end): + s = self.template + # Find ":" or "!" + i = start + while i < end: + c = s[i] + if c == ":" or c == "!": + end_name = i + if c == "!": + i += 1 + if i == end: + w_msg = self.space.wrap("expected conversion") + raise OperationError(self.space.w_ValueError, w_msg) + conversion = s[i] + i += 1 + if i < end: + if s[i] != ':': + w_msg = self.space.wrap("expected ':' after" + " format specifier") + raise OperationError(self.space.w_ValueError, + w_msg) + i += 1 + else: + conversion = None + i += 1 + return s[start:end_name], conversion, i i += 1 - last_literal = i + return s[start:end], None, end - out.append_slice(s, last_literal, end) - return out.build() - - def _parse_field(self, start, end): - s = self.template - # Find ":" or "!" - i = start - while i < end: - c = s[i] - if c == ":" or c == "!": - end_name = i - if c == "!": - i += 1 - if i == end: - w_msg = self.space.wrap("expected conversion") - raise OperationError(self.space.w_ValueError, w_msg) - conversion = s[i] - i += 1 - if i < end: - if s[i] != ':': - w_msg = self.space.wrap("expected ':' after" - " format specifier") - raise OperationError(self.space.w_ValueError, - w_msg) - i += 1 + def _get_argument(self, name): + # First, find the argument. 
+ space = self.space + i = 0 + end = len(name) + while i < end: + c = name[i] + if c == "[" or c == ".": + break + i += 1 + empty = not i + if empty: + index = -1 + else: + index, stop = _parse_int(self.space, name, 0, i) + if stop != i: + index = -1 + use_numeric = empty or index != -1 + if self.auto_numbering_state == ANS_INIT and use_numeric: + if empty: + self.auto_numbering_state = ANS_AUTO else: - conversion = None - i += 1 - return s[start:end_name], conversion, i - i += 1 - return s[start:end], None, end - - def _get_argument(self, name): - # First, find the argument. - space = self.space - i = 0 - end = len(name) - while i < end: - c = name[i] - if c == "[" or c == ".": - break - i += 1 - empty = not i - if empty: - index = -1 - else: - index, stop = _parse_int(self.space, name, 0, i) - if stop != i: - index = -1 - use_numeric = empty or index != -1 - if self.auto_numbering_state == ANS_INIT and use_numeric: - if empty: - self.auto_numbering_state = ANS_AUTO - else: - self.auto_numbering_state = ANS_MANUAL - if use_numeric: - if self.auto_numbering_state == ANS_MANUAL: - if empty: - msg = "switching from manual to automatic numbering" + self.auto_numbering_state = ANS_MANUAL + if use_numeric: + if self.auto_numbering_state == ANS_MANUAL: + if empty: + msg = "switching from manual to automatic numbering" + raise OperationError(space.w_ValueError, + space.wrap(msg)) + elif not empty: + msg = "switching from automatic to manual numbering" raise OperationError(space.w_ValueError, space.wrap(msg)) - elif not empty: - msg = "switching from automatic to manual numbering" - raise OperationError(space.w_ValueError, - space.wrap(msg)) - if empty: - index = self.auto_numbering - self.auto_numbering += 1 - if index == -1: - kwarg = name[:i] - if self.is_unicode: + if empty: + index = self.auto_numbering + self.auto_numbering += 1 + if index == -1: + kwarg = name[:i] + if self.is_unicode: + try: + arg_key = kwarg.encode("latin-1") + except UnicodeEncodeError: + # Not 
going to be found in a dict of strings. + raise OperationError(space.w_KeyError, space.wrap(kwarg)) + else: + arg_key = kwarg try: - arg_key = kwarg.encode("latin-1") - except UnicodeEncodeError: - # Not going to be found in a dict of strings. - raise OperationError(space.w_KeyError, space.wrap(kwarg)) + w_arg = self.kwargs[arg_key] + except KeyError: + raise OperationError(space.w_KeyError, space.wrap(arg_key)) else: - arg_key = kwarg - try: - w_arg = self.kwargs[arg_key] - except KeyError: - raise OperationError(space.w_KeyError, space.wrap(arg_key)) - else: - try: - w_arg = self.args[index] - except IndexError: - w_msg = space.wrap("index out of range") - raise OperationError(space.w_IndexError, w_msg) - return self._resolve_lookups(w_arg, name, i, end) + try: + w_arg = self.args[index] + except IndexError: + w_msg = space.wrap("index out of range") + raise OperationError(space.w_IndexError, w_msg) + return self._resolve_lookups(w_arg, name, i, end) - def _resolve_lookups(self, w_obj, name, start, end): - # Resolve attribute and item lookups. - space = self.space - i = start - while i < end: - c = name[i] - if c == ".": + def _resolve_lookups(self, w_obj, name, start, end): + # Resolve attribute and item lookups. 
+ space = self.space + i = start + while i < end: + c = name[i] + if c == ".": + i += 1 + start = i + while i < end: + c = name[i] + if c == "[" or c == ".": + break + i += 1 + if start == i: + w_msg = space.wrap("Empty attribute in format string") + raise OperationError(space.w_ValueError, w_msg) + w_attr = space.wrap(name[start:i]) + if w_obj is not None: + w_obj = space.getattr(w_obj, w_attr) + else: + self.parser_list_w.append(space.newtuple([ + space.w_True, w_attr])) + elif c == "[": + got_bracket = False + i += 1 + start = i + while i < end: + c = name[i] + if c == "]": + got_bracket = True + break + i += 1 + if not got_bracket: + raise OperationError(space.w_ValueError, + space.wrap("Missing ']'")) + index, reached = _parse_int(self.space, name, start, i) + if index != -1 and reached == i: + w_item = space.wrap(index) + else: + w_item = space.wrap(name[start:i]) + i += 1 # Skip "]" + if w_obj is not None: + w_obj = space.getitem(w_obj, w_item) + else: + self.parser_list_w.append(space.newtuple([ + space.w_False, w_item])) + else: + msg = "Only '[' and '.' 
may follow ']'" + raise OperationError(space.w_ValueError, space.wrap(msg)) + return w_obj + + def formatter_field_name_split(self): + space = self.space + name = self.template + i = 0 + end = len(name) + while i < end: + c = name[i] + if c == "[" or c == ".": + break i += 1 - start = i - while i < end: - c = name[i] - if c == "[" or c == ".": - break - i += 1 - if start == i: - w_msg = space.wrap("Empty attribute in format string") - raise OperationError(space.w_ValueError, w_msg) - w_attr = space.wrap(name[start:i]) - if w_obj is not None: - w_obj = space.getattr(w_obj, w_attr) - else: - self.parser_list_w.append(space.newtuple([ - space.w_True, w_attr])) - elif c == "[": - got_bracket = False - i += 1 - start = i - while i < end: - c = name[i] - if c == "]": - got_bracket = True - break - i += 1 - if not got_bracket: - raise OperationError(space.w_ValueError, - space.wrap("Missing ']'")) - index, reached = _parse_int(self.space, name, start, i) - if index != -1 and reached == i: - w_item = space.wrap(index) - else: - w_item = space.wrap(name[start:i]) - i += 1 # Skip "]" - if w_obj is not None: - w_obj = space.getitem(w_obj, w_item) - else: - self.parser_list_w.append(space.newtuple([ - space.w_False, w_item])) + if i == 0: + index = -1 else: - msg = "Only '[' and '.' 
may follow ']'" - raise OperationError(space.w_ValueError, space.wrap(msg)) - return w_obj + index, stop = _parse_int(self.space, name, 0, i) + if stop != i: + index = -1 + if index >= 0: + w_first = space.wrap(index) + else: + w_first = space.wrap(name[:i]) + # + self.parser_list_w = [] + self._resolve_lookups(None, name, i, end) + # + return space.newtuple([w_first, + space.iter(space.newlist(self.parser_list_w))]) - def formatter_field_name_split(self): - space = self.space - name = self.template - i = 0 - end = len(name) - while i < end: - c = name[i] - if c == "[" or c == ".": - break - i += 1 - if i == 0: - index = -1 - else: - index, stop = _parse_int(self.space, name, 0, i) - if stop != i: - index = -1 - if index >= 0: - w_first = space.wrap(index) - else: - w_first = space.wrap(name[:i]) - # - self.parser_list_w = [] - self._resolve_lookups(None, name, i, end) - # - return space.newtuple([w_first, - space.iter(space.newlist(self.parser_list_w))]) + def _convert(self, w_obj, conversion): + space = self.space + conv = conversion[0] + if conv == "r": + return space.repr(w_obj) + elif conv == "s": + if self.is_unicode: + return space.call_function(space.w_unicode, w_obj) + return space.str(w_obj) + else: + raise OperationError(self.space.w_ValueError, + self.space.wrap("invalid conversion")) - def _convert(self, w_obj, conversion): - space = self.space - conv = conversion[0] - if conv == "r": - return space.repr(w_obj) - elif conv == "s": - if self.is_unicode: - return space.call_function(space.w_unicode, w_obj) - return space.str(w_obj) - else: - raise OperationError(self.space.w_ValueError, - self.space.wrap("invalid conversion")) + def _render_field(self, start, end, recursive, level): + name, conversion, spec_start = self._parse_field(start, end) + spec = self.template[spec_start:end] + # + if self.parser_list_w is not None: + # used from formatter_parser() + if level == 1: # ignore recursive calls + space = self.space + startm1 = start - 1 + assert 
startm1 >= self.last_end + w_entry = space.newtuple([ + space.wrap(self.template[self.last_end:startm1]), + space.wrap(name), + space.wrap(spec), + space.wrap(conversion)]) + self.parser_list_w.append(w_entry) + self.last_end = end + 1 + return self.empty + # + w_obj = self._get_argument(name) + if conversion is not None: + w_obj = self._convert(w_obj, conversion) + if recursive: + spec = self._build_string(spec_start, end, level) + w_rendered = self.space.format(w_obj, self.space.wrap(spec)) + unwrapper = "unicode_w" if self.is_unicode else "str_w" + to_interp = getattr(self.space, unwrapper) + return to_interp(w_rendered) - def _render_field(self, start, end, recursive, level): - name, conversion, spec_start = self._parse_field(start, end) - spec = self.template[spec_start:end] - # - if self.parser_list_w is not None: - # used from formatter_parser() - if level == 1: # ignore recursive calls - space = self.space - startm1 = start - 1 - assert startm1 >= self.last_end - w_entry = space.newtuple([ - space.wrap(self.template[self.last_end:startm1]), - space.wrap(name), - space.wrap(spec), - space.wrap(conversion)]) - self.parser_list_w.append(w_entry) - self.last_end = end + 1 - return self.empty - # - w_obj = self._get_argument(name) - if conversion is not None: - w_obj = self._convert(w_obj, conversion) - if recursive: - spec = self._build_string(spec_start, end, level) - w_rendered = self.space.format(w_obj, self.space.wrap(spec)) - unwrapper = "unicode_w" if self.is_unicode else "str_w" - to_interp = getattr(self.space, unwrapper) - return to_interp(w_rendered) + def formatter_parser(self): + self.parser_list_w = [] + self.last_end = 0 + self._build_string(0, len(self.template), 2) + # + space = self.space + if self.last_end < len(self.template): + w_lastentry = space.newtuple([ + space.wrap(self.template[self.last_end:]), + space.w_None, + space.w_None, + space.w_None]) + self.parser_list_w.append(w_lastentry) + return 
space.iter(space.newlist(self.parser_list_w)) + return TemplateFormatter - def formatter_parser(self): - self.parser_list_w = [] - self.last_end = 0 - self._build_string(0, len(self.template), 2) - # - space = self.space - if self.last_end < len(self.template): - w_lastentry = space.newtuple([ - space.wrap(self.template[self.last_end:]), - space.w_None, - space.w_None, - space.w_None]) - self.parser_list_w.append(w_lastentry) - return space.iter(space.newlist(self.parser_list_w)) - +StrTemplateFormatter = make_template_formatting_class() +UnicodeTemplateFormatter = make_template_formatting_class() def str_template_formatter(space, template): - return TemplateFormatter(space, False, template) + return StrTemplateFormatter(space, False, template) def unicode_template_formatter(space, template): - return TemplateFormatter(space, True, template) + return UnicodeTemplateFormatter(space, True, template) def format_method(space, w_string, args, is_unicode): @@ -380,756 +388,759 @@ LONG_DIGITS = string.digits + string.ascii_lowercase -class Formatter(BaseFormatter): - """__format__ implementation for builtin types.""" +def make_formatting_class(): + class Formatter(BaseFormatter): + """__format__ implementation for builtin types.""" - _annspecialcase_ = "specialize:ctr_location" - _grouped_digits = None + _grouped_digits = None - def __init__(self, space, is_unicode, spec): - self.space = space - self.is_unicode = is_unicode - self.empty = u"" if is_unicode else "" - self.spec = spec + def __init__(self, space, is_unicode, spec): + self.space = space + self.is_unicode = is_unicode + self.empty = u"" if is_unicode else "" + self.spec = spec - def _is_alignment(self, c): - return (c == "<" or - c == ">" or - c == "=" or - c == "^") + def _is_alignment(self, c): + return (c == "<" or + c == ">" or + c == "=" or + c == "^") - def _is_sign(self, c): - return (c == " " or - c == "+" or - c == "-") + def _is_sign(self, c): + return (c == " " or + c == "+" or + c == "-") - def 
_parse_spec(self, default_type, default_align): - space = self.space - self._fill_char = self._lit("\0")[0] - self._align = default_align - self._alternate = False - self._sign = "\0" - self._thousands_sep = False - self._precision = -1 - the_type = default_type - spec = self.spec - if not spec: - return True - length = len(spec) - i = 0 - got_align = True - if length - i >= 2 and self._is_alignment(spec[i + 1]): - self._align = spec[i + 1] - self._fill_char = spec[i] - i += 2 - elif length - i >= 1 and self._is_alignment(spec[i]): - self._align = spec[i] - i += 1 - else: - got_align = False - if length - i >= 1 and self._is_sign(spec[i]): - self._sign = spec[i] - i += 1 - if length - i >= 1 and spec[i] == "#": - self._alternate = True - i += 1 - if self._fill_char == "\0" and length - i >= 1 and spec[i] == "0": - self._fill_char = self._lit("0")[0] - if not got_align: - self._align = "=" - i += 1 - start_i = i - self._width, i = _parse_int(self.space, spec, i, length) - if length != i and spec[i] == ",": - self._thousands_sep = True - i += 1 - if length != i and spec[i] == ".": - i += 1 - self._precision, i = _parse_int(self.space, spec, i, length) - if self._precision == -1: + def _parse_spec(self, default_type, default_align): + space = self.space + self._fill_char = self._lit("\0")[0] + self._align = default_align + self._alternate = False + self._sign = "\0" + self._thousands_sep = False + self._precision = -1 + the_type = default_type + spec = self.spec + if not spec: + return True + length = len(spec) + i = 0 + got_align = True + if length - i >= 2 and self._is_alignment(spec[i + 1]): + self._align = spec[i + 1] + self._fill_char = spec[i] + i += 2 + elif length - i >= 1 and self._is_alignment(spec[i]): + self._align = spec[i] + i += 1 + else: + got_align = False + if length - i >= 1 and self._is_sign(spec[i]): + self._sign = spec[i] + i += 1 + if length - i >= 1 and spec[i] == "#": + self._alternate = True + i += 1 + if self._fill_char == "\0" and length - 
i >= 1 and spec[i] == "0": + self._fill_char = self._lit("0")[0] + if not got_align: + self._align = "=" + i += 1 + start_i = i + self._width, i = _parse_int(self.space, spec, i, length) + if length != i and spec[i] == ",": + self._thousands_sep = True + i += 1 + if length != i and spec[i] == ".": + i += 1 + self._precision, i = _parse_int(self.space, spec, i, length) + if self._precision == -1: + raise OperationError(space.w_ValueError, + space.wrap("no precision given")) + if length - i > 1: raise OperationError(space.w_ValueError, - space.wrap("no precision given")) - if length - i > 1: - raise OperationError(space.w_ValueError, - space.wrap("invalid format spec")) - if length - i == 1: - presentation_type = spec[i] - if self.is_unicode: - try: - the_type = spec[i].encode("ascii")[0] - except UnicodeEncodeError: + space.wrap("invalid format spec")) + if length - i == 1: + presentation_type = spec[i] + if self.is_unicode: + try: + the_type = spec[i].encode("ascii")[0] + except UnicodeEncodeError: + raise OperationError(space.w_ValueError, + space.wrap("invalid presentation type")) + else: + the_type = presentation_type + i += 1 + self._type = the_type + if self._thousands_sep: + tp = self._type + if (tp == "d" or + tp == "e" or + tp == "f" or + tp == "g" or + tp == "E" or + tp == "G" or + tp == "%" or + tp == "F" or + tp == "\0"): + # ok + pass + else: raise OperationError(space.w_ValueError, - space.wrap("invalid presentation type")) + space.wrap("invalid type with ','")) + return False + + def _calc_padding(self, string, length): + """compute left and right padding, return total width of string""" + if self._width != -1 and length < self._width: + total = self._width else: - the_type = presentation_type - i += 1 - self._type = the_type - if self._thousands_sep: - tp = self._type - if (tp == "d" or - tp == "e" or - tp == "f" or - tp == "g" or - tp == "E" or - tp == "G" or - tp == "%" or - tp == "F" or - tp == "\0"): - # ok - pass + total = length + align = 
self._align + if align == ">": + left = total - length + elif align == "^": + left = (total - length) / 2 + elif align == "<" or align == "=": + left = 0 else: - raise OperationError(space.w_ValueError, - space.wrap("invalid type with ','")) - return False + raise AssertionError("shouldn't be here") + right = total - length - left + self._left_pad = left + self._right_pad = right + return total - def _calc_padding(self, string, length): - """compute left and right padding, return total width of string""" - if self._width != -1 and length < self._width: - total = self._width - else: - total = length - align = self._align - if align == ">": - left = total - length - elif align == "^": - left = (total - length) / 2 - elif align == "<" or align == "=": - left = 0 - else: - raise AssertionError("shouldn't be here") - right = total - length - left - self._left_pad = left - self._right_pad = right - return total - - def _lit(self, s): - if self.is_unicode: - return s.decode("ascii") - else: - return s - - def _pad(self, string): - builder = self._builder() - builder.append_multiple_char(self._fill_char, self._left_pad) - builder.append(string) - builder.append_multiple_char(self._fill_char, self._right_pad) - return builder.build() - - def _builder(self): - if self.is_unicode: - return rstring.UnicodeBuilder() - else: - return rstring.StringBuilder() - - def _unknown_presentation(self, tp): - msg = "unknown presentation for %s: '%s'" - w_msg = self.space.wrap(msg % (tp, self._type)) - raise OperationError(self.space.w_ValueError, w_msg) - - def format_string(self, string): - space = self.space - if self._parse_spec("s", "<"): - return space.wrap(string) - if self._type != "s": - self._unknown_presentation("string") - if self._sign != "\0": - msg = "Sign not allowed in string format specifier" - raise OperationError(space.w_ValueError, space.wrap(msg)) - if self._alternate: - msg = "Alternate form not allowed in string format specifier" - raise 
OperationError(space.w_ValueError, space.wrap(msg)) - if self._align == "=": - msg = "'=' alignment not allowed in string format specifier" - raise OperationError(space.w_ValueError, space.wrap(msg)) - length = len(string) - precision = self._precision - if precision != -1 and length >= precision: - assert precision >= 0 - length = precision - string = string[:precision] - if self._fill_char == "\0": - self._fill_char = self._lit(" ")[0] - self._calc_padding(string, length) - return space.wrap(self._pad(string)) - - def _get_locale(self, tp): - space = self.space - if tp == "n": - dec, thousands, grouping = rlocale.numeric_formatting() - elif self._thousands_sep: - dec = "." - thousands = "," - grouping = "\3\0" - else: - dec = "." - thousands = "" - grouping = "\256" - if self.is_unicode: - self._loc_dec = dec.decode("ascii") - self._loc_thousands = thousands.decode("ascii") - else: - self._loc_dec = dec - self._loc_thousands = thousands - self._loc_grouping = grouping - - def _calc_num_width(self, n_prefix, sign_char, to_number, n_number, - n_remainder, has_dec, digits): - """Calculate widths of all parts of formatted number. 
- - Output will look like: - - - - - sign is computed from self._sign, and the sign of the number - prefix is given - digits is known - """ - spec = NumberSpec() - spec.n_digits = n_number - n_remainder - has_dec - spec.n_prefix = n_prefix - spec.n_lpadding = 0 - spec.n_decimal = int(has_dec) - spec.n_remainder = n_remainder - spec.n_spadding = 0 - spec.n_rpadding = 0 - spec.n_min_width = 0 - spec.n_total = 0 - spec.sign = "\0" - spec.n_sign = 0 - sign = self._sign - if sign == "+": - spec.n_sign = 1 - spec.sign = "-" if sign_char == "-" else "+" - elif sign == " ": - spec.n_sign = 1 - spec.sign = "-" if sign_char == "-" else " " - elif sign_char == "-": - spec.n_sign = 1 - spec.sign = "-" - extra_length = (spec.n_sign + spec.n_prefix + spec.n_decimal + - spec.n_remainder) # Not padding or digits - if self._fill_char == "0" and self._align == "=": - spec.n_min_width = self._width - extra_length - if self._loc_thousands: - self._group_digits(spec, digits[to_number:]) - n_grouped_digits = len(self._grouped_digits) - else: - n_grouped_digits = spec.n_digits - n_padding = self._width - (extra_length + n_grouped_digits) - if n_padding > 0: - align = self._align - if align == "<": - spec.n_rpadding = n_padding - elif align == ">": - spec.n_lpadding = n_padding - elif align == "^": - spec.n_lpadding = n_padding // 2 - spec.n_rpadding = n_padding - spec.n_lpadding - elif align == "=": - spec.n_spadding = n_padding - else: - raise AssertionError("shouldn't reach") - spec.n_total = spec.n_lpadding + spec.n_sign + spec.n_prefix + \ - spec.n_spadding + n_grouped_digits + \ - spec.n_decimal + spec.n_remainder + spec.n_rpadding - return spec - - def _fill_digits(self, buf, digits, d_state, n_chars, n_zeros, - thousands_sep): - if thousands_sep: - for c in thousands_sep: - buf.append(c) - for i in range(d_state - 1, d_state - n_chars - 1, -1): - buf.append(digits[i]) - for i in range(n_zeros): - buf.append("0") - - def _group_digits(self, spec, digits): - buf = [] - grouping = 
self._loc_grouping - min_width = spec.n_min_width - grouping_state = 0 - count = 0 - left = spec.n_digits - n_ts = len(self._loc_thousands) - need_separator = False - done = False - groupings = len(grouping) - previous = 0 - while True: - group = ord(grouping[grouping_state]) - if group > 0: - if group == 256: - break - grouping_state += 1 - previous = group - else: - group = previous - final_grouping = min(group, max(left, max(min_width, 1))) - n_zeros = max(0, final_grouping - left) - n_chars = max(0, min(left, final_grouping)) - ts = self._loc_thousands if need_separator else None - self._fill_digits(buf, digits, left, n_chars, n_zeros, ts) - need_separator = True - left -= n_chars - min_width -= final_grouping - if left <= 0 and min_width <= 0: - done = True - break - min_width -= n_ts - if not done: - group = max(max(left, min_width), 1) - n_zeros = max(0, group - left) - n_chars = max(0, min(left, group)) - ts = self._loc_thousands if need_separator else None - self._fill_digits(buf, digits, left, n_chars, n_zeros, ts) - buf.reverse() - self._grouped_digits = self.empty.join(buf) - - def _upcase_string(self, s): - buf = [] - for c in s: - index = ord(c) - if ord("a") <= index <= ord("z"): - c = chr(index - 32) - buf.append(c) - return self.empty.join(buf) - - - def _fill_number(self, spec, num, to_digits, to_prefix, fill_char, - to_remainder, upper, grouped_digits=None): - out = self._builder() - if spec.n_lpadding: - out.append_multiple_char(fill_char[0], spec.n_lpadding) - if spec.n_sign: - if self.is_unicode: - sign = spec.sign.decode("ascii") - else: - sign = spec.sign - out.append(sign) - if spec.n_prefix: - pref = num[to_prefix:to_prefix + spec.n_prefix] - if upper: - pref = self._upcase_string(pref) - out.append(pref) - if spec.n_spadding: - out.append_multiple_char(fill_char[0], spec.n_spadding) - if spec.n_digits != 0: - if self._loc_thousands: - if grouped_digits is not None: - digits = grouped_digits - else: - digits = self._grouped_digits - assert 
digits is not None - else: - stop = to_digits + spec.n_digits - assert stop >= 0 - digits = num[to_digits:stop] - if upper: - digits = self._upcase_string(digits) - out.append(digits) - if spec.n_decimal: - out.append(self._lit(".")[0]) - if spec.n_remainder: - out.append(num[to_remainder:]) - if spec.n_rpadding: - out.append_multiple_char(fill_char[0], spec.n_rpadding) - #if complex, need to call twice - just retun the buffer - return out.build() - - def _format_int_or_long(self, w_num, kind): - space = self.space - if self._precision != -1: - msg = "precision not allowed in integer type" - raise OperationError(space.w_ValueError, space.wrap(msg)) - sign_char = "\0" - tp = self._type - if tp == "c": - if self._sign != "\0": - msg = "sign not allowed with 'c' presentation type" - raise OperationError(space.w_ValueError, space.wrap(msg)) - value = space.int_w(w_num) - if self.is_unicode: - result = runicode.UNICHR(value) - else: - result = chr(value) - n_digits = 1 - n_remainder = 1 - to_remainder = 0 - n_prefix = 0 - to_prefix = 0 - to_numeric = 0 - else: - if tp == "b": - base = 2 - skip_leading = 2 - elif tp == "o": - base = 8 - skip_leading = 2 - elif tp == "x" or tp == "X": - base = 16 - skip_leading = 2 - elif tp == "n" or tp == "d": - base = 10 - skip_leading = 0 - else: - raise AssertionError("shouldn't reach") - if kind == INT_KIND: - result = self._int_to_base(base, space.int_w(w_num)) - else: - result = self._long_to_base(base, space.bigint_w(w_num)) - n_prefix = skip_leading if self._alternate else 0 - to_prefix = 0 - if result[0] == "-": - sign_char = "-" - skip_leading += 1 - to_prefix += 1 - n_digits = len(result) - skip_leading - n_remainder = 0 - to_remainder = 0 - to_numeric = skip_leading - self._get_locale(tp) - spec = self._calc_num_width(n_prefix, sign_char, to_numeric, n_digits, - n_remainder, False, result) - fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char - upper = self._type == "X" - return 
self.space.wrap(self._fill_number(spec, result, to_numeric, - to_prefix, fill, to_remainder, upper)) - - def _long_to_base(self, base, value): - prefix = "" - if base == 2: - prefix = "0b" - elif base == 8: - prefix = "0o" - elif base == 16: - prefix = "0x" - as_str = value.format(LONG_DIGITS[:base], prefix) - if self.is_unicode: - return as_str.decode("ascii") - return as_str - - def _int_to_base(self, base, value): - if base == 10: - s = str(value) + def _lit(self, s): if self.is_unicode: return s.decode("ascii") - return s - # This part is slow. - negative = value < 0 - value = abs(value) - buf = ["\0"] * (8 * 8 + 6) # Too much on 32 bit, but who cares? - i = len(buf) - 1 - while True: - div = value // base - mod = value - div * base - digit = abs(mod) - digit += ord("0") if digit < 10 else ord("a") - 10 - buf[i] = chr(digit) - value = div + else: + return s + + def _pad(self, string): + builder = self._builder() + builder.append_multiple_char(self._fill_char, self._left_pad) + builder.append(string) + builder.append_multiple_char(self._fill_char, self._right_pad) + return builder.build() + + def _builder(self): + if self.is_unicode: + return rstring.UnicodeBuilder() + else: + return rstring.StringBuilder() + + def _unknown_presentation(self, tp): + msg = "unknown presentation for %s: '%s'" + w_msg = self.space.wrap(msg % (tp, self._type)) + raise OperationError(self.space.w_ValueError, w_msg) + + def format_string(self, string): + space = self.space + if self._parse_spec("s", "<"): + return space.wrap(string) + if self._type != "s": + self._unknown_presentation("string") + if self._sign != "\0": + msg = "Sign not allowed in string format specifier" + raise OperationError(space.w_ValueError, space.wrap(msg)) + if self._alternate: + msg = "Alternate form not allowed in string format specifier" + raise OperationError(space.w_ValueError, space.wrap(msg)) + if self._align == "=": + msg = "'=' alignment not allowed in string format specifier" + raise 
OperationError(space.w_ValueError, space.wrap(msg)) + length = len(string) + precision = self._precision + if precision != -1 and length >= precision: + assert precision >= 0 + length = precision + string = string[:precision] + if self._fill_char == "\0": + self._fill_char = self._lit(" ")[0] + self._calc_padding(string, length) + return space.wrap(self._pad(string)) + + def _get_locale(self, tp): + space = self.space + if tp == "n": + dec, thousands, grouping = rlocale.numeric_formatting() + elif self._thousands_sep: + dec = "." + thousands = "," + grouping = "\3\0" + else: + dec = "." + thousands = "" + grouping = "\256" + if self.is_unicode: + self._loc_dec = dec.decode("ascii") + self._loc_thousands = thousands.decode("ascii") + else: + self._loc_dec = dec + self._loc_thousands = thousands + self._loc_grouping = grouping + + def _calc_num_width(self, n_prefix, sign_char, to_number, n_number, + n_remainder, has_dec, digits): + """Calculate widths of all parts of formatted number. + + Output will look like: + + + + + sign is computed from self._sign, and the sign of the number + prefix is given + digits is known + """ + spec = NumberSpec() + spec.n_digits = n_number - n_remainder - has_dec + spec.n_prefix = n_prefix + spec.n_lpadding = 0 + spec.n_decimal = int(has_dec) + spec.n_remainder = n_remainder + spec.n_spadding = 0 + spec.n_rpadding = 0 + spec.n_min_width = 0 + spec.n_total = 0 + spec.sign = "\0" + spec.n_sign = 0 + sign = self._sign + if sign == "+": + spec.n_sign = 1 + spec.sign = "-" if sign_char == "-" else "+" + elif sign == " ": + spec.n_sign = 1 + spec.sign = "-" if sign_char == "-" else " " + elif sign_char == "-": + spec.n_sign = 1 + spec.sign = "-" + extra_length = (spec.n_sign + spec.n_prefix + spec.n_decimal + + spec.n_remainder) # Not padding or digits + if self._fill_char == "0" and self._align == "=": + spec.n_min_width = self._width - extra_length + if self._loc_thousands: + self._group_digits(spec, digits[to_number:]) + n_grouped_digits = 
len(self._grouped_digits) + else: + n_grouped_digits = spec.n_digits + n_padding = self._width - (extra_length + n_grouped_digits) + if n_padding > 0: + align = self._align + if align == "<": + spec.n_rpadding = n_padding + elif align == ">": + spec.n_lpadding = n_padding + elif align == "^": + spec.n_lpadding = n_padding // 2 + spec.n_rpadding = n_padding - spec.n_lpadding + elif align == "=": + spec.n_spadding = n_padding + else: + raise AssertionError("shouldn't reach") + spec.n_total = spec.n_lpadding + spec.n_sign + spec.n_prefix + \ + spec.n_spadding + n_grouped_digits + \ + spec.n_decimal + spec.n_remainder + spec.n_rpadding + return spec + + def _fill_digits(self, buf, digits, d_state, n_chars, n_zeros, + thousands_sep): + if thousands_sep: + for c in thousands_sep: + buf.append(c) + for i in range(d_state - 1, d_state - n_chars - 1, -1): + buf.append(digits[i]) + for i in range(n_zeros): + buf.append("0") + + def _group_digits(self, spec, digits): + buf = [] + grouping = self._loc_grouping + min_width = spec.n_min_width + grouping_state = 0 + count = 0 + left = spec.n_digits + n_ts = len(self._loc_thousands) + need_separator = False + done = False + groupings = len(grouping) + previous = 0 + while True: + group = ord(grouping[grouping_state]) + if group > 0: + if group == 256: + break + grouping_state += 1 + previous = group + else: + group = previous + final_grouping = min(group, max(left, max(min_width, 1))) + n_zeros = max(0, final_grouping - left) + n_chars = max(0, min(left, final_grouping)) + ts = self._loc_thousands if need_separator else None + self._fill_digits(buf, digits, left, n_chars, n_zeros, ts) + need_separator = True + left -= n_chars + min_width -= final_grouping + if left <= 0 and min_width <= 0: + done = True + break + min_width -= n_ts + if not done: + group = max(max(left, min_width), 1) + n_zeros = max(0, group - left) + n_chars = max(0, min(left, group)) + ts = self._loc_thousands if need_separator else None + self._fill_digits(buf, 
digits, left, n_chars, n_zeros, ts) + buf.reverse() + self._grouped_digits = self.empty.join(buf) + + def _upcase_string(self, s): + buf = [] + for c in s: + index = ord(c) + if ord("a") <= index <= ord("z"): + c = chr(index - 32) + buf.append(c) + return self.empty.join(buf) + + + def _fill_number(self, spec, num, to_digits, to_prefix, fill_char, + to_remainder, upper, grouped_digits=None): + out = self._builder() + if spec.n_lpadding: + out.append_multiple_char(fill_char[0], spec.n_lpadding) + if spec.n_sign: + if self.is_unicode: + sign = spec.sign.decode("ascii") + else: + sign = spec.sign + out.append(sign) + if spec.n_prefix: + pref = num[to_prefix:to_prefix + spec.n_prefix] + if upper: + pref = self._upcase_string(pref) + out.append(pref) + if spec.n_spadding: + out.append_multiple_char(fill_char[0], spec.n_spadding) + if spec.n_digits != 0: + if self._loc_thousands: + if grouped_digits is not None: + digits = grouped_digits + else: + digits = self._grouped_digits + assert digits is not None + else: + stop = to_digits + spec.n_digits + assert stop >= 0 + digits = num[to_digits:stop] + if upper: + digits = self._upcase_string(digits) + out.append(digits) + if spec.n_decimal: + out.append(self._lit(".")[0]) + if spec.n_remainder: + out.append(num[to_remainder:]) + if spec.n_rpadding: + out.append_multiple_char(fill_char[0], spec.n_rpadding) + #if complex, need to call twice - just retun the buffer + return out.build() + + def _format_int_or_long(self, w_num, kind): + space = self.space + if self._precision != -1: + msg = "precision not allowed in integer type" + raise OperationError(space.w_ValueError, space.wrap(msg)) + sign_char = "\0" + tp = self._type + if tp == "c": + if self._sign != "\0": + msg = "sign not allowed with 'c' presentation type" + raise OperationError(space.w_ValueError, space.wrap(msg)) + value = space.int_w(w_num) + if self.is_unicode: + result = runicode.UNICHR(value) + else: + result = chr(value) + n_digits = 1 + n_remainder = 1 + 
to_remainder = 0 + n_prefix = 0 + to_prefix = 0 + to_numeric = 0 + else: + if tp == "b": + base = 2 + skip_leading = 2 + elif tp == "o": + base = 8 + skip_leading = 2 + elif tp == "x" or tp == "X": + base = 16 + skip_leading = 2 + elif tp == "n" or tp == "d": + base = 10 + skip_leading = 0 + else: + raise AssertionError("shouldn't reach") + if kind == INT_KIND: + result = self._int_to_base(base, space.int_w(w_num)) + else: + result = self._long_to_base(base, space.bigint_w(w_num)) + n_prefix = skip_leading if self._alternate else 0 + to_prefix = 0 + if result[0] == "-": + sign_char = "-" + skip_leading += 1 + to_prefix += 1 + n_digits = len(result) - skip_leading + n_remainder = 0 + to_remainder = 0 + to_numeric = skip_leading + self._get_locale(tp) + spec = self._calc_num_width(n_prefix, sign_char, to_numeric, n_digits, + n_remainder, False, result) + fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char + upper = self._type == "X" + return self.space.wrap(self._fill_number(spec, result, to_numeric, + to_prefix, fill, to_remainder, upper)) + + def _long_to_base(self, base, value): + prefix = "" + if base == 2: + prefix = "0b" + elif base == 8: + prefix = "0o" + elif base == 16: + prefix = "0x" + as_str = value.format(LONG_DIGITS[:base], prefix) + if self.is_unicode: + return as_str.decode("ascii") + return as_str + + def _int_to_base(self, base, value): + if base == 10: + s = str(value) + if self.is_unicode: + return s.decode("ascii") + return s + # This part is slow. + negative = value < 0 + value = abs(value) + buf = ["\0"] * (8 * 8 + 6) # Too much on 32 bit, but who cares? 
+ i = len(buf) - 1 + while True: + div = value // base + mod = value - div * base + digit = abs(mod) + digit += ord("0") if digit < 10 else ord("a") - 10 + buf[i] = chr(digit) + value = div + i -= 1 + if not value: + break + if base == 2: + buf[i] = "b" + buf[i - 1] = "0" + elif base == 8: + buf[i] = "o" + buf[i - 1] = "0" + elif base == 16: + buf[i] = "x" + buf[i - 1] = "0" + else: + buf[i] = "#" + buf[i - 1] = chr(ord("0") + base % 10) + if base > 10: + buf[i - 2] = chr(ord("0") + base // 10) + i -= 1 i -= 1 - if not value: - break - if base == 2: - buf[i] = "b" - buf[i - 1] = "0" - elif base == 8: - buf[i] = "o" - buf[i - 1] = "0" - elif base == 16: - buf[i] = "x" - buf[i - 1] = "0" - else: - buf[i] = "#" - buf[i - 1] = chr(ord("0") + base % 10) - if base > 10: - buf[i - 2] = chr(ord("0") + base // 10) + if negative: i -= 1 - i -= 1 - if negative: - i -= 1 - buf[i] = "-" - assert i >= 0 - return self.empty.join(buf[i:]) + buf[i] = "-" + assert i >= 0 + return self.empty.join(buf[i:]) - def format_int_or_long(self, w_num, kind): - space = self.space - if self._parse_spec("d", ">"): + def format_int_or_long(self, w_num, kind): + space = self.space + if self._parse_spec("d", ">"): + if self.is_unicode: + return space.call_function(space.w_unicode, w_num) + return self.space.str(w_num) + tp = self._type + if (tp == "b" or + tp == "c" or + tp == "d" or + tp == "o" or + tp == "x" or + tp == "X" or + tp == "n"): + return self._format_int_or_long(w_num, kind) + elif (tp == "e" or + tp == "E" or + tp == "f" or + tp == "F" or + tp == "g" or + tp == "G" or + tp == "%"): + w_float = space.float(w_num) + return self._format_float(w_float) + else: + self._unknown_presentation("int" if kind == INT_KIND else "long") + + def _parse_number(self, s, i): + """Determine if s has a decimal point, and the index of the first # + after the decimal, or the end of the number.""" + length = len(s) + while i < length and "0" <= s[i] <= "9": + i += 1 + rest = i + dec_point = i < length and 
s[i] == "." + if dec_point: + rest += 1 + #differs from CPython method - CPython sets n_remainder + return dec_point, rest + + def _format_float(self, w_float): + """helper for format_float""" + space = self.space + flags = 0 + default_precision = 6 + if self._alternate: + msg = "alternate form not allowed in float formats" + raise OperationError(space.w_ValueError, space.wrap(msg)) + tp = self._type + self._get_locale(tp) + if tp == "\0": + tp = "g" + default_precision = 12 + flags |= rfloat.DTSF_ADD_DOT_0 + elif tp == "n": + tp = "g" + value = space.float_w(w_float) + if tp == "%": + tp = "f" + value *= 100 + add_pct = True + else: + add_pct = False + if self._precision == -1: + self._precision = default_precision + result, special = rfloat.double_to_string(value, tp, + self._precision, flags) + if add_pct: + result += "%" + n_digits = len(result) + if result[0] == "-": + sign = "-" + to_number = 1 + n_digits -= 1 + else: + sign = "\0" + to_number = 0 + have_dec_point, to_remainder = self._parse_number(result, to_number) + n_remainder = len(result) - to_remainder if self.is_unicode: - return space.call_function(space.w_unicode, w_num) - return self.space.str(w_num) - tp = self._type - if (tp == "b" or - tp == "c" or - tp == "d" or - tp == "o" or - tp == "x" or - tp == "X" or - tp == "n"): - return self._format_int_or_long(w_num, kind) - elif (tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "%"): - w_float = space.float(w_num) - return self._format_float(w_float) - else: - self._unknown_presentation("int" if kind == INT_KIND else "long") + digits = result.decode("ascii") + else: + digits = result + spec = self._calc_num_width(0, sign, to_number, n_digits, + n_remainder, have_dec_point, digits) + fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char + return self.space.wrap(self._fill_number(spec, digits, to_number, 0, + fill, to_remainder, False)) - def _parse_number(self, s, i): - """Determine if s 
has a decimal point, and the index of the first # - after the decimal, or the end of the number.""" - length = len(s) - while i < length and "0" <= s[i] <= "9": - i += 1 - rest = i - dec_point = i < length and s[i] == "." - if dec_point: - rest += 1 - #differs from CPython method - CPython sets n_remainder - return dec_point, rest + def format_float(self, w_float): + space = self.space + if self._parse_spec("\0", ">"): + if self.is_unicode: + return space.call_function(space.w_unicode, w_float) + return space.str(w_float) + tp = self._type + if (tp == "\0" or + tp == "e" or + tp == "E" or + tp == "f" or + tp == "F" or + tp == "g" or + tp == "G" or + tp == "n" or + tp == "%"): + return self._format_float(w_float) + self._unknown_presentation("float") - def _format_float(self, w_float): - """helper for format_float""" - space = self.space - flags = 0 - default_precision = 6 - if self._alternate: - msg = "alternate form not allowed in float formats" - raise OperationError(space.w_ValueError, space.wrap(msg)) - tp = self._type - self._get_locale(tp) - if tp == "\0": - tp = "g" - default_precision = 12 - flags |= rfloat.DTSF_ADD_DOT_0 - elif tp == "n": - tp = "g" - value = space.float_w(w_float) - if tp == "%": - tp = "f" - value *= 100 - add_pct = True - else: - add_pct = False - if self._precision == -1: - self._precision = default_precision - result, special = rfloat.double_to_string(value, tp, - self._precision, flags) - if add_pct: - result += "%" - n_digits = len(result) - if result[0] == "-": - sign = "-" - to_number = 1 - n_digits -= 1 - else: - sign = "\0" - to_number = 0 - have_dec_point, to_remainder = self._parse_number(result, to_number) - n_remainder = len(result) - to_remainder - if self.is_unicode: - digits = result.decode("ascii") - else: - digits = result - spec = self._calc_num_width(0, sign, to_number, n_digits, - n_remainder, have_dec_point, digits) - fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char - return 
self.space.wrap(self._fill_number(spec, digits, to_number, 0, - fill, to_remainder, False)) + def _format_complex(self, w_complex): + space = self.space + tp = self._type + self._get_locale(tp) + default_precision = 6 + if self._align == "=": + # '=' alignment is invalid + msg = ("'=' alignment flag is not allowed in" + " complex format specifier") + raise OperationError(space.w_ValueError, space.wrap(msg)) + if self._fill_char == "0": + #zero padding is invalid + msg = "Zero padding is not allowed in complex format specifier" + raise OperationError(space.w_ValueError, space.wrap(msg)) + if self._alternate: + #alternate is invalid + msg = "Alternate form %s not allowed in complex format specifier" + raise OperationError(space.w_ValueError, + space.wrap(msg % (self._alternate))) + skip_re = 0 + add_parens = 0 + if tp == "\0": + #should mirror str() output + tp = "g" + default_precision = 12 + #test if real part is non-zero + if (w_complex.realval == 0 and + copysign(1., w_complex.realval) == 1.): + skip_re = 1 + else: + add_parens = 1 - def format_float(self, w_float): - space = self.space - if self._parse_spec("\0", ">"): + if tp == "n": + #same as 'g' except for locale, taken care of later + tp = "g" + + #check if precision not set + if self._precision == -1: + self._precision = default_precision + + #might want to switch to double_to_string from formatd + #in CPython it's named 're' - clashes with re module + re_num = formatd(w_complex.realval, tp, self._precision) + im_num = formatd(w_complex.imagval, tp, self._precision) + n_re_digits = len(re_num) + n_im_digits = len(im_num) + + to_real_number = 0 + to_imag_number = 0 + re_sign = im_sign = '' + #if a sign character is in the output, remember it and skip + if re_num[0] == "-": + re_sign = "-" + to_real_number = 1 + n_re_digits -= 1 + if im_num[0] == "-": + im_sign = "-" + to_imag_number = 1 + n_im_digits -= 1 + + #turn off padding - do it after number composition + #calc_num_width uses self._width, so assign to 
temporary variable, + #calculate width of real and imag parts, then reassign padding, align + tmp_fill_char = self._fill_char + tmp_align = self._align + tmp_width = self._width + self._fill_char = "\0" + self._align = "<" + self._width = -1 + + #determine if we have remainder, might include dec or exponent or both + re_have_dec, re_remainder_ptr = self._parse_number(re_num, + to_real_number) + im_have_dec, im_remainder_ptr = self._parse_number(im_num, + to_imag_number) + if self.is_unicode: - return space.call_function(space.w_unicode, w_float) - return space.str(w_float) - tp = self._type - if (tp == "\0" or - tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "n" or - tp == "%"): - return self._format_float(w_float) - self._unknown_presentation("float") + re_num = re_num.decode("ascii") + im_num = im_num.decode("ascii") - def _format_complex(self, w_complex): - space = self.space - tp = self._type - self._get_locale(tp) - default_precision = 6 - if self._align == "=": - # '=' alignment is invalid - msg = ("'=' alignment flag is not allowed in" - " complex format specifier") - raise OperationError(space.w_ValueError, space.wrap(msg)) - if self._fill_char == "0": - #zero padding is invalid - msg = "Zero padding is not allowed in complex format specifier" - raise OperationError(space.w_ValueError, space.wrap(msg)) - if self._alternate: - #alternate is invalid - msg = "Alternate form %s not allowed in complex format specifier" - raise OperationError(space.w_ValueError, - space.wrap(msg % (self._alternate))) - skip_re = 0 - add_parens = 0 - if tp == "\0": - #should mirror str() output - tp = "g" - default_precision = 12 - #test if real part is non-zero - if (w_complex.realval == 0 and - copysign(1., w_complex.realval) == 1.): - skip_re = 1 - else: - add_parens = 1 + #set remainder, in CPython _parse_number sets this + #using n_re_digits causes tests to fail + re_n_remainder = len(re_num) - re_remainder_ptr + im_n_remainder 
= len(im_num) - im_remainder_ptr + re_spec = self._calc_num_width(0, re_sign, to_real_number, n_re_digits, + re_n_remainder, re_have_dec, + re_num) - if tp == "n": - #same as 'g' except for locale, taken care of later - tp = "g" + #capture grouped digits b/c _fill_number reads from self._grouped_digits + #self._grouped_digits will get overwritten in imaginary calc_num_width + re_grouped_digits = self._grouped_digits + if not skip_re: + self._sign = "+" + im_spec = self._calc_num_width(0, im_sign, to_imag_number, n_im_digits, + im_n_remainder, im_have_dec, + im_num) - #check if precision not set - if self._precision == -1: - self._precision = default_precision + im_grouped_digits = self._grouped_digits + if skip_re: + re_spec.n_total = 0 - #might want to switch to double_to_string from formatd - #in CPython it's named 're' - clashes with re module - re_num = formatd(w_complex.realval, tp, self._precision) - im_num = formatd(w_complex.imagval, tp, self._precision) - n_re_digits = len(re_num) - n_im_digits = len(im_num) + #reassign width, alignment, fill character + self._align = tmp_align + self._width = tmp_width + self._fill_char = tmp_fill_char - to_real_number = 0 - to_imag_number = 0 - re_sign = im_sign = '' - #if a sign character is in the output, remember it and skip - if re_num[0] == "-": - re_sign = "-" - to_real_number = 1 - n_re_digits -= 1 - if im_num[0] == "-": - im_sign = "-" - to_imag_number = 1 - n_im_digits -= 1 + #compute L and R padding - stored in self._left_pad and self._right_pad + self._calc_padding(self.empty, re_spec.n_total + im_spec.n_total + 1 + + add_parens * 2) - #turn off padding - do it after number composition - #calc_num_width uses self._width, so assign to temporary variable, - #calculate width of real and imag parts, then reassign padding, align - tmp_fill_char = self._fill_char - tmp_align = self._align - tmp_width = self._width - self._fill_char = "\0" - self._align = "<" - self._width = -1 + out = self._builder() + fill = 
self._fill_char + if fill == "\0": + fill = self._lit(" ")[0] - #determine if we have remainder, might include dec or exponent or both - re_have_dec, re_remainder_ptr = self._parse_number(re_num, - to_real_number) - im_have_dec, im_remainder_ptr = self._parse_number(im_num, - to_imag_number) + #compose the string + #add left padding + out.append_multiple_char(fill, self._left_pad) + if add_parens: + out.append(self._lit('(')[0]) - if self.is_unicode: - re_num = re_num.decode("ascii") - im_num = im_num.decode("ascii") + #if the no. has a real component, add it + if not skip_re: + out.append(self._fill_number(re_spec, re_num, to_real_number, 0, + fill, re_remainder_ptr, False, + re_grouped_digits)) - #set remainder, in CPython _parse_number sets this - #using n_re_digits causes tests to fail - re_n_remainder = len(re_num) - re_remainder_ptr - im_n_remainder = len(im_num) - im_remainder_ptr - re_spec = self._calc_num_width(0, re_sign, to_real_number, n_re_digits, - re_n_remainder, re_have_dec, - re_num) + #add imaginary component + out.append(self._fill_number(im_spec, im_num, to_imag_number, 0, + fill, im_remainder_ptr, False, + im_grouped_digits)) - #capture grouped digits b/c _fill_number reads from self._grouped_digits - #self._grouped_digits will get overwritten in imaginary calc_num_width - re_grouped_digits = self._grouped_digits - if not skip_re: - self._sign = "+" - im_spec = self._calc_num_width(0, im_sign, to_imag_number, n_im_digits, - im_n_remainder, im_have_dec, - im_num) + #add 'j' character + out.append(self._lit('j')[0]) - im_grouped_digits = self._grouped_digits - if skip_re: - re_spec.n_total = 0 + if add_parens: + out.append(self._lit(')')[0]) - #reassign width, alignment, fill character - self._align = tmp_align - self._width = tmp_width - self._fill_char = tmp_fill_char + #add right padding + out.append_multiple_char(fill, self._right_pad) - #compute L and R padding - stored in self._left_pad and self._right_pad - self._calc_padding(self.empty, 
re_spec.n_total + im_spec.n_total + 1 + - add_parens * 2) + return self.space.wrap(out.build()) - out = self._builder() - fill = self._fill_char - if fill == "\0": - fill = self._lit(" ")[0] - #compose the string - #add left padding - out.append_multiple_char(fill, self._left_pad) - if add_parens: - out.append(self._lit('(')[0]) + def format_complex(self, w_complex): + """return the string representation of a complex number""" + space = self.space + #parse format specification, set associated variables + if self._parse_spec("\0", ">"): + return space.str(w_complex) + tp = self._type + if (tp == "\0" or + tp == "e" or + tp == "E" or + tp == "f" or + tp == "F" or + tp == "g" or + tp == "G" or + tp == "n"): + return self._format_complex(w_complex) + self._unknown_presentation("complex") + return Formatter - #if the no. has a real component, add it - if not skip_re: - out.append(self._fill_number(re_spec, re_num, to_real_number, 0, - fill, re_remainder_ptr, False, - re_grouped_digits)) - - #add imaginary component - out.append(self._fill_number(im_spec, im_num, to_imag_number, 0, - fill, im_remainder_ptr, False, - im_grouped_digits)) - - #add 'j' character - out.append(self._lit('j')[0]) - - if add_parens: - out.append(self._lit(')')[0]) - - #add right padding - out.append_multiple_char(fill, self._right_pad) - - return self.space.wrap(out.build()) - - - def format_complex(self, w_complex): - """return the string representation of a complex number""" - space = self.space - #parse format specification, set associated variables - if self._parse_spec("\0", ">"): - return space.str(w_complex) - tp = self._type - if (tp == "\0" or - tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "n"): - return self._format_complex(w_complex) - self._unknown_presentation("complex") +StrFormatter = make_formatting_class() +UnicodeFormatter = make_formatting_class() def unicode_formatter(space, spec): - return Formatter(space, True, spec) - + 
return StrFormatter(space, True, spec) def str_formatter(space, spec): - return Formatter(space, False, spec) + return UnicodeFormatter(space, False, spec) @specialize.arg(2) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -1,10 +1,13 @@ +import sys + import py -import sys + +from pypy.rlib.nonconst import NonConstant +from pypy.rlib.objectmodel import CDefinedIntSymbolic, keepalive_until_here, specialize +from pypy.rlib.unroll import unrolling_iterable from pypy.rpython.extregistry import ExtRegistryEntry -from pypy.rlib.objectmodel import CDefinedIntSymbolic -from pypy.rlib.objectmodel import keepalive_until_here, specialize -from pypy.rlib.unroll import unrolling_iterable -from pypy.rlib.nonconst import NonConstant +from pypy.tool.sourcetools import func_with_new_name + def elidable(func): """ Decorate a function as "trace-elidable". This means precisely that: @@ -72,17 +75,22 @@ func._jit_loop_invariant_ = True return func +def _get_args(func): + import inspect + + args, varargs, varkw, defaults = inspect.getargspec(func) + args = ["v%s" % (i, ) for i in range(len(args))] + assert varargs is None and varkw is None + assert not defaults + return args + def elidable_promote(promote_args='all'): """ A decorator that promotes all arguments and then calls the supplied function """ def decorator(func): - import inspect elidable(func) - args, varargs, varkw, defaults = inspect.getargspec(func) - args = ["v%s" % (i, ) for i in range(len(args))] - assert varargs is None and varkw is None - assert not defaults + args = _get_args(func) argstring = ", ".join(args) code = ["def f(%s):\n" % (argstring, )] if promote_args != 'all': @@ -102,6 +110,46 @@ warnings.warn("purefunction_promote is deprecated, use elidable_promote instead", DeprecationWarning) return elidable_promote(*args, **kwargs) +def look_inside_iff(predicate): + """ + look inside (including unrolling loops) the target function, if and only if + predicate(*args) 
returns True + """ + def inner(func): + func = unroll_safe(func) + # When we return the new function, it might be specialized in some + # way. We "propogate" this specialization by using + # specialize:call_location on relevant functions. + for thing in [func, predicate]: + thing._annspecialcase_ = "specialize:call_location" + + args = _get_args(func) + d = { + "dont_look_inside": dont_look_inside, + "predicate": predicate, + "func": func, + "we_are_jitted": we_are_jitted, + } + exec py.code.Source(""" + @dont_look_inside + def trampoline(%(arguments)s): + return func(%(arguments)s) + if hasattr(func, "oopspec"): + # XXX: This seems like it should be here, but it causes errors. + # trampoline.oopspec = func.oopspec + del func.oopspec + trampoline.__name__ = func.__name__ + "_trampoline" + trampoline._annspecialcase_ = "specialize:call_location" + + def f(%(arguments)s): + if not we_are_jitted() or predicate(%(arguments)s): + return func(%(arguments)s) + else: + return trampoline(%(arguments)s) + f.__name__ = func.__name__ + "_look_inside_iff" + """ % {"arguments": ", ".join(args)}).compile() in d + return d["f"] + return inner def oopspec(spec): def decorator(func): @@ -109,6 +157,34 @@ return func return decorator + at oopspec("jit.isconstant(value)") + at specialize.argtype(0) +def isconstant(value): + """ + While tracing, returns whether or not the value is currently known to be + constant. This is not perfect, values can become constant later. Mostly for + use with @look_inside_iff. + + This is for advanced usage only. + """ + # I hate the annotator so much. + if NonConstant(False): + return True + return False + + at oopspec("jit.isvirtual(value)") + at specialize.ll() +def isvirtual(value): + """ + Returns if this value is virtual, while tracing, it's relatively + conservative and will miss some cases. + + This is for advanced usage only. 
+ """ + if NonConstant(False): + return True + return False + class Entry(ExtRegistryEntry): _about_ = hint diff --git a/pypy/rlib/rgc.py b/pypy/rlib/rgc.py --- a/pypy/rlib/rgc.py +++ b/pypy/rlib/rgc.py @@ -1,6 +1,9 @@ -import gc, types +import gc +import types + +from pypy.rlib import jit +from pypy.rlib.objectmodel import we_are_translated, enforceargs, specialize from pypy.rpython.extregistry import ExtRegistryEntry -from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem import lltype, llmemory # ____________________________________________________________ @@ -32,7 +35,7 @@ if len(hop.args_s) == 1: args_v = hop.inputargs(lltype.Signed) return hop.genop('gc__collect', args_v, resulttype=hop.r_result) - + class SetMaxHeapSizeEntry(ExtRegistryEntry): _about_ = set_max_heap_size @@ -133,6 +136,9 @@ hop.exception_cannot_occur() return hop.genop(opname, vlist, resulttype = hop.r_result.lowleveltype) + at jit.oopspec('list.ll_arraycopy(source, dest, source_start, dest_start, length)') + at specialize.ll() + at enforceargs(None, None, int, int, int) def ll_arraycopy(source, dest, source_start, dest_start, length): from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import keepalive_until_here @@ -161,14 +167,11 @@ llmemory.sizeof(TP.OF) * source_start) cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) + llmemory.sizeof(TP.OF) * dest_start) - + llmemory.raw_memcopy(cp_source_addr, cp_dest_addr, llmemory.sizeof(TP.OF) * length) keepalive_until_here(source) keepalive_until_here(dest) -ll_arraycopy._annenforceargs_ = [None, None, int, int, int] -ll_arraycopy._annspecialcase_ = 'specialize:ll' -ll_arraycopy.oopspec = 'list.ll_arraycopy(source, dest, source_start, dest_start, length)' def ll_shrink_array(p, smallerlength): from pypy.rpython.lltypesystem.lloperation import llop @@ -192,7 +195,7 @@ llmemory.itemoffsetof(ARRAY, 0)) source_addr = llmemory.cast_ptr_to_adr(p) + offset dest_addr = 
llmemory.cast_ptr_to_adr(newp) + offset - llmemory.raw_memcopy(source_addr, dest_addr, + llmemory.raw_memcopy(source_addr, dest_addr, llmemory.sizeof(ARRAY.OF) * smallerlength) keepalive_until_here(p) diff --git a/pypy/rlib/rstruct/formatiterator.py b/pypy/rlib/rstruct/formatiterator.py --- a/pypy/rlib/rstruct/formatiterator.py +++ b/pypy/rlib/rstruct/formatiterator.py @@ -1,10 +1,10 @@ - -from pypy.rlib.rstruct.nativefmttable import native_is_bigendian -from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib import jit from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rstruct.error import StructError +from pypy.rlib.rstruct.nativefmttable import native_is_bigendian, native_fmttable from pypy.rlib.rstruct.standardfmttable import standard_fmttable -from pypy.rlib.rstruct.nativefmttable import native_fmttable +from pypy.rlib.unroll import unrolling_iterable + class FormatIterator(object): """ @@ -16,6 +16,7 @@ _mixin_ = True _operate_is_specialized_ = False + @jit.look_inside_iff(lambda self, fmt: jit.isconstant(fmt)) def interpret(self, fmt): # decode the byte order, size and alignment based on the 1st char table = unroll_native_fmtdescs diff --git a/pypy/rpython/lltypesystem/ll_str.py b/pypy/rpython/lltypesystem/ll_str.py --- a/pypy/rpython/lltypesystem/ll_str.py +++ b/pypy/rpython/lltypesystem/ll_str.py @@ -16,34 +16,31 @@ return r_uint(i) @jit.elidable -def ll_int2dec(i): +def ll_int2dec(val): from pypy.rpython.lltypesystem.rstr import mallocstr - temp = malloc(CHAR_ARRAY, 20) + + sign = int(val < 0) + if sign: + val = ll_unsigned(-val) + else: + val = ll_unsigned(val) len = 0 - sign = 0 - if i < 0: - sign = 1 - i = ll_unsigned(-i) - else: - i = ll_unsigned(i) - if i == 0: - len = 1 - temp[0] = '0' - else: - while i: - temp[len] = chr(i%10+ord('0')) - i //= 10 - len += 1 - len += sign - result = mallocstr(len) - result.hash = 0 + i = val + while i: + len += 1 + i //= 10 + + total_len = sign + len + int(val == 0) + result = mallocstr(total_len) if 
sign: result.chars[0] = '-' - j = 1 - else: - j = 0 + elif val == 0: + result.chars[0] = '0' + + j = 0 while j < len: - result.chars[j] = temp[len-j-1] + result.chars[total_len - j - 1] = chr(val % 10 + ord('0')) + val //= 10 j += 1 return result diff --git a/pypy/rpython/lltypesystem/rbuilder.py b/pypy/rpython/lltypesystem/rbuilder.py --- a/pypy/rpython/lltypesystem/rbuilder.py +++ b/pypy/rpython/lltypesystem/rbuilder.py @@ -1,4 +1,4 @@ -from pypy.rlib import rgc +from pypy.rlib import rgc, jit from pypy.rlib.objectmodel import enforceargs from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.annlowlevel import llstr @@ -95,6 +95,7 @@ ll_builder.used = needed + used @staticmethod + @jit.look_inside_iff(lambda ll_builder, char, times: jit.isconstant(times) and times <= 4) def ll_append_multiple_char(ll_builder, char, times): used = ll_builder.used if times + used > ll_builder.allocated: diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -9,7 +9,7 @@ GcStruct, Void, Signed, malloc, typeOf, nullptr, typeMethod from pypy.rpython.lltypesystem import rstr from pypy.rlib.debug import ll_assert -from pypy.rlib import rgc +from pypy.rlib import rgc, jit # ____________________________________________________________ # @@ -225,20 +225,22 @@ else: _ll_list_resize_really(l, newsize) + at jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) + at jit.oopspec("list._resize_ge(l, newsize)") def _ll_list_resize_ge(l, newsize): if len(l.items) >= newsize: l.length = newsize else: _ll_list_resize_really(l, newsize) -_ll_list_resize_ge.oopspec = 'list._resize_ge(l, newsize)' + at jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) + at jit.oopspec("list._resize_le(l, newsize)") def _ll_list_resize_le(l, newsize): if newsize >= (len(l.items) >> 1) - 5: l.length = newsize 
else: _ll_list_resize_really(l, newsize) - def ll_append_noresize(l, newitem): length = l.length l.length = length + 1 diff --git a/pypy/rpython/lltypesystem/rpbc.py b/pypy/rpython/lltypesystem/rpbc.py --- a/pypy/rpython/lltypesystem/rpbc.py +++ b/pypy/rpython/lltypesystem/rpbc.py @@ -230,7 +230,8 @@ args = bk.build_args(opname, hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) - shape, index = description.FunctionDesc.variant_for_call_site(bk, self.callfamily, descs, args) + vfcs = description.FunctionDesc.variant_for_call_site + shape, index = vfcs(bk, self.callfamily, descs, args, hop.spaceop) row_of_graphs = self.callfamily.calltables[shape][index] anygraph = row_of_graphs.itervalues().next() # pick any witness vlist = [hop.inputarg(self, arg=0)] diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -5,7 +5,7 @@ from pypy.rlib.objectmodel import _hash_string, enforceargs from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.debug import ll_assert -from pypy.rlib.jit import elidable, we_are_jitted, dont_look_inside +from pypy.rlib import jit from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.robject import PyObjRepr, pyobj_repr from pypy.rpython.rmodel import inputconst, IntegerRepr @@ -58,8 +58,7 @@ llmemory.itemoffsetof(TP.chars, 0) + llmemory.sizeof(CHAR_TP) * item) - # It'd be nice to be able to look inside this function. 
- @dont_look_inside + @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @enforceargs(None, None, int, int, int) def copy_string_contents(src, dst, srcstart, dststart, length): assert srcstart >= 0 @@ -71,8 +70,6 @@ keepalive_until_here(src) keepalive_until_here(dst) copy_string_contents._always_inline_ = True - #copy_string_contents.oopspec = ( - # '%s.copy_contents(src, dst, srcstart, dststart, length)' % name) return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') @@ -147,7 +144,7 @@ self.ll = LLHelpers self.malloc = mallocunicode - @elidable + @jit.elidable def ll_str(self, s): # XXX crazy that this is here, but I don't want to break # rmodel logic @@ -162,7 +159,7 @@ result.chars[i] = cast_primitive(Char, c) return result - @elidable + @jit.elidable def ll_encode_latin1(self, s): length = len(s.chars) result = mallocstr(length) @@ -261,7 +258,7 @@ class LLHelpers(AbstractLLHelpers): - @elidable + @jit.elidable def ll_str_mul(s, times): if times < 0: times = 0 @@ -283,7 +280,7 @@ i += j return newstr - @elidable + @jit.elidable def ll_char_mul(ch, times): if typeOf(ch) is Char: malloc = mallocstr @@ -328,7 +325,7 @@ return s ll_str2unicode.oopspec = 'str.str2unicode(str)' - @elidable + @jit.elidable def ll_strhash(s): # unlike CPython, there is no reason to avoid to return -1 # but our malloc initializes the memory to zero, so we use zero as the @@ -344,7 +341,7 @@ def ll_strfasthash(s): return s.hash # assumes that the hash is already computed - @elidable + @jit.elidable def ll_strconcat(s1, s2): len1 = len(s1.chars) len2 = len(s2.chars) @@ -356,7 +353,7 @@ return newstr ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' - @elidable + @jit.elidable def ll_strip(s, ch, left, right): s_len = len(s.chars) if s_len == 0: @@ -374,7 +371,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result - @elidable + @jit.elidable def ll_upper(s): s_chars = 
s.chars s_len = len(s_chars) @@ -391,7 +388,7 @@ i += 1 return result - @elidable + @jit.elidable def ll_lower(s): s_chars = s.chars s_len = len(s_chars) @@ -441,7 +438,7 @@ i += 1 return result - @elidable + @jit.elidable def ll_strcmp(s1, s2): if not s1 and not s2: return True @@ -464,7 +461,7 @@ i += 1 return len1 - len2 - @elidable + @jit.elidable def ll_streq(s1, s2): if s1 == s2: # also if both are NULLs return True @@ -484,7 +481,7 @@ return True ll_streq.oopspec = 'stroruni.equal(s1, s2)' - @elidable + @jit.elidable def ll_startswith(s1, s2): len1 = len(s1.chars) len2 = len(s2.chars) @@ -505,7 +502,7 @@ return False return s.chars[0] == ch - @elidable + @jit.elidable def ll_endswith(s1, s2): len1 = len(s1.chars) len2 = len(s2.chars) @@ -527,7 +524,7 @@ return False return s.chars[len(s.chars) - 1] == ch - @elidable + @jit.elidable def ll_find_char(s, ch, start, end): i = start if end > len(s.chars): @@ -539,7 +536,7 @@ return -1 ll_find_char._annenforceargs_ = [None, None, int, int] - @elidable + @jit.elidable def ll_rfind_char(s, ch, start, end): if end > len(s.chars): end = len(s.chars) @@ -550,7 +547,7 @@ return i return -1 - @elidable + @jit.elidable def ll_count_char(s, ch, start, end): count = 0 i = start @@ -618,7 +615,7 @@ res = 0 return res - @elidable + @jit.elidable def ll_search(s1, s2, start, end, mode): count = 0 n = end - start @@ -697,7 +694,13 @@ return -1 return count + @jit.look_inside_iff(lambda length, items: jit.isconstant(length) and length <= 2) + @enforceargs(int, None) def ll_join_strs(length, items): + # Special case for length 1 items, helps both the JIT and other code + if length == 1: + return items[0] + num_items = length itemslen = 0 i = 0 @@ -724,8 +727,8 @@ res_index += item_len i += 1 return result - ll_join_strs._annenforceargs_ = [int, None] + @jit.look_inside_iff(lambda length, chars, RES: jit.isconstant(length) and jit.isvirtual(chars)) def ll_join_chars(length, chars, RES): # no need to optimize this, will be replaced 
by string builder # at some point soon @@ -744,7 +747,7 @@ i += 1 return result - @elidable + @jit.elidable def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 @@ -759,7 +762,7 @@ return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) def ll_stringslice_startstop(s1, start, stop): - if we_are_jitted(): + if jit.we_are_jitted(): if stop > len(s1.chars): stop = len(s1.chars) else: @@ -842,7 +845,7 @@ item.copy_contents(s, item, j, 0, i - j) return res - @elidable + @jit.elidable def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) newstr = s.malloc(length) @@ -857,7 +860,7 @@ j += 1 return newstr - @elidable + @jit.elidable def ll_contains(s, c): chars = s.chars strlen = len(chars) @@ -868,7 +871,7 @@ i += 1 return False - @elidable + @jit.elidable def ll_int(s, base): if not 2 <= base <= 36: raise ValueError diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -247,7 +247,7 @@ fn = None v_obj = hop.inputarg(r_func, arg=arg) s_pbc_fn = hop.args_s[arg] - methodname = r_func._get_method_name("simple_call", s_pbc_fn, params_annotation) + methodname = r_func._get_method_name("simple_call", s_pbc_fn, params_annotation, hop) elif isinstance(r_func, MethodOfFrozenPBCRepr): r_impl, nimplicitarg = r_func.get_r_implfunc() fn = r_impl.get_unique_llfn().value diff --git a/pypy/rpython/ootypesystem/rpbc.py b/pypy/rpython/ootypesystem/rpbc.py --- a/pypy/rpython/ootypesystem/rpbc.py +++ b/pypy/rpython/ootypesystem/rpbc.py @@ -130,14 +130,14 @@ def call(self, opname, hop): s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc args_s = hop.args_s[1:] - shape, index, callfamily = self._get_shape_index_callfamily(opname, s_pbc, args_s) + shape, index, callfamily = self._get_shape_index_callfamily(opname, s_pbc, args_s, hop) row_of_graphs = callfamily.calltables[shape][index] anygraph = row_of_graphs.itervalues().next() # pick any 
witness hop2 = self.add_instance_arg_to_hop(hop, opname == "call_args") vlist = callparse.callparse(self.rtyper, anygraph, hop2, opname, r_self = self.r_im_self) rresult = callparse.getrresult(self.rtyper, anygraph) - derived_mangled = self._get_method_name(opname, s_pbc, args_s) + derived_mangled = self._get_method_name(opname, s_pbc, args_s, hop) cname = hop.inputconst(ootype.Void, derived_mangled) hop.exception_is_here() # sanity check: make sure that INSTANCE has the method @@ -151,18 +151,18 @@ else: return hop.llops.convertvar(v, rresult, hop.r_result) - def _get_shape_index_callfamily(self, opname, s_pbc, args_s): + def _get_shape_index_callfamily(self, opname, s_pbc, args_s, hop): bk = self.rtyper.annotator.bookkeeper args = bk.build_args(opname, args_s) args = args.prepend(self.s_im_self) descs = [desc.funcdesc for desc in s_pbc.descriptions] callfamily = descs[0].getcallfamily() shape, index = description.FunctionDesc.variant_for_call_site( - bk, callfamily, descs, args) + bk, callfamily, descs, args, hop.spaceop) return shape, index, callfamily - def _get_method_name(self, opname, s_pbc, args_s): - shape, index, callfamily = self._get_shape_index_callfamily(opname, s_pbc, args_s) + def _get_method_name(self, opname, s_pbc, args_s, hop): + shape, index, callfamily = self._get_shape_index_callfamily(opname, s_pbc, args_s, hop) mangled = mangle(self.methodname, self.rtyper.getconfig()) row = self.concretetable[shape, index] derived_mangled = row_method_name(mangled, row.attrname) diff --git a/pypy/rpython/rlist.py b/pypy/rpython/rlist.py --- a/pypy/rpython/rlist.py +++ b/pypy/rpython/rlist.py @@ -116,7 +116,7 @@ v_lst = hop.inputarg(self, 0) cRESLIST = hop.inputconst(Void, hop.r_result.LIST) return hop.gendirectcall(ll_copy, cRESLIST, v_lst) - + def rtype_len(self, hop): v_lst, = hop.inputargs(self) if hop.args_s[0].listdef.listitem.resized: @@ -132,7 +132,7 @@ else: ll_func = ll_list_is_true_foldable return hop.gendirectcall(ll_func, v_lst) - + def 
rtype_method_reverse(self, hop): v_lst, = hop.inputargs(self) hop.exception_cannot_occur() @@ -273,7 +273,7 @@ return pair(r_lst, r_int).rtype_getitem(hop, checkidx=True) rtype_getitem_idx_key = rtype_getitem_idx - + def rtype_setitem((r_lst, r_int), hop): if hop.has_implicit_exception(IndexError): spec = dum_checkidx @@ -331,7 +331,7 @@ ## return hop.gendirectcall(ll_both_none, v_lst1, v_lst2) ## return pairtype(Repr, Repr).rtype_is_(pair(r_lst1, r_lst2), hop) - + def rtype_eq((r_lst1, r_lst2), hop): assert r_lst1.item_repr == r_lst2.item_repr v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2) @@ -499,7 +499,7 @@ else: check = item if (not malloc_zero_filled) or check: # as long as malloc it is known to zero the allocated memory avoid zeroing twice - + i = 0 while i < count: l.ll_setitem_fast(i, item) @@ -633,7 +633,6 @@ l.ll_setitem_fast(index, null) l._ll_resize_le(newlength) return res -ll_pop_default.oopspec = 'list.pop(l)' def ll_pop_zero(func, l): length = l.ll_length() diff --git a/pypy/rpython/rpbc.py b/pypy/rpython/rpbc.py --- a/pypy/rpython/rpbc.py +++ b/pypy/rpython/rpbc.py @@ -322,7 +322,8 @@ args = bk.build_args(opname, hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) - shape, index = description.FunctionDesc.variant_for_call_site(bk, self.callfamily, descs, args) + vfcs = description.FunctionDesc.variant_for_call_site + shape, index = vfcs(bk, self.callfamily, descs, args, hop.spaceop) row_of_graphs = self.callfamily.calltables[shape][index] anygraph = row_of_graphs.itervalues().next() # pick any witness vfn = hop.inputarg(self, arg=0) diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -18,8 +18,8 @@ t = TranslationContext() t.buildannotator().build_types(func, types) t.buildrtyper().specialize() - t.checkgraphs() - + t.checkgraphs() + def test_not1(self): self._test(snippet.not1, [int]) @@ 
-44,7 +44,7 @@ class BaseTestRint(BaseRtypingTest): - + def test_char_constant(self): def dummyfn(i): return chr(i) From noreply at buildbot.pypy.org Wed Sep 21 01:19:51 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Sep 2011 01:19:51 +0200 (CEST) Subject: [pypy-commit] pypy unroll-if-alt: close merged branch. Message-ID: <20110920231951.6FE6F820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: unroll-if-alt Changeset: r47382:6d556a687a82 Date: 2011-09-20 19:19 -0400 http://bitbucket.org/pypy/pypy/changeset/6d556a687a82/ Log: close merged branch. From noreply at buildbot.pypy.org Wed Sep 21 06:08:08 2011 From: noreply at buildbot.pypy.org (hpk42) Date: Wed, 21 Sep 2011 06:08:08 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: fix two typos Message-ID: <20110921040808.DE9D3820CF@wyvern.cs.uni-duesseldorf.de> Author: holger krekel Branch: extradoc Changeset: r269:6a889b9d74e8 Date: 2011-09-21 06:07 +0200 http://bitbucket.org/pypy/pypy.org/changeset/6a889b9d74e8/ Log: fix two typos diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -64,8 +64,8 @@ developers. Contracts and money are managed by the non-profit Software Freedom Conservancy of which the PyPy project is a member. The current elected representatives are Carl Friedrich Bolz, Holger -Krekel and Jacob Hallen and they will - in close collaboration - with -Conservancy and the core developers, select the best developers for +Krekel and Jacob Hallen and they will - in close collaboration with +Conservancy and the core developers - select the best developers for the Python 3 porting job among well known PyPy contributors.

If you want to see PyPy support Python 3 and Python 2, donate here:

@@ -107,7 +107,7 @@ at the latest, we will try our best to make PyPy support Python 3 anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a -501©(3) chartiable organization incorporated in NY, USA, all funds will, +501©(3) charitable organization incorporated in NY, USA, all funds will, regardless of their use, be spent in a way that benefits the general public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase.

diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -25,8 +25,8 @@ developers. Contracts and money are managed by the non-profit `Software Freedom Conservancy`_ of which the PyPy project is a member. The current elected representatives are Carl Friedrich Bolz, Holger -Krekel and Jacob Hallen and they will - in close collaboration - with -Conservancy and the core developers, select the best developers for +Krekel and Jacob Hallen and they will - in close collaboration with +Conservancy and the core developers - select the best developers for the Python 3 porting job among well known PyPy contributors. If you want to see PyPy support Python 3 and Python 2, donate here: @@ -75,7 +75,7 @@ at the latest, we will try our best to make PyPy support Python 3 anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a -501(c)(3) chartiable organization incorporated in NY, USA, all funds will, +501(c)(3) charitable organization incorporated in NY, USA, all funds will, regardless of their use, be spent in a way that benefits the general public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase. 
From noreply at buildbot.pypy.org Wed Sep 21 09:47:21 2011 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 Sep 2011 09:47:21 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: rephrase a sentence so that it's clear that we are not going to support python3 and python2 within the same interpreter; shift 2000$ from 1.1 to 1.2 so that 1.1 is 35000$ Message-ID: <20110921074721.A32B5820CF@wyvern.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r270:c8455f01bd79 Date: 2011-09-21 09:47 +0200 http://bitbucket.org/pypy/pypy.org/changeset/c8455f01bd79/ Log: rephrase a sentence so that it's clear that we are not going to support python3 and python2 within the same interpreter; shift 2000$ from 1.1 to 1.2 so that 1.1 is 35000$ diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -12,7 +12,7 @@ harder for everyone. The PyPy project is in a unique position in that it could support -**both Python 2 and Python 3** versions from the same code base, fully +Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of of code base and fully reusing its unique translation and JIT-Compiler technologies. 
However, it requires a lot of work, and it will take a long time before we can complete a Python 3 port if we only wait for volunteer @@ -162,7 +162,7 @@ - syntactic changes to make ``io.py`` importable (in particular: ``metaclass=...`` in class declarations) - - **Estimate cost**: $37,000 + - **Estimate cost**: $35,000 * **Sub-step 1.2**: other syntactic changes, builtin types and functions, exceptions: @@ -188,7 +188,7 @@ - improved ``with`` statement - - **Estimate cost**: $26,000 + - **Estimate cost**: $28,000 Note that the distinction between sub-steps 1.1 and 1.2 is blurry, and it might be From noreply at buildbot.pypy.org Wed Sep 21 14:32:06 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Sep 2011 14:32:06 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add a way to donate for both general and py3k cause from the main site. Use Message-ID: <20110921123206.27B0B820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r271:a60beecea321 Date: 2011-09-21 14:31 +0200 http://bitbucket.org/pypy/pypy.org/changeset/a60beecea321/ Log: Add a way to donate for both general and py3k cause from the main site. Use the main site as the basic layout for py3k. diff --git a/archive.html b/archive.html --- a/archive.html +++ b/archive.html @@ -16,6 +16,7 @@ + + - - - - - - - - - - - - - - -
$ - - -
- -
- -
  • - -
  • -
    diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html @@ -16,6 +16,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    - - -
  • - -
  • - diff --git a/donate1.html b/donate1.html new file mode 100644 --- /dev/null +++ b/donate1.html @@ -0,0 +1,60 @@ +
      +
    • + Donate towards py3k in pypy
      + Donate towards general pypy progress
      +
    • +
    • +
    • +
    • +
      +
      + + + + +
      + + +
    • +
    • +
      +
      + + + + + + + + + + + + + +
      $ + + +
      +
      +
      +
    • + + +
    • + +
    • +
    diff --git a/donate2.html b/donate2.html new file mode 100644 --- /dev/null +++ b/donate2.html @@ -0,0 +1,58 @@ +
      +
    • + Donate towards py3k in pypy
      + Donate towards general pypy progress
      +
    • +
    • +
    • +
    • +
      +
      + + + + +
      +
      +
    • +
    • +
      + +
      + + + + + + + + + + + + + +
      $ + + +
      +
      +
      +
    • +
    • + +
    • +
    diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -16,6 +16,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    - - -
  • - -
  • - diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -16,6 +16,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    - - -
  • - -
  • - diff --git a/howtohelp.html b/howtohelp.html --- a/howtohelp.html +++ b/howtohelp.html @@ -16,6 +16,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    - - -
  • - -
  • - diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -16,6 +16,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    - - -
  • - -
  • - diff --git a/js/script.js b/js/script.js new file mode 100644 --- /dev/null +++ b/js/script.js @@ -0,0 +1,16 @@ + +function py3k_donate() { + $.get("donate1.html", function (html) { + $("#sidebar").html(html); + }); +} + +function general_donate() { + $.get("donate2.html", function (html) { + $("#sidebar").html(html); + }); +} + +$(document).ready(function() { + py3k_donate(); +}); \ No newline at end of file diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -16,6 +16,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    - - -
  • - -
  • - diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -16,6 +16,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -

    Should we not receive enough donations to complete all stages by 1st March 2012 +

    If you want to see PyPy support Python 3 and Python 2, donate using buttons +on the side.

    +

    Should we not receive enough donations to complete all stages by 1st March 2012 at the latest, we will try our best to make PyPy support Python 3 anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a @@ -173,7 +140,7 @@ 2.7)

  • syntactic changes to make io.py importable (in particular: metaclass=... in class declarations)
  • -
  • Estimate cost: $37,000
  • +
  • Estimate cost: $35,000
  • @@ -193,7 +160,7 @@ del e at the end of the except block, etc.
  • changes to builtins: super, input, next(), etc.
  • improved with statement
  • -
  • Estimate cost: $26,000
  • +
  • Estimate cost: $28,000
  • diff --git a/source/_layouts/page.genshi b/source/_layouts/page.genshi --- a/source/_layouts/page.genshi +++ b/source/_layouts/page.genshi @@ -9,62 +9,5 @@ ${Markup(content)} - diff --git a/source/_layouts/py3k.genshi b/source/_layouts/py3k.genshi --- a/source/_layouts/py3k.genshi +++ b/source/_layouts/py3k.genshi @@ -9,11 +9,5 @@ ${Markup(content)} - diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi --- a/source/_layouts/site.genshi +++ b/source/_layouts/site.genshi @@ -44,6 +44,7 @@ + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    +If you want to see PyPy support Python 3 and Python 2, donate using buttons +on the side. Should we not receive enough donations to complete all stages by 1st March 2012 at the latest, we will try our best to make PyPy support Python 3 anyway. We diff --git a/sponsor.html b/sponsor.html --- a/sponsor.html +++ b/sponsor.html @@ -16,6 +16,7 @@ + + -
    - - - - - - - - - - - - - -
    $ - - -
    -
    - - -
  • - -
  • - From noreply at buildbot.pypy.org Wed Sep 21 18:00:22 2011 From: noreply at buildbot.pypy.org (bkuhn) Date: Wed, 21 Sep 2011 18:00:22 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: The word "overhead" really isn't exactly right. Message-ID: <20110921160022.6C03D820CF@wyvern.cs.uni-duesseldorf.de> Author: "Bradley M. Kuhn" Branch: extradoc Changeset: r272:a1c7687c5d64 Date: 2011-09-21 11:59 -0400 http://bitbucket.org/pypy/pypy.org/changeset/a1c7687c5d64/ Log: The word "overhead" really isn't exactly right. Changed "overhead" to "general donation", and wordsmithed sentence. diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -77,7 +77,7 @@ For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From this number, the money is calculated considering a hourly rate of $60, and a -5% overhead which goes to the `Software Freedom Conservancy`_, the non-profit +5% general donation which goes to the `Software Freedom Conservancy`_ itself, the non-profit association of which the PyPy project is a member and which manages all the issues related to donations, taxes and payments. 
From noreply at buildbot.pypy.org Wed Sep 21 18:35:15 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Sep 2011 18:35:15 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: a brief blog post draft Message-ID: <20110921163515.813E9820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3900:3fc9d13dc64c Date: 2011-09-21 18:34 +0200 http://bitbucket.org/pypy/extradoc/changeset/3fc9d13dc64c/ Log: a brief blog post draft diff --git a/blog/draft/py3donate.rst b/blog/draft/py3donate.rst new file mode 100644 --- /dev/null +++ b/blog/draft/py3donate.rst @@ -0,0 +1,12 @@ + +Py3k for PyPy fundraiser +======================== + +Hello + +We would like to announce a donation campaign for implementing python 3 in PyPy. +Please read the `detailed plan`_ for further details and donate using +`our website`_. + +.. _`detailed plan`: http://pypy.org/py3donate.html +.. _`our website`: http://pypy.org From noreply at buildbot.pypy.org Wed Sep 21 18:38:44 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Sep 2011 18:38:44 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: kill some suspect html. copy paste error? Message-ID: <20110921163844.865A8820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r273:daccbb1e2f6d Date: 2011-09-21 18:37 +0200 http://bitbucket.org/pypy/pypy.org/changeset/daccbb1e2f6d/ Log: kill some suspect html. copy paste error? diff --git a/donate1.html b/donate1.html --- a/donate1.html +++ b/donate1.html @@ -28,7 +28,6 @@
  • -
    @@ -48,9 +47,6 @@
    -
    -
  • -
  • diff --git a/donate2.html b/donate2.html --- a/donate2.html +++ b/donate2.html @@ -40,7 +40,7 @@ -
    $ + $ From noreply at buildbot.pypy.org Wed Sep 21 18:38:45 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Sep 2011 18:38:45 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: merge Message-ID: <20110921163845.8F969820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r274:db624b4a3038 Date: 2011-09-21 18:38 +0200 http://bitbucket.org/pypy/pypy.org/changeset/db624b4a3038/ Log: merge diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -77,7 +77,7 @@ For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From this number, the money is calculated considering a hourly rate of $60, and a -5% overhead which goes to the `Software Freedom Conservancy`_, the non-profit +5% general donation which goes to the `Software Freedom Conservancy`_ itself, the non-profit association of which the PyPy project is a member and which manages all the issues related to donations, taxes and payments. From noreply at buildbot.pypy.org Wed Sep 21 18:41:38 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 Sep 2011 18:41:38 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: small rewrite. Message-ID: <20110921164138.BC5ED820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3901:fc7730bf6827 Date: 2011-09-21 12:41 -0400 http://bitbucket.org/pypy/extradoc/changeset/fc7730bf6827/ Log: small rewrite. diff --git a/blog/draft/py3donate.rst b/blog/draft/py3donate.rst --- a/blog/draft/py3donate.rst +++ b/blog/draft/py3donate.rst @@ -2,11 +2,14 @@ Py3k for PyPy fundraiser ======================== -Hello +Hi, -We would like to announce a donation campaign for implementing python 3 in PyPy. -Please read the `detailed plan`_ for further details and donate using -`our website`_. 
+We would like to announce a donation campaign for implementing Python 3 in PyPy. +Please read our `detailed plan`_ for all the details and donate using the +button on that page! + +Thanks, +The PyPy Team .. _`detailed plan`: http://pypy.org/py3donate.html .. _`our website`: http://pypy.org From noreply at buildbot.pypy.org Wed Sep 21 18:41:43 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Sep 2011 18:41:43 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: kill jumping buttons Message-ID: <20110921164143.65917820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r275:44905ff093d8 Date: 2011-09-21 18:41 +0200 http://bitbucket.org/pypy/pypy.org/changeset/44905ff093d8/ Log: kill jumping buttons diff --git a/donate1.html b/donate1.html --- a/donate1.html +++ b/donate1.html @@ -14,7 +14,9 @@ - + + +
  • -
  • -
  • @@ -39,7 +39,7 @@ -
    $ + $ @@ -47,6 +47,7 @@
    +
  • From noreply at buildbot.pypy.org Wed Sep 21 18:44:24 2011 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Sep 2011 18:44:24 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Update html, remove draft, add py3k to the list of pages Message-ID: <20110921164424.2A1BE820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r276:590e61384ca8 Date: 2011-09-21 18:44 +0200 http://bitbucket.org/pypy/pypy.org/changeset/590e61384ca8/ Log: Update html, remove draft, add py3k to the list of pages diff --git a/archive.html b/archive.html --- a/archive.html +++ b/archive.html @@ -38,7 +38,7 @@

    - +
    diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -38,7 +38,7 @@

    - +
    diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html @@ -38,7 +38,7 @@

    - +
    diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -38,7 +38,7 @@

    - +
    diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -38,7 +38,7 @@

    - +
    diff --git a/howtohelp.html b/howtohelp.html --- a/howtohelp.html +++ b/howtohelp.html @@ -38,7 +38,7 @@

    - +
    diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -38,7 +38,7 @@

    - +
    diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -38,7 +38,7 @@

    - +
    diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -1,7 +1,7 @@ - PyPy :: (UNRELEASED DRAFT) Call for donations - PyPy to support Python3! + PyPy :: Call for donations - PyPy to support Python3! @@ -38,13 +38,13 @@

    - +
    -

    (UNRELEASED DRAFT) Call for donations - PyPy to support Python3!

    +

    Call for donations - PyPy to support Python3!

    The release of Python 3 has been a major undertaking for the Python community, both technically and socially. So far the PyPy interpreter implements only version 2 of the Python language and is increasingly @@ -105,7 +105,7 @@

    For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From this number, the money is calculated considering a hourly rate of $60, and a -5% overhead which goes to the Software Freedom Conservancy, the non-profit +5% general donation which goes to the Software Freedom Conservancy itself, the non-profit association of which the PyPy project is a member and which manages all the issues related to donations, taxes and payments.

    The estimated time to complete the whole project is about 10.5 person-months.

    diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi --- a/source/_layouts/site.genshi +++ b/source/_layouts/site.genshi @@ -13,6 +13,7 @@ ('Blog', 'http://morepypy.blogspot.com'), ('People', 'people.html'), ('Contact', 'contact.html'), + ('Py3k donations', 'py3donate.html'), ], } diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -1,6 +1,6 @@ --- layout: page -title: (UNRELEASED DRAFT) Call for donations - PyPy to support Python3! +title: Call for donations - PyPy to support Python3! --- The release of Python 3 has been a major undertaking for the Python diff --git a/sponsor.html b/sponsor.html --- a/sponsor.html +++ b/sponsor.html @@ -38,7 +38,7 @@

    - +
    diff --git a/success.html b/success.html --- a/success.html +++ b/success.html @@ -38,7 +38,7 @@

    - +
    From noreply at buildbot.pypy.org Thu Sep 22 04:08:01 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 22 Sep 2011 04:08:01 +0200 (CEST) Subject: [pypy-commit] pypy jit-frontend-unescaped: this branch was merged, closing it Message-ID: <20110922020801.65044820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: jit-frontend-unescaped Changeset: r47383:89ff5a00266c Date: 2011-09-21 22:07 -0400 http://bitbucket.org/pypy/pypy/changeset/89ff5a00266c/ Log: this branch was merged, closing it From noreply at buildbot.pypy.org Thu Sep 22 09:54:57 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 09:54:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Tweak for the common case of no **keyword arguments. Message-ID: <20110922075457.97C96820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r47384:655453fd0333 Date: 2011-09-22 09:21 +0200 http://bitbucket.org/pypy/pypy/changeset/655453fd0333/ Log: Tweak for the common case of no **keyword arguments. diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -14,10 +14,9 @@ raise TypeError("the first argument must be callable") self.func = func self.args = args - self.keywords = keywords + self.keywords = keywords or None def __call__(self, *fargs, **fkeywords): - newkeywords = self.keywords.copy() - newkeywords.update(fkeywords) - return self.func(*(self.args + fargs), **newkeywords) - + if self.keywords is not None: + fkeywords.update(self.keywords) + return self.func(*(self.args + fargs), **fkeywords) From noreply at buildbot.pypy.org Thu Sep 22 09:54:58 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 09:54:58 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Fix the test, but it fails. 
Message-ID: <20110922075458.BB3F2820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47385:5c69da2521b4 Date: 2011-09-22 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/5c69da2521b4/ Log: Fix the test, but it fails. diff --git a/pypy/module/test_lib_pypy/test_stackless_pickle.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py --- a/pypy/module/test_lib_pypy/test_stackless_pickle.py +++ b/pypy/module/test_lib_pypy/test_stackless_pickle.py @@ -1,25 +1,27 @@ -import py; py.test.skip("XXX port me") +import py from pypy.conftest import gettestobjspace, option class AppTest_Stackless: def setup_class(cls): - import py.test py.test.importorskip('greenlet') - space = gettestobjspace(usemodules=('_stackless', '_socket')) + space = gettestobjspace(usemodules=('_continuation', '_socket')) cls.space = space - # cannot test the unpickle part on top of py.py + if option.runappdirect: + cls.w_lev = space.wrap(14) + else: + cls.w_lev = space.wrap(2) def test_pickle(self): import new, sys mod = new.module('mod') sys.modules['mod'] = mod + mod.lev = self.lev try: exec ''' import pickle, sys import stackless -lev = 14 ch = stackless.channel() seen = [] From noreply at buildbot.pypy.org Thu Sep 22 09:54:59 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 09:54:59 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Randomly fix something else in stackless.py. Message-ID: <20110922075459.DF1EF820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47386:c183e6fa96c0 Date: 2011-09-22 09:54 +0200 http://bitbucket.org/pypy/pypy/changeset/c183e6fa96c0/ Log: Randomly fix something else in stackless.py. 
diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -15,16 +15,9 @@ class coroutine(object): - "we can't have continulet as a base, because continulets can't be rebound" def __init__(self): self._frame = None - self.is_zombie = False - - def __del__(self): - self.is_zombie = True - del self._frame - self._frame = None def bind(self, func, *argl, **argd): """coro.bind(f, *argl, **argd) -> None. @@ -62,7 +55,7 @@ def _is_alive(self): if self._frame is None: return False - return not self._frame.is_pending() + return self._frame.is_pending() is_alive = property(_is_alive) del _is_alive From noreply at buildbot.pypy.org Thu Sep 22 11:19:41 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Sep 2011 11:19:41 +0200 (CEST) Subject: [pypy-commit] lang-io default: whitespace Message-ID: <20110922091941.0FB13820CF@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r39:155337c53d32 Date: 2011-09-15 11:39 +0200 http://bitbucket.org/pypy/lang-io/changeset/155337c53d32/ Log: whitespace diff --git a/io/coroutinemodel.py b/io/coroutinemodel.py --- a/io/coroutinemodel.py +++ b/io/coroutinemodel.py @@ -12,12 +12,12 @@ class W_Coroutine(Coroutine): def __init__(self, space, state, protos): Coroutine.__init__(self, state) - + W_Object.__init__(self, space, protos) def clone(self): return W_Coroutine(self.space, self.costate, [self]) - + @staticmethod def w_getcurrent(space): return W_Coroutine._get_state(space).current @@ -32,7 +32,7 @@ space._coroutine_state = AppCoState(space) space._coroutine_state.post_install() return space._coroutine_state - + def run(self, space, w_receiver, w_context): if self.thunk is None: t = IoThunk(space, self.slots['runMessage'], w_receiver, w_context) @@ -41,13 +41,13 @@ if not space.isnil(p): self.parent = p self.switch() - + class AppCoState(BaseCoState): def __init__(self, space): BaseCoState.__init__(self) self.space = space - + def post_install(self): 
self.main = W_Coroutine(self.space, self, [self.space.w_object]) self.current = self.main @@ -59,7 +59,7 @@ self.w_message = w_message self.w_receiver = w_receiver self.w_context = w_context - + def call(self): t = self.w_message.eval(self.space, self.w_receiver, self.w_context) self.w_receiver.slots['result'] = t From noreply at buildbot.pypy.org Thu Sep 22 11:19:42 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Sep 2011 11:19:42 +0200 (CEST) Subject: [pypy-commit] lang-io default: implement List remove(item) Message-ID: <20110922091942.2755582211@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r40:912d94ab5156 Date: 2011-09-22 11:18 +0200 http://bitbucket.org/pypy/lang-io/changeset/912d94ab5156/ Log: implement List remove(item) diff --git a/io/list.py b/io/list.py --- a/io/list.py +++ b/io/list.py @@ -128,6 +128,12 @@ raise Exception, 'index out of bounds' return w_target + + at register_method('List', 'remove') +def list_remove_all(space, w_target, w_message, w_context): + w_item = w_message.arguments[0].eval(space, w_target, w_context) + w_target.list_items.remove(w_item) + return w_target @register_method('List', 'atPut') def list_reverse_in_place(space, w_target, w_message, w_context): diff --git a/io/test/test_list.py b/io/test/test_list.py --- a/io/test/test_list.py +++ b/io/test/test_list.py @@ -197,6 +197,11 @@ res, space = interpret(inp) assert isinstance(res, W_List) assert [x.value for x in res.list_items] == [] +def test_remove(): + inp = 'a := list(9,8,7,6,5,4,3,2,1,100); a remove(7)' + res, space = interpret(inp) + assert isinstance(res, W_List) + assert [int(x.number_value) for x in res.list_items] == [9,8,6,5,4,3,2,1,100] def test_at_put(): inp = 'a := list(9,8,7,6,5,4,3,2,1,100); a atPut(3, 1045)' From noreply at buildbot.pypy.org Thu Sep 22 11:19:43 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Sep 2011 11:19:43 +0200 (CEST) Subject: [pypy-commit] lang-io default: cleanup test Message-ID: 
<20110922091943.436B8820CF@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r41:d906c3756c13 Date: 2011-09-22 11:18 +0200 http://bitbucket.org/pypy/lang-io/changeset/d906c3756c13/ Log: cleanup test diff --git a/io/test/test_list.py b/io/test/test_list.py --- a/io/test/test_list.py +++ b/io/test/test_list.py @@ -196,7 +196,8 @@ inp = 'a := list(9,8,7,6,5,4,3,2,1,100); a removeAll; a' res, space = interpret(inp) assert isinstance(res, W_List) - assert [x.value for x in res.list_items] == [] + assert res.list_items == [] + def test_remove(): inp = 'a := list(9,8,7,6,5,4,3,2,1,100); a remove(7)' res, space = interpret(inp) From noreply at buildbot.pypy.org Thu Sep 22 11:22:04 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 11:22:04 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Reduce a little bit the code. Message-ID: <20110922092204.984F0820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47387:ce5f58ca6849 Date: 2011-09-22 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/ce5f58ca6849/ Log: Reduce a little bit the code. diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -5,7 +5,6 @@ """ -import traceback import _continuation class TaskletExit(Exception): @@ -14,7 +13,18 @@ CoroutineExit = TaskletExit +def _coroutine_getcurrent(): + "Returns the current coroutine (i.e. the one which called this function)." + try: + return _tls.current_coroutine + except AttributeError: + # first call in this thread: current == main + _coroutine_create_main() + return _tls.current_coroutine + + class coroutine(object): + _is_started = False def __init__(self): self._frame = None @@ -24,13 +34,14 @@ binds function f to coro. 
f will be called with arguments *argl, **argd """ - if self._frame is None or not self._frame.is_pending(): - def run(c): - _tls.current_coroutine = self - return func(*argl, **argd) - self._frame = frame = _continuation.continulet(run) - else: + if self.is_alive: raise ValueError("cannot bind a bound coroutine") + def run(c): + _tls.current_coroutine = self + self._is_started = True + return func(*argl, **argd) + self._is_started = False + self._frame = _continuation.continulet(run) def switch(self): """coro.switch() -> returnvalue @@ -38,7 +49,7 @@ f finishes, the returnvalue is that of f, otherwise None is returned """ - current = _getcurrent() + current = _coroutine_getcurrent() try: current._frame.switch(to=self._frame) finally: @@ -46,33 +57,22 @@ def kill(self): """coro.kill() : kill coroutine coro""" - current = _getcurrent() + current = _coroutine_getcurrent() try: current._frame.throw(CoroutineExit, to=self._frame) finally: _tls.current_coroutine = current - def _is_alive(self): - if self._frame is None: - return False - return self._frame.is_pending() - is_alive = property(_is_alive) - del _is_alive + @property + def is_alive(self): + return self._frame is not None and self._frame.is_pending() - def getcurrent(): - """coroutine.getcurrent() -> the currently running coroutine""" - return _getcurrent() - getcurrent = staticmethod(getcurrent) + @property + def is_zombie(self): + return self._is_started and not self._frame.is_pending() + getcurrent = staticmethod(_coroutine_getcurrent) -def _getcurrent(): - "Returns the current coroutine (i.e. the one which called this function)." 
- try: - return _tls.current_coroutine - except AttributeError: - # first call in this thread: current == main - _coroutine_create_main() - return _tls.current_coroutine try: from thread import _local @@ -91,6 +91,8 @@ _tls.main_coroutine = main_coroutine _tls.current_coroutine = main_coroutine +# ____________________________________________________________ + from collections import deque @@ -135,10 +137,7 @@ _last_task = next assert not next.blocked if next is not current: - #try: - next.switch() - #except CoroutineExit: --- they are the same anyway - # raise TaskletExit + next.switch() return current def set_schedule_callback(callback): @@ -330,6 +329,7 @@ """ return self._channel_action(msg, 1) + class tasklet(coroutine): """ A tasklet object represents a tiny task in a Python thread. @@ -411,7 +411,7 @@ self.func = None coroutine.bind(self, _func) - back = _getcurrent() + back = _coroutine_getcurrent() coroutine.switch(self) self.alive = True _scheduler_append(self) From noreply at buildbot.pypy.org Thu Sep 22 11:22:05 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 11:22:05 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Baaah. /me finds the horrible hack, despairs, and add "XXX HAAAAACK". Message-ID: <20110922092205.C8E37820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47388:f2dad909405b Date: 2011-09-22 11:21 +0200 http://bitbucket.org/pypy/pypy/changeset/f2dad909405b/ Log: Baaah. /me finds the horrible hack, despairs, and add "XXX HAAAAACK". diff --git a/lib_pypy/pypy_test/test_stackless_pickling.py b/lib_pypy/pypy_test/test_stackless_pickling.py --- a/lib_pypy/pypy_test/test_stackless_pickling.py +++ b/lib_pypy/pypy_test/test_stackless_pickling.py @@ -1,7 +1,3 @@ -""" -this test should probably not run from CPython or py.py. -I'm not entirely sure, how to do that. 
-""" from __future__ import absolute_import from py.test import skip try: @@ -16,11 +12,15 @@ class Test_StacklessPickling: + def test_pickle_main_coroutine(self): + import stackless, pickle + s = pickle.dumps(stackless.coroutine.getcurrent()) + print s + c = pickle.loads(s) + assert c is stackless.coroutine.getcurrent() + def test_basic_tasklet_pickling(self): - try: - import stackless - except ImportError: - skip("can't load stackless and don't know why!!!") + import stackless from stackless import run, schedule, tasklet import pickle diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py --- a/lib_pypy/stackless.py +++ b/lib_pypy/stackless.py @@ -19,12 +19,23 @@ return _tls.current_coroutine except AttributeError: # first call in this thread: current == main - _coroutine_create_main() - return _tls.current_coroutine + return _coroutine_getmain() + +def _coroutine_getmain(): + try: + return _tls.main_coroutine + except AttributeError: + # create the main coroutine for this thread + continulet = _continuation.continulet + main = coroutine() + main._frame = continulet.__new__(continulet) + main._is_started = -1 + _tls.current_coroutine = _tls.main_coroutine = main + return _tls.main_coroutine class coroutine(object): - _is_started = False + _is_started = 0 # 0=no, 1=yes, -1=main def __init__(self): self._frame = None @@ -38,9 +49,9 @@ raise ValueError("cannot bind a bound coroutine") def run(c): _tls.current_coroutine = self - self._is_started = True + self._is_started = 1 return func(*argl, **argd) - self._is_started = False + self._is_started = 0 self._frame = _continuation.continulet(run) def switch(self): @@ -65,14 +76,21 @@ @property def is_alive(self): - return self._frame is not None and self._frame.is_pending() + return self._is_started < 0 or ( + self._frame is not None and self._frame.is_pending()) @property def is_zombie(self): - return self._is_started and not self._frame.is_pending() + return self._is_started > 0 and not self._frame.is_pending() 
getcurrent = staticmethod(_coroutine_getcurrent) + def __reduce__(self): + if self._is_started < 0: + return _coroutine_getmain, () + else: + return type(self), (), self.__dict__ + try: from thread import _local @@ -82,14 +100,6 @@ _tls = _local() -def _coroutine_create_main(): - # create the main coroutine for this thread - _tls.current_coroutine = None - main_coroutine = coroutine() - typ = _continuation.continulet - main_coroutine._frame = typ.__new__(typ) - _tls.main_coroutine = main_coroutine - _tls.current_coroutine = main_coroutine # ____________________________________________________________ @@ -523,30 +533,7 @@ global _last_task _global_task_id = 0 _main_tasklet = coroutine.getcurrent() - try: - _main_tasklet.__class__ = tasklet - except TypeError: # we are running pypy-c - class TaskletProxy(object): - """TaskletProxy is needed to give the _main_coroutine tasklet behaviour""" - def __init__(self, coro): - self._coro = coro - - def __getattr__(self,attr): - return getattr(self._coro,attr) - - def __str__(self): - return '' % (self._task_id, self.is_alive) - - def __reduce__(self): - return getmain, () - - __repr__ = __str__ - - - global _main_coroutine - _main_coroutine = _main_tasklet - _main_tasklet = TaskletProxy(_main_tasklet) - assert _main_tasklet.is_alive and not _main_tasklet.is_zombie + _main_tasklet.__class__ = tasklet # XXX HAAAAAAAAAAAAAAAAAAAAACK _last_task = _main_tasklet tasklet._init.im_func(_main_tasklet, label='main') _squeue = deque() From noreply at buildbot.pypy.org Thu Sep 22 12:04:15 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 12:04:15 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Fix an UnboundLocalError. Message-ID: <20110922100415.63BB1820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47389:37278daa9a3d Date: 2011-09-22 11:50 +0200 http://bitbucket.org/pypy/pypy/changeset/37278daa9a3d/ Log: Fix an UnboundLocalError. 
diff --git a/py/_code/source.py b/py/_code/source.py --- a/py/_code/source.py +++ b/py/_code/source.py @@ -139,7 +139,7 @@ trysource = self[start:end] if trysource.isparseable(): return start, end - return start, end + return start, len(self) def getblockend(self, lineno): # XXX From noreply at buildbot.pypy.org Thu Sep 22 12:04:16 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 12:04:16 +0200 (CEST) Subject: [pypy-commit] pypy continulet-pickle: Implement gr_frame. Message-ID: <20110922100416.8D728820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: continulet-pickle Changeset: r47390:41e5b1a4e17c Date: 2011-09-22 12:03 +0200 http://bitbucket.org/pypy/pypy/changeset/41e5b1a4e17c/ Log: Implement gr_frame. diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -96,7 +96,16 @@ @property def gr_frame(self): - raise NotImplementedError("attribute 'gr_frame' of greenlet objects") + # xxx this doesn't work when called on either the current or + # the main greenlet of another thread + if self is getcurrent(): + return None + if self.__main: + self = getcurrent() + f = _continulet.__reduce__(self)[2][0] + if not f: + return None + return f.f_back.f_back.f_back # go past start(), __switch(), switch() # ____________________________________________________________ # Internal stuff diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -258,3 +258,25 @@ assert sys.exc_info() == (None, None, None) greenlet(f).switch() + + def test_gr_frame(self): + from greenlet import greenlet + import sys + def f2(): + assert g.gr_frame is None + gmain.switch() + assert g.gr_frame is None + def f1(): + assert gmain.gr_frame is gmain_frame + assert g.gr_frame is None + f2() + assert g.gr_frame is None + gmain = greenlet.getcurrent() + assert gmain.gr_frame is 
None + gmain_frame = sys._getframe() + g = greenlet(f1) + assert g.gr_frame is None + g.switch() + assert g.gr_frame.f_code.co_name == 'f2' + g.switch() + assert g.gr_frame is None From noreply at buildbot.pypy.org Thu Sep 22 15:10:31 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 22 Sep 2011 15:10:31 +0200 (CEST) Subject: [pypy-commit] pypy kill-unary-multimethods: remove id as multimethod Message-ID: <20110922131031.93734820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-unary-multimethods Changeset: r47391:30254a57e7d9 Date: 2011-09-21 10:19 +0200 http://bitbucket.org/pypy/pypy/changeset/30254a57e7d9/ Log: remove id as multimethod diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -8,7 +8,7 @@ from pypy.interpreter.miscutils import ThreadLocals from pypy.tool.cache import Cache from pypy.tool.uid import HUGEVAL_BYTES -from pypy.rlib.objectmodel import we_are_translated, newlist +from pypy.rlib.objectmodel import we_are_translated, newlist, compute_unique_id from pypy.rlib.debug import make_sure_not_resized from pypy.rlib.timer import DummyTimer, Timer from pypy.rlib.rarithmetic import r_uint @@ -958,6 +958,9 @@ def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) + def id(self, w_obj): + return self.wrap(compute_unique_id(w_obj)) + # The code below only works # for the simple case (new-style instance). # These methods are patched with the full logic by the __builtin__ diff --git a/pypy/objspace/std/default.py b/pypy/objspace/std/default.py --- a/pypy/objspace/std/default.py +++ b/pypy/objspace/std/default.py @@ -2,16 +2,8 @@ from pypy.interpreter.error import OperationError, typed_unwrap_error_msg from pypy.objspace.std.register_all import register_all -from pypy.rlib import objectmodel -# The following default implementations are used before delegation is tried. 
-# 'id' is normally the address of the wrapper. - -def id__ANY(space, w_obj): - #print 'id:', w_obj - return space.wrap(objectmodel.compute_unique_id(w_obj)) - # __init__ should succeed if called internally as a multimethod def init__ANY(space, w_obj, __args__): From noreply at buildbot.pypy.org Thu Sep 22 15:10:32 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 22 Sep 2011 15:10:32 +0200 (CEST) Subject: [pypy-commit] pypy kill-unary-multimethods: remove int_w, bigint_w and uint_w as multimethods Message-ID: <20110922131032.C515A820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-unary-multimethods Changeset: r47392:514091a77a3b Date: 2011-09-21 10:38 +0200 http://bitbucket.org/pypy/pypy/changeset/514091a77a3b/ Log: remove int_w, bigint_w and uint_w as multimethods diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -196,6 +196,11 @@ raise OperationError(space.w_TypeError, typed_unwrap_error_msg(space, "unicode", self)) + def int_w(self, space): + raise OperationError(space.w_TypeError, + typed_unwrap_error_msg(space, "integer", self)) + uint_w = int_w + bigint_w = int_w class Wrappable(W_Root): @@ -1224,6 +1229,15 @@ def str_w(self, w_obj): return w_obj.str_w(self) + def int_w(self, w_obj): + return w_obj.int_w(self) + + def uint_w(self, w_obj): + return w_obj.uint_w(self) + + def bigint_w(self, w_obj): + return w_obj.bigint_w(self) + def realstr_w(self, w_obj): # Like str_w, but only works if w_obj is really of type 'str'. 
if not self.is_true(self.isinstance(w_obj, self.w_str)): diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -1,8 +1,10 @@ +from pypy.rlib.rbigint import rbigint +from pypy.rlib.rarithmetic import r_uint +from pypy.interpreter.error import OperationError from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all from pypy.objspace.std.intobject import W_IntObject - class W_BoolObject(W_Object): from pypy.objspace.std.booltype import bool_typedef as typedef _immutable_fields_ = ['boolval'] @@ -19,6 +21,19 @@ def unwrap(w_self, space): return w_self.boolval + int_w = unwrap + + def uint_w(w_self, space): + intval = w_self.intval + if intval < 0: + raise OperationError(space.w_ValueError, + space.wrap("cannot convert negative integer to unsigned")) + else: + return r_uint(intval) + + def bigint_w(w_self, space): + return rbigint.fromint(w_self.intval) + registerimplementation(W_BoolObject) diff --git a/pypy/objspace/std/default.py b/pypy/objspace/std/default.py --- a/pypy/objspace/std/default.py +++ b/pypy/objspace/std/default.py @@ -9,20 +9,8 @@ def init__ANY(space, w_obj, __args__): pass -def int_w__ANY(space,w_obj): - raise OperationError(space.w_TypeError, - typed_unwrap_error_msg(space, "integer", w_obj)) - def float_w__ANY(space,w_obj): raise OperationError(space.w_TypeError, typed_unwrap_error_msg(space, "float", w_obj)) -def uint_w__ANY(space,w_obj): - raise OperationError(space.w_TypeError, - typed_unwrap_error_msg(space, "integer", w_obj)) - -def bigint_w__ANY(space,w_obj): - raise OperationError(space.w_TypeError, - typed_unwrap_error_msg(space, "integer", w_obj)) - register_all(vars()) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -30,7 +30,18 @@ def unwrap(w_self, space): return 
int(w_self.intval) + int_w = unwrap + def uint_w(w_self, space): + intval = w_self.intval + if intval < 0: + raise OperationError(space.w_ValueError, + space.wrap("cannot convert negative integer to unsigned")) + else: + return r_uint(intval) + + def bigint_w(w_self, space): + return rbigint.fromint(w_self.intval) registerimplementation(W_IntObject) @@ -39,20 +50,6 @@ # alias and then teach copy_multimethods in smallintobject.py to override # it. See int__Int for example. -def int_w__Int(space, w_int1): - return int(w_int1.intval) - -def uint_w__Int(space, w_int1): - intval = w_int1.intval - if intval < 0: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert negative integer to unsigned")) - else: - return r_uint(intval) - -def bigint_w__Int(space, w_int1): - return rbigint.fromint(w_int1.intval) - def repr__Int(space, w_int1): a = w_int1.intval res = str(a) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -45,6 +45,26 @@ fromrarith_int._annspecialcase_ = "specialize:argtype(0)" fromrarith_int = staticmethod(fromrarith_int) + def int_w(w_self, space): + try: + return w_self.num.toint() + except OverflowError: + raise OperationError(space.w_OverflowError, space.wrap( + "long int too large to convert to int")) + + def uint_w(w_self, space): + try: + return w_self.num.touint() + except ValueError: + raise OperationError(space.w_ValueError, space.wrap( + "cannot convert negative integer to unsigned int")) + except OverflowError: + raise OperationError(space.w_OverflowError, space.wrap( + "long int too large to convert to unsigned int")) + + def bigint_w(w_self, space): + return w_self.num + def __repr__(self): return '' % self.num.tolong() @@ -104,27 +124,6 @@ raise OperationError(space.w_OverflowError, space.wrap("long int too large to convert to float")) -def int_w__Long(space, w_value): - try: - return w_value.num.toint() - except 
OverflowError: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to int")) - - -def uint_w__Long(space, w_value): - try: - return w_value.num.touint() - except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "cannot convert negative integer to unsigned int")) - except OverflowError: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to unsigned int")) - -def bigint_w__Long(space, w_value): - return w_value.num - def repr__Long(space, w_long): return space.wrap(w_long.num.repr()) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -442,6 +442,7 @@ mm.dispatch_tree = merge(self.dispatch_tree, other.dispatch_tree) return mm +NOT_MULTIMETHODS = [] class MM: """StdObjSpace multimethods""" @@ -451,10 +452,7 @@ init = StdObjSpaceMultiMethod('__init__', 1, general__args__=True) getnewargs = StdObjSpaceMultiMethod('__getnewargs__', 1) # special visible multimethods - int_w = StdObjSpaceMultiMethod('int_w', 1, []) # returns an unwrapped int float_w = StdObjSpaceMultiMethod('float_w', 1, []) # returns an unwrapped float - uint_w = StdObjSpaceMultiMethod('uint_w', 1, []) # returns an unwrapped unsigned int (r_uint) - bigint_w = StdObjSpaceMultiMethod('bigint_w', 1, []) # returns an unwrapped rbigint # NOTE: when adding more sometype_w() methods, you need to write a # stub in default.py to raise a space.w_TypeError marshal_w = StdObjSpaceMultiMethod('marshal_w', 1, [], extra_args=['marshaller']) @@ -462,9 +460,10 @@ # add all regular multimethods here for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable: - if _name not in locals(): + if _name not in locals() or _name in NOT_MULTIMETHODS: mm = StdObjSpaceMultiMethod(_symbol, _arity, _specialnames) locals()[_name] = mm del mm pow.extras['defaults'] = (None,) + diff --git a/pypy/objspace/std/smalllongobject.py 
b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -39,6 +39,30 @@ def __repr__(w_self): return '' % w_self.longlong + def int_w(w_self, space): + a = w_self.longlong + b = intmask(a) + if b == a: + return b + else: + raise OperationError(space.w_OverflowError, space.wrap( + "long int too large to convert to int")) + + def uint_w(w_self, space): + a = w_self.longlong + if a < 0: + raise OperationError(space.w_ValueError, space.wrap( + "cannot convert negative integer to unsigned int")) + b = r_uint(a) + if r_longlong(b) == a: + return b + else: + raise OperationError(space.w_OverflowError, space.wrap( + "long int too large to convert to unsigned int")) + + def bigint_w(w_self, space): + return w_self.asbigint() + registerimplementation(W_SmallLongObject) # ____________________________________________________________ @@ -102,30 +126,6 @@ def float__SmallLong(space, w_value): return space.newfloat(float(w_value.longlong)) -def int_w__SmallLong(space, w_value): - a = w_value.longlong - b = intmask(a) - if b == a: - return b - else: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to int")) - -def uint_w__SmallLong(space, w_value): - a = w_value.longlong - if a < 0: - raise OperationError(space.w_ValueError, space.wrap( - "cannot convert negative integer to unsigned int")) - b = r_uint(a) - if r_longlong(b) == a: - return b - else: - raise OperationError(space.w_OverflowError, space.wrap( - "long int too large to convert to unsigned int")) - -def bigint_w__SmallLong(space, w_value): - return w_value.asbigint() - def lt__SmallLong_SmallLong(space, w_small1, w_small2): return space.newbool(w_small1.longlong < w_small2.longlong) def le__SmallLong_SmallLong(space, w_small1, w_small2): From noreply at buildbot.pypy.org Thu Sep 22 15:10:33 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 22 Sep 2011 15:10:33 +0200 (CEST) Subject: [pypy-commit] pypy 
kill-unary-multimethods: some more test fixes Message-ID: <20110922131033.EA93B820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-unary-multimethods Changeset: r47393:0ce0d09dbd8f Date: 2011-09-21 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/0ce0d09dbd8f/ Log: some more test fixes diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py --- a/pypy/objspace/std/ropeobject.py +++ b/pypy/objspace/std/ropeobject.py @@ -41,6 +41,11 @@ return w_self return W_RopeObject(w_self._node) + def unicode_w(w_self, space): + # XXX should this use the default encoding? + from pypy.objspace.std.unicodetype import plain_str2unicode + return plain_str2unicode(space, w_self._node.flatten_string()) + W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY) W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i]) for i in range(256)] diff --git a/pypy/objspace/std/smallintobject.py b/pypy/objspace/std/smallintobject.py --- a/pypy/objspace/std/smallintobject.py +++ b/pypy/objspace/std/smallintobject.py @@ -7,16 +7,30 @@ from pypy.objspace.std.register_all import register_all from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.intobject import W_IntObject +from pypy.interpreter.error import OperationError from pypy.rlib.objectmodel import UnboxedValue +from pypy.rlib.rbigint import rbigint +from pypy.rlib.rarithmetic import r_uint from pypy.tool.sourcetools import func_with_new_name - class W_SmallIntObject(W_Object, UnboxedValue): __slots__ = 'intval' from pypy.objspace.std.inttype import int_typedef as typedef def unwrap(w_self, space): return int(w_self.intval) + int_w = unwrap + + def uint_w(w_self, space): + intval = w_self.intval + if intval < 0: + raise OperationError(space.w_ValueError, + space.wrap("cannot convert negative integer to unsigned")) + else: + return r_uint(intval) + + def bigint_w(w_self, space): + return rbigint.fromint(w_self.intval) 
registerimplementation(W_SmallIntObject) From noreply at buildbot.pypy.org Thu Sep 22 15:10:35 2011 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 22 Sep 2011 15:10:35 +0200 (CEST) Subject: [pypy-commit] pypy kill-unary-multimethods: remove unnecessary multimethods Message-ID: <20110922131035.223D9820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-unary-multimethods Changeset: r47394:801bde9e6123 Date: 2011-09-21 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/801bde9e6123/ Log: remove unnecessary multimethods diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -442,7 +442,12 @@ mm.dispatch_tree = merge(self.dispatch_tree, other.dispatch_tree) return mm -NOT_MULTIMETHODS = [] +NOT_MULTIMETHODS = dict.fromkeys( + ['delattr', 'delete', 'get', 'id', 'inplace_div', 'inplace_floordiv', + 'inplace_lshift', 'inplace_mod', 'inplace_pow', 'inplace_rshift', + 'inplace_truediv', 'is_', 'set', 'setattr', 'type', 'userdel']) +# XXX should we just remove those from the method table or we're happy +# with just not having multimethods? class MM: """StdObjSpace multimethods""" @@ -456,7 +461,6 @@ # NOTE: when adding more sometype_w() methods, you need to write a # stub in default.py to raise a space.w_TypeError marshal_w = StdObjSpaceMultiMethod('marshal_w', 1, [], extra_args=['marshaller']) - log = StdObjSpaceMultiMethod('log', 1, [], extra_args=['base']) # add all regular multimethods here for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable: From notifications-noreply at bitbucket.org Thu Sep 22 15:31:23 2011 From: notifications-noreply at bitbucket.org (Bitbucket) Date: Thu, 22 Sep 2011 13:31:23 -0000 Subject: [pypy-commit] Notification: jitviewer Message-ID: <20110922133123.25217.12557@bitbucket13.managed.contegix.com> You have received a notification from Anders Lehmann. Hi, I forked jitviewer. 
My fork is at https://bitbucket.org/redorlik/jitviewer. -- Disable notifications at https://bitbucket.org/account/notifications/ From pullrequests-noreply at bitbucket.org Thu Sep 22 15:35:53 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Thu, 22 Sep 2011 13:35:53 -0000 Subject: [pypy-commit] [OPEN] Pull request #1 for pypy/jitviewer: Updated README Message-ID: A new pull request has been opened by Anders Lehmann. redorlik/jitviewer has changes to be pulled into pypy/jitviewer. https://bitbucket.org/pypy/jitviewer/pull-request/1/updated-readme Title: Updated README The diassembler program objdump is not installed by default on OS X. So you need to install binutils and add objdump to the path. Changes to be pulled: ea8a654a9c6d by Anders Lehmann: "Added a comment to make it work on OS X, which does not come with objdump preins?" -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Thu Sep 22 18:01:20 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Sep 2011 18:01:20 +0200 (CEST) Subject: [pypy-commit] lang-io default: catch exception raised when item is not in the list Message-ID: <20110922160120.8E76C820CF@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r42:25ee4f13a748 Date: 2011-09-22 13:18 +0200 http://bitbucket.org/pypy/lang-io/changeset/25ee4f13a748/ Log: catch exception raised when item is not in the list diff --git a/io/list.py b/io/list.py --- a/io/list.py +++ b/io/list.py @@ -131,10 +131,12 @@ @register_method('List', 'remove') def list_remove_all(space, w_target, w_message, w_context): - w_item = w_message.arguments[0].eval(space, w_target, w_context) - w_target.list_items.remove(w_item) - return w_target - + w_item = w_message.arguments[0].eval(space, w_context, w_context) + try: + w_target.list_items.remove(w_item) + finally: + return w_target + @register_method('List', 'atPut') def list_reverse_in_place(space, w_target, w_message, w_context): # Help annotator From noreply at buildbot.pypy.org Thu Sep 22 18:01:21 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Sep 2011 18:01:21 +0200 (CEST) Subject: [pypy-commit] lang-io default: extend if method to support any object as condition Message-ID: <20110922160121.A623882211@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r43:5d3d11f87944 Date: 2011-09-22 13:20 +0200 http://bitbucket.org/pypy/lang-io/changeset/5d3d11f87944/ Log: extend if method to support any object as condition diff --git a/io/object.py b/io/object.py --- a/io/object.py +++ b/io/object.py @@ -151,16 +151,15 @@ @register_method('Object', 'if') def object_if(space, w_target, w_message, w_context): - w_condition = w_message.arguments[0].eval(space, w_context, w_context) - - if w_condition is space.w_true: + w_condition = w_message.arguments[0].eval(space, w_target, w_context) + if space.istrue(w_condition): 
index = 1 else: index = 2 if index < len(w_message.arguments): return w_message.arguments[index].eval(space, w_context, w_context) - return w_condition + return space.newbool(index == 1) @register_method('Object', 'stopStatus') def object_stopstatus(space, w_target, w_message, w_context): diff --git a/io/objspace.py b/io/objspace.py --- a/io/objspace.py +++ b/io/objspace.py @@ -204,8 +204,14 @@ return self.w_true return self.w_false + def istrue(self, value): + if value is not self.w_false and value is not self.w_nil: + return True + return False + def newsequence(self, value): return self.w_immutable_sequence.clone_and_init(value) + def isnil(self, w_object): return w_object is self.w_nil diff --git a/io/test/test_object.py b/io/test/test_object.py --- a/io/test/test_object.py +++ b/io/test/test_object.py @@ -197,6 +197,15 @@ res, space = interpret(inp) assert res is space.w_false +def test_if_object(): + inp = 'if(Object)' + res, space = interpret(inp) + assert res is space.w_true + + inp = 'if(nil)' + res, space = interpret(inp) + assert res is space.w_false + def test_object_stopStatus(): inp = 'stopStatus' res, space = interpret(inp) From noreply at buildbot.pypy.org Thu Sep 22 18:01:22 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Sep 2011 18:01:22 +0200 (CEST) Subject: [pypy-commit] lang-io default: implemented special version of updateSlot for Locals objects Message-ID: <20110922160122.BFD19820CF@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r44:666653c3a8e7 Date: 2011-09-22 15:04 +0200 http://bitbucket.org/pypy/lang-io/changeset/666653c3a8e7/ Log: implemented special version of updateSlot for Locals objects diff --git a/io/locals.py b/io/locals.py new file mode 100644 --- /dev/null +++ b/io/locals.py @@ -0,0 +1,14 @@ +from io.register import register_method + + at register_method('Locals', 'updateSlot') +def locals_update_slot(space, w_target, w_message, w_context): + slotname = w_message.arguments[0].eval(space, 
w_target, w_context).value + assert w_target.lookup(slotname) is not None + + if slotname in w_target.slots: + w_value = w_message.arguments[1].eval(space, w_target, w_context) + w_target.slots[slotname] = w_value + return w_value + else: + w_self = w_target.lookup('self') + return w_message.eval(space, w_self, w_context) diff --git a/io/model.py b/io/model.py --- a/io/model.py +++ b/io/model.py @@ -47,7 +47,7 @@ def __repr__(self): """NOT RPYTHON""" - return "" % (self.slots.keys(),) + return "<%s slots=%s>" % (self.__class__.__name__, self.slots.keys(),) class W_Number(W_Object): """Number""" @@ -297,10 +297,10 @@ w_locals.slots[args[i]] = space.w_nil if self.activateable: - w_locals.protos = [w_receiver] + w_locals.protos.append(w_receiver) w_locals.slots['self'] = w_receiver else: - w_locals.protos = [w_context] + w_locals.protos.append(w_context) w_locals.slots['self'] = w_context w_locals.slots['call'] = w_call diff --git a/io/objspace.py b/io/objspace.py --- a/io/objspace.py +++ b/io/objspace.py @@ -13,6 +13,7 @@ import io.coroutine import io.sequence import io.compiler +import io.locals class ObjSpace(object): """docstring for ObjSpace""" @@ -79,6 +80,8 @@ self.init_w_compiler() + self.init_w_locals() + def init_w_map(self): for key, function in cfunction_definitions['Map'].items(): self.w_map.slots[key] = W_CFunction(self, function) @@ -99,6 +102,10 @@ for key, function in cfunction_definitions['List'].items(): self.w_list.slots[key] = W_CFunction(self, function) + def init_w_locals(self): + for key, function in cfunction_definitions['Locals'].items(): + self.w_locals.slots[key] = W_CFunction(self, function) + def init_w_core(self): self.w_core.protos.append(self.w_object) self.w_core.slots['Locals'] = self.w_locals diff --git a/io/test/test_locals.py b/io/test/test_locals.py new file mode 100644 --- /dev/null +++ b/io/test/test_locals.py @@ -0,0 +1,22 @@ +from io.interpreter import parse, interpret + +def test_updateSlot_in_self(): + inp = """a := Object 
clone + a foo := 5 + a bar := method(foo = 10) + a bar + a foo + """ + res, space = interpret(inp) + assert res.number_value == 10 + assert space.w_lobby.slots['a'].slots['foo'].number_value == 10 + +def test_updateSlot_in_locals(): + inp = """a := Object clone + a foo := 5 + a bar := method(foo := foo; foo = 10) + a bar + """ + res, space = interpret(inp) + assert res.number_value == 10 + assert space.w_lobby.slots['a'].slots['foo'].number_value == 5 From noreply at buildbot.pypy.org Thu Sep 22 18:01:23 2011 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 22 Sep 2011 18:01:23 +0200 (CEST) Subject: [pypy-commit] lang-io default: enable io implementation of newSlot Message-ID: <20110922160123.CAA95820CF@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r45:e3b24ae80e73 Date: 2011-09-22 18:01 +0200 http://bitbucket.org/pypy/lang-io/changeset/e3b24ae80e73/ Log: enable io implementation of newSlot diff --git a/io/io/A2_Object.io b/io/io/A2_Object.io --- a/io/io/A2_Object.io +++ b/io/io/A2_Object.io @@ -331,7 +331,6 @@ and sets it's default the value aValue. Returns self. For example, newSlot("foo", 1) would create slot named foo with the value 1 as well as a setter method setFoo(). */ -/* newSlot := method(name, value, doc, getSlot("self") setSlot(name, getSlot("value")) getSlot("self") setSlot("set" .. name asCapitalized, @@ -339,7 +338,6 @@ //if(doc, getSlot("self") docSlot(name, doc)) getSlot("value") ) -*/ //doc Object launchFile(pathString) Eval file at pathString as if from the command line in it's folder. //doc System launchPath Returns a pathComponent of the launch file. 
launchFile := method(path, args, diff --git a/io/object.py b/io/object.py --- a/io/object.py +++ b/io/object.py @@ -175,24 +175,6 @@ ast = parse(space, code) return ast.eval(space, w_target, w_target) -# XXX replace with the original one in A2_Object.io when it works -from io.model import W_Object -class W_setSlotFunction(W_Object): - def __init__(self, space, name): - W_Object.__init__(self, space) - self.name = name - - def apply(self, space, w_receiver, w_message, w_context): - w_receiver.slots[self.name] = w_message.arguments[0].eval(space, w_context, - w_receiver) - return w_receiver - at register_method('Object', 'newSlot', unwrap_spec=[object, str, object]) -def object_new_slot(space, w_target, name, w_value): - from io.model import W_CFunction - w_target.slots[name] = w_value - slot_name = 'set%s%s' % (name[0].upper(), name[1:]) - w_target.slots[slot_name] = W_setSlotFunction(space, name) - @register_method('Object', 'updateSlot', unwrap_spec=[object, str, object]) def object_update_slot(space, w_target, slotname, w_value): assert w_target.lookup(slotname) is not None diff --git a/io/test/test_object.py b/io/test/test_object.py --- a/io/test/test_object.py +++ b/io/test/test_object.py @@ -261,10 +261,34 @@ res, space = interpret(inp) assert res.number_value == 5 assert space.w_lobby.slots['a'].number_value == 5 + def test_object_update_slot_raises(): inp = 'qwer = 23' py.test.raises(Exception, 'interpret(inp)') +def test_new_slot(): + inp = """a := Object clone + a foo ::= 45 + a bar ::= 99 + a setBar(123) + a + """ + res, space = interpret(inp) + assert res.slots['foo'].number_value == 45 + assert res.slots['bar'].number_value == 123 + assert 'setFoo' in res.slots + assert 'setBar' in res.slots + +def test_new_slot_with_method(): + inp = """a := Object clone + a foo := method(setBar(123)) + a bar ::= 99 + a foo + a bar + """ + res, space = interpret(inp) + assert res.number_value == 123 + def test_object_write(): inp = """ p := Object clone do( From 
pullrequests-noreply at bitbucket.org Thu Sep 22 18:27:31 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Thu, 22 Sep 2011 16:27:31 -0000 Subject: [pypy-commit] [ACCEPTED] Pull request #1 for pypy/jitviewer: Updated README In-Reply-To: References: Message-ID: <20110922162731.28964.36174@bitbucket02.managed.contegix.com> Pull request #1 has been accepted by Alex Gaynor. Changes in redorlik/jitviewer have been pulled into pypy/jitviewer. https://bitbucket.org/pypy/jitviewer/pull-request/1/updated-readme -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. From pullrequests-noreply at bitbucket.org Thu Sep 22 18:27:32 2011 From: pullrequests-noreply at bitbucket.org (Bitbucket) Date: Thu, 22 Sep 2011 16:27:32 -0000 Subject: [pypy-commit] [ACCEPTED] Pull request #1 for pypy/jitviewer: Updated README In-Reply-To: References: Message-ID: <20110922162732.28964.31738@bitbucket02.managed.contegix.com> Pull request #1 has been accepted by Alex Gaynor. Changes in redorlik/jitviewer have been pulled into pypy/jitviewer. https://bitbucket.org/pypy/jitviewer/pull-request/1/updated-readme -- This is an issue notification from bitbucket.org. You are receiving this either because you are the participating in a pull request, or you are following it. 
From noreply at buildbot.pypy.org Thu Sep 22 18:27:35 2011 From: noreply at buildbot.pypy.org (Anders Lehmann) Date: Thu, 22 Sep 2011 18:27:35 +0200 (CEST) Subject: [pypy-commit] jitviewer default: Added a comment to make it work on OS X, which does not come with objdump preinstalled Message-ID: <20110922162735.6D3D3820CF@wyvern.cs.uni-duesseldorf.de> Author: Anders Lehmann Branch: Changeset: r172:ea8a654a9c6d Date: 2011-09-19 18:38 +0200 http://bitbucket.org/pypy/jitviewer/changeset/ea8a654a9c6d/ Log: Added a comment to make it work on OS X, which does not come with objdump preinstalled diff --git a/README b/README --- a/README +++ b/README @@ -1,6 +1,9 @@ You need to use PyPy to run this. To get started, using a recent virtualenv -(1.6.1 or newer), virtualenvwrapper, and a recent PyPy (1.5 or trunk) to create a -virtualenv: +(1.6.1 or newer), virtualenvwrapper, and a recent PyPy (1.5 or trunk). + +On Mac OSX you will also need to install binutils, to make objdump available. + +To create a virtualenv: mkvirtualenv --python=/path/to/pypy pypy-viewer From noreply at buildbot.pypy.org Thu Sep 22 18:46:04 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 22 Sep 2011 18:46:04 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: add some jit codewriter and metainterp tests Message-ID: <20110922164604.89BC7820CF@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47395:f3f228579454 Date: 2011-09-22 10:45 -0600 http://bitbucket.org/pypy/pypy/changeset/f3f228579454/ Log: add some jit codewriter and metainterp tests diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -193,6 +193,8 @@ [lltype.SignedLongLong], lltype.Signed) self.do_check('cast_float_to_longlong', EffectInfo.OS_LLONG_FROM_FLOAT, [lltype.Float], lltype.SignedLongLong) + self.do_check('cast_float_to_ulonglong', 
EffectInfo.OS_LLONG_FROM_FLOAT, + [lltype.Float], lltype.UnsignedLongLong) self.do_check('cast_longlong_to_float', EffectInfo.OS_LLONG_TO_FLOAT, [lltype.SignedLongLong], lltype.Float) self.do_check('cast_ulonglong_to_float', EffectInfo.OS_LLONG_U_TO_FLOAT, diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -118,6 +118,26 @@ res = self.interp_operations(f, [1000000000]) assert res == 123500000000.0 + def test_floats_negative(self): + def f(i): + # i == 1000000000 + f = i * -123.5 + n = r_longlong(f) + compare(n, -29, 1054051584) + return float(n) + res = self.interp_operations(f, [1000000000]) + assert res == -123500000000.0 + + def test_floats_ulonglong(self): + def f(i): + # i == 10**17 + f = i * 123.5 + n = r_ulonglong(f) + compare(n, -1419508847, 538116096) + return float(n) + res = self.interp_operations(f, [10**17]) + assert res == 12350000000000000000.0 + def test_unsigned_compare_ops(self): def f(n1, n2): # n == 30002000000000 From noreply at buildbot.pypy.org Thu Sep 22 18:59:37 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 18:59:37 +0200 (CEST) Subject: [pypy-commit] pypy stm: A branch of PyPy to start playing with STM. Message-ID: <20110922165937.68402820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm Changeset: r47396:f3cec8c65a3f Date: 2011-09-22 18:54 +0200 http://bitbucket.org/pypy/pypy/changeset/f3cec8c65a3f/ Log: A branch of PyPy to start playing with STM. See also my hacks repository, more precisely this dir: https://bitbucket.org/arigo/arigo/raw/default/hack/stm/pypy From noreply at buildbot.pypy.org Thu Sep 22 18:59:38 2011 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Sep 2011 18:59:38 +0200 (CEST) Subject: [pypy-commit] pypy stm: Make the exc_data structure a thread-local. 
Message-ID: <20110922165938.9CEBA820CF@wyvern.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm Changeset: r47397:2869bd44f830 Date: 2011-09-22 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/2869bd44f830/ Log: Make the exc_data structure a thread-local. diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -471,7 +471,8 @@ def setup_excdata(self): EXCDATA = lltype.Struct('ExcData', ('exc_type', self.lltype_of_exception_type), - ('exc_value', self.lltype_of_exception_value)) + ('exc_value', self.lltype_of_exception_value), + hints={'thread_local': True}) self.EXCDATA = EXCDATA exc_data = lltype.malloc(EXCDATA, immortal=True) From noreply at buildbot.pypy.org Thu Sep 22 19:08:27 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 22 Sep 2011 19:08:27 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: add ullong_from_float Message-ID: <20110922170827.52D3A820CF@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47398:b72e47360723 Date: 2011-09-22 17:07 +0000 http://bitbucket.org/pypy/pypy/changeset/b72e47360723/ Log: add ullong_from_float diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -330,6 +330,9 @@ def _ll_1_llong_from_float(xf): return r_longlong(xf) +def _ll_1_ullong_from_float(xf): + return r_ulonglong(xf) + def _ll_1_llong_to_float(xll): return float(rffi.cast(lltype.SignedLongLong, xll)) From noreply at buildbot.pypy.org Thu Sep 22 19:19:11 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Sep 2011 19:19:11 +0200 (CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: kill the now obsolete dryrun Message-ID: <20110922171911.6E47D820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-optimizeopt-cleanups Changeset: r47399:77e7b10d2e1c 
Date: 2011-09-22 19:18 +0200 http://bitbucket.org/pypy/pypy/changeset/77e7b10d2e1c/ Log: kill the now obsolete dryrun diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -251,12 +251,6 @@ def emit_operation(self, op): self.next_optimization.propagate_forward(op) - def test_emittable(self, op): - return self.is_emittable(op) - - def is_emittable(self, op): - return self.next_optimization.test_emittable(op) - # FIXME: Move some of these here? def getvalue(self, box): return self.optimizer.getvalue(box) @@ -498,9 +492,6 @@ self.producer[op.result] = op dispatch_opt(self, op) - def test_emittable(self, op): - return True - def emit_operation(self, op): if op.returns_bool_result(): self.bool_boxes[self.getvalue(op.result)] = None diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -31,19 +31,6 @@ dispatch_opt(self, op) - def test_emittable(self, op): - opnum = op.getopnum() - for value, cls, func in optimize_guards: - if opnum == value: - assert isinstance(op, cls) - try: - func(self, op, dryrun=True) - return self.is_emittable(op) - except InvalidLoop: - return False - return self.is_emittable(op) - - def try_boolinvers(self, op, targs): oldop = self.optimizer.pure_operations.get(targs, None) if oldop is not None and oldop.getdescr() is op.getdescr(): @@ -247,7 +234,7 @@ self.emit_operation(ResOperation(rop.CALL, args, op.result, op.getdescr())) - def optimize_guard(self, op, constbox, emit_operation=True, dryrun=False): + def optimize_guard(self, op, constbox, emit_operation=True): value = self.getvalue(op.getarg(0)) if value.is_constant(): box = value.box @@ -255,36 +242,33 @@ if not box.same_constant(constbox): raise InvalidLoop return - if dryrun: return if 
emit_operation: self.emit_operation(op) value.make_constant(constbox) self.optimizer.turned_constant(value) - def optimize_GUARD_ISNULL(self, op, dryrun=False): + def optimize_GUARD_ISNULL(self, op): value = self.getvalue(op.getarg(0)) if value.is_null(): return elif value.is_nonnull(): raise InvalidLoop - if dryrun: return self.emit_operation(op) value.make_constant(self.optimizer.cpu.ts.CONST_NULL) - def optimize_GUARD_NONNULL(self, op, dryrun=False): + def optimize_GUARD_NONNULL(self, op): value = self.getvalue(op.getarg(0)) if value.is_nonnull(): return elif value.is_null(): raise InvalidLoop - if dryrun: return self.emit_operation(op) value.make_nonnull(len(self.optimizer.newoperations) - 1) - def optimize_GUARD_VALUE(self, op, dryrun=False): + def optimize_GUARD_VALUE(self, op): value = self.getvalue(op.getarg(0)) emit_operation = True - if not dryrun and value.last_guard_index != -1: + if value.last_guard_index != -1: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. 
# replace the original guard with a guard_value @@ -302,15 +286,15 @@ emit_operation = False constbox = op.getarg(1) assert isinstance(constbox, Const) - self.optimize_guard(op, constbox, emit_operation, dryrun) + self.optimize_guard(op, constbox, emit_operation) - def optimize_GUARD_TRUE(self, op, dryrun=False): - self.optimize_guard(op, CONST_1, dryrun=dryrun) + def optimize_GUARD_TRUE(self, op): + self.optimize_guard(op, CONST_1) - def optimize_GUARD_FALSE(self, op, dryrun=False): - self.optimize_guard(op, CONST_0, dryrun=dryrun) + def optimize_GUARD_FALSE(self, op): + self.optimize_guard(op, CONST_0) - def optimize_GUARD_CLASS(self, op, dryrun=False): + def optimize_GUARD_CLASS(self, op): value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) assert isinstance(expectedclassbox, Const) @@ -319,7 +303,6 @@ if realclassbox.same_constant(expectedclassbox): return raise InvalidLoop - if dryrun: return emit_operation = True if value.last_guard_index != -1: # there already has been a guard_nonnull or guard_class or @@ -345,12 +328,13 @@ last_guard_index = value.last_guard_index value.make_constant_class(expectedclassbox, last_guard_index) - def optimize_GUARD_NONNULL_CLASS(self, op, dryrun=False): - self.optimize_GUARD_NONNULL(op, True) - self.optimize_GUARD_CLASS(op, dryrun) + def optimize_GUARD_NONNULL_CLASS(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_null(): + raise InvalidLoop + self.optimize_GUARD_CLASS(op) - def optimize_GUARD_NO_EXCEPTION(self, op, dryrun=False): - if dryrun: return + def optimize_GUARD_NO_EXCEPTION(self, op): if not self.optimizer.exception_might_have_happened: return self.emit_operation(op) diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -483,11 +483,6 @@ if op.getopnum() == rop.JUMP: loop_token = op.getdescr() assert isinstance(loop_token, LoopToken) - # FIXME: 
Use a tree, similar to the tree formed by the full - # preamble and it's bridges, instead of a list to save time and - # memory. This should also allow better behaviour in - # situations that the is_emittable() chain currently cant - # handle and the inlining fails unexpectedly belwo. short = loop_token.short_preamble if short: args = op.getarglist() From noreply at buildbot.pypy.org Thu Sep 22 19:21:08 2011 From: noreply at buildbot.pypy.org (hager) Date: Thu, 22 Sep 2011 19:21:08 +0200 (CEST) Subject: [pypy-commit] pypy ppc-jit-backend: Moved code for assembler prolog to the end of the generated code. Message-ID: <20110922172108.CF87A820CF@wyvern.cs.uni-duesseldorf.de> Author: hager Branch: ppc-jit-backend Changeset: r47400:95f203d329b1 Date: 2011-09-22 19:20 +0200 http://bitbucket.org/pypy/pypy/changeset/95f203d329b1/ Log: Moved code for assembler prolog to the end of the generated code. diff --git a/pypy/jit/backend/ppc/ppcgen/codebuilder.py b/pypy/jit/backend/ppc/ppcgen/codebuilder.py --- a/pypy/jit/backend/ppc/ppcgen/codebuilder.py +++ b/pypy/jit/backend/ppc/ppcgen/codebuilder.py @@ -23,6 +23,8 @@ from pypy.jit.metainterp.history import (BoxInt, ConstInt, ConstPtr, ConstFloat, Box, INT, REF, FLOAT) from pypy.jit.backend.x86.support import values_array +from pypy.tool.udir import udir +from pypy.rlib.objectmodel import we_are_translated A = Form("frD", "frA", "frB", "XO3", "Rc") A1 = Form("frD", "frB", "XO3", "Rc") @@ -968,12 +970,26 @@ for inst in insts: self.write32(inst.assemble()) + def _dump_trace(self, addr, name, formatter=-1): + if not we_are_translated(): + if formatter != -1: + name = name % formatter + dir = udir.ensure('asm', dir=True) + f = dir.join(name).open('wb') + data = rffi.cast(rffi.CCHARP, addr) + for i in range(self.currpos()): + f.write(data[i]) + f.close() + def write32(self, word): self.writechar(chr((word >> 24) & 0xFF)) self.writechar(chr((word >> 16) & 0xFF)) self.writechar(chr((word >> 8) & 0xFF)) self.writechar(chr(word & 0xFF)) + 
def currpos(self): + return self.get_rel_pos() + class BranchUpdater(PPCAssembler): def __init__(self): PPCAssembler.__init__(self) diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py --- a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py +++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py @@ -1833,6 +1833,8 @@ # self.copy_to_raw_memory(addr) # # def assemble(self, dump=os.environ.has_key('PYPY_DEBUG')): + + # insns = self.assemble0(dump) # for i in insns: # self.emit(i) @@ -2004,16 +2006,19 @@ clt.asmmemmgr = [] return clt.asmmemmgr_blocks - def _make_prologue(self): + def _make_prologue(self, target_pos): if IS_PPC_32: self.mc.stwu(1, 1, -self.framesize) - self.mc.mflr(0) - self.mc.stw(0, 1, self.framesize + 4) + self.mc.mflr(0) # move old link register + self.mc.stw(0, 1, self.framesize + 4) # save it in previous frame else: self.mc.stdu(1, 1, -self.framesize) self.mc.mflr(0) self.mc.std(0, 1, self.framesize + 4) self._save_nonvolatiles() + curpos = self.mc.currpos() + offset = target_pos - curpos + self.mc.b(offset) def _make_epilogue(self): for op_index, fail_index, guard, reglist in self.patch_list: @@ -2084,35 +2089,36 @@ self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) def assemble_loop(self, inputargs, operations, looptoken, log): - self.framesize = 256 + GPR_SAVE_AREA - self.patch_list = [] - self.pending_guards = [] - self.mc = PPCBuilder() - self.startpos = self.mc.get_rel_pos() clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt self.setup(looptoken, operations) + self.framesize = 256 + GPR_SAVE_AREA + self.patch_list = [] + self.pending_guards = [] + self.startpos = self.mc.get_rel_pos() longevity = compute_vars_longevity(inputargs, operations) regalloc = Regalloc(longevity, assembler=self, frame_manager=PPCFrameManager()) - self._make_prologue() nonfloatlocs = regalloc.prepare_loop(inputargs, operations, looptoken) + regalloc_head = self.mc.currpos() 
self.gen_bootstrap_code(nonfloatlocs, inputargs) - looptoken._ppc_loop_code = self.mc.get_rel_pos() + loophead = self.mc.currpos() + looptoken._ppc_loop_code = loophead looptoken._ppc_arglocs = [nonfloatlocs] looptoken._ppc_bootstrap_code = 0 self._walk_operations(operations, regalloc) + start_pos = self.mc.currpos() + self._make_prologue(regalloc_head) self._make_epilogue() - #loop_start = self.mc.assemble() loop_start = self.materialize_loop(looptoken) - looptoken.ppc_code = loop_start + looptoken.ppc_code = loop_start + start_pos self._teardown() def _teardown(self): From noreply at buildbot.pypy.org Thu Sep 22 20:10:18 2011 From: noreply at buildbot.pypy.org (justinpeel) Date: Thu, 22 Sep 2011 20:10:18 +0200 (CEST) Subject: [pypy-commit] pypy unsigned-dtypes: fix test Message-ID: <20110922181018.7BDE0820CF@wyvern.cs.uni-duesseldorf.de> Author: Justin Peel Branch: unsigned-dtypes Changeset: r47401:3868cf6ce395 Date: 2011-09-22 18:08 +0000 http://bitbucket.org/pypy/pypy/changeset/3868cf6ce395/ Log: fix test diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -130,12 +130,12 @@ def test_floats_ulonglong(self): def f(i): - # i == 10**17 - f = i * 123.5 + # i == 1000000000 + f = i * 12350000000.0 n = r_ulonglong(f) compare(n, -1419508847, 538116096) return float(n) - res = self.interp_operations(f, [10**17]) + res = self.interp_operations(f, [1000000000]) assert res == 12350000000000000000.0 def test_unsigned_compare_ops(self): From noreply at buildbot.pypy.org Thu Sep 22 20:40:44 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Sep 2011 20:40:44 +0200 (CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: pass strengthen guards down the optimization chain instead of modifying optimizer.newoperations directly Message-ID: <20110922184044.7C16B820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo 
Branch: jit-optimizeopt-cleanups Changeset: r47402:0fbcbb4e7bc7 Date: 2011-09-22 20:32 +0200 http://bitbucket.org/pypy/pypy/changeset/0fbcbb4e7bc7/ Log: pass strengthen guards down the optimization chain instead of modifying optimizer.newoperations directly diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -31,8 +31,8 @@ class OptValue(object): __metaclass__ = extendabletype - _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', 'lenbound') - last_guard_index = -1 + _attrs_ = ('box', 'known_class', 'last_guard', 'level', 'intbound', 'lenbound') + last_guard = None level = LEVEL_UNKNOWN known_class = None @@ -100,7 +100,7 @@ self.make_constant(other.get_key_box()) optimizer.turned_constant(self) elif other.level == LEVEL_KNOWNCLASS: - self.make_constant_class(other.known_class, -1) + self.make_constant_class(other.known_class, None) else: if other.level == LEVEL_NONNULL: self.ensure_nonnull() @@ -162,16 +162,16 @@ else: return None - def make_constant_class(self, classbox, opindex): + def make_constant_class(self, classbox, guardop): assert self.level < LEVEL_KNOWNCLASS self.known_class = classbox self.level = LEVEL_KNOWNCLASS - self.last_guard_index = opindex + self.last_guard = guardop - def make_nonnull(self, opindex): + def make_nonnull(self, guardop): assert self.level < LEVEL_NONNULL self.level = LEVEL_NONNULL - self.last_guard_index = opindex + self.last_guard = guardop def is_nonnull(self): level = self.level @@ -331,6 +331,7 @@ self.exception_might_have_happened = False self.quasi_immutable_deps = None self.opaque_pointers = {} + self.replaces_guard = {} self.newoperations = [] if loop is not None: self.call_pure_results = loop.call_pure_results @@ -511,11 +512,27 @@ self.metainterp_sd.profiler.count(jitprof.OPT_OPS) if op.is_guard(): 
self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS) - op = self.store_final_boxes_in_guard(op) + if self.replaces_guard and op in self.replaces_guard: + self.replace_op(self.replaces_guard[op], op) + del self.replaces_guard[op] + return + else: + op = self.store_final_boxes_in_guard(op) elif op.can_raise(): self.exception_might_have_happened = True self.newoperations.append(op) + def replace_op(self, old_op, new_op): + # XXX: Do we want to cache indexes to prevent search? + i = len(self.newoperations) + while i > 0: + i -= 1 + if self.newoperations[i] is old_op: + self.newoperations[i] = new_op + break + else: + assert False + def store_final_boxes_in_guard(self, op): descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -263,30 +263,28 @@ elif value.is_null(): raise InvalidLoop self.emit_operation(op) - value.make_nonnull(len(self.optimizer.newoperations) - 1) + value.make_nonnull(op) def optimize_GUARD_VALUE(self, op): value = self.getvalue(op.getarg(0)) - emit_operation = True - if value.last_guard_index != -1: + if value.last_guard: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. # replace the original guard with a guard_value - old_guard_op = self.optimizer.newoperations[value.last_guard_index] - new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE, - args = [old_guard_op.getarg(0), op.getarg(1)]) - self.optimizer.newoperations[value.last_guard_index] = new_guard_op + old_guard_op = value.last_guard + op = old_guard_op.copy_and_change(rop.GUARD_VALUE, + args = [old_guard_op.getarg(0), op.getarg(1)]) + self.optimizer.replaces_guard[op] = old_guard_op # hack hack hack. 
Change the guard_opnum on # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. - descr = new_guard_op.getdescr() + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_VALUE - descr.make_a_counter_per_value(new_guard_op) - emit_operation = False + descr.make_a_counter_per_value(op) constbox = op.getarg(1) assert isinstance(constbox, Const) - self.optimize_guard(op, constbox, emit_operation) + self.optimize_guard(op, constbox) def optimize_GUARD_TRUE(self, op): self.optimize_guard(op, CONST_1) @@ -303,30 +301,24 @@ if realclassbox.same_constant(expectedclassbox): return raise InvalidLoop - emit_operation = True - if value.last_guard_index != -1: + if value.last_guard: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. - old_guard_op = self.optimizer.newoperations[value.last_guard_index] + old_guard_op = value.last_guard if old_guard_op.getopnum() == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. - new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, + op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, args = [old_guard_op.getarg(0), op.getarg(1)]) - self.optimizer.newoperations[value.last_guard_index] = new_guard_op + self.optimizer.replaces_guard[op] = old_guard_op # hack hack hack. Change the guard_opnum on # new_guard_op.getdescr() so that when resuming, # the operation is not skipped by pyjitpl.py. 
- descr = new_guard_op.getdescr() + descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) descr.guard_opnum = rop.GUARD_NONNULL_CLASS - emit_operation = False - if emit_operation: - self.emit_operation(op) - last_guard_index = len(self.optimizer.newoperations) - 1 - else: - last_guard_index = value.last_guard_index - value.make_constant_class(expectedclassbox, last_guard_index) + self.emit_operation(op) + value.make_constant_class(expectedclassbox, op) def optimize_GUARD_NONNULL_CLASS(self, op): value = self.getvalue(op.getarg(0)) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -221,7 +221,6 @@ newop.setfailargs(self.getfailargs()) return newop - # ============ # arity mixins # ============ From noreply at buildbot.pypy.org Thu Sep 22 20:40:45 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Sep 2011 20:40:45 +0200 (CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: dont assume operations originate from optimizer.loop.operations Message-ID: <20110922184045.A00A6820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-optimizeopt-cleanups Changeset: r47403:25853ef7976c Date: 2011-09-22 20:36 +0200 http://bitbucket.org/pypy/pypy/changeset/25853ef7976c/ Log: dont assume operations originate from optimizer.loop.operations diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -475,11 +475,8 @@ self.exception_might_have_happened = self.bridge self.newoperations = [] self.first_optimization.propagate_begin_forward() - self.i = 0 - while self.i < len(self.loop.operations): - op = self.loop.operations[self.i] + for op in self.loop.operations: self.first_optimization.propagate_forward(op) - self.i += 1 
self.first_optimization.propagate_end_forward() self.loop.operations = self.newoperations self.loop.quasi_immutable_deps = self.quasi_immutable_deps From noreply at buildbot.pypy.org Thu Sep 22 20:40:46 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Sep 2011 20:40:46 +0200 (CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: these does not seem to be used and propagate_end_forward is probably intended for the same functionality as Optimization.flush() Message-ID: <20110922184046.C2027820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-optimizeopt-cleanups Changeset: r47404:5305a3642a0f Date: 2011-09-22 20:39 +0200 http://bitbucket.org/pypy/pypy/changeset/5305a3642a0f/ Log: these does not seem to be used and propagate_end_forward is probably intended for the same functionality as Optimization.flush() diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -237,14 +237,6 @@ def __init__(self): pass # make rpython happy - def propagate_begin_forward(self): - if self.next_optimization: - self.next_optimization.propagate_begin_forward() - - def propagate_end_forward(self): - if self.next_optimization: - self.next_optimization.propagate_end_forward() - def propagate_forward(self, op): raise NotImplementedError @@ -474,10 +466,8 @@ def propagate_all_forward(self): self.exception_might_have_happened = self.bridge self.newoperations = [] - self.first_optimization.propagate_begin_forward() for op in self.loop.operations: self.first_optimization.propagate_forward(op) - self.first_optimization.propagate_end_forward() self.loop.operations = self.newoperations self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters From noreply at buildbot.pypy.org Thu Sep 22 21:19:09 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Sep 2011 21:19:09 +0200 
(CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: pospone ops instead of reordering already emitted ops Message-ID: <20110922191909.91A21820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-optimizeopt-cleanups Changeset: r47405:8dfeb716d0ab Date: 2011-09-22 21:18 +0200 http://bitbucket.org/pypy/pypy/changeset/8dfeb716d0ab/ Log: pospone ops instead of reordering already emitted ops diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -158,6 +158,7 @@ self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False + self.posponedop = None def force_at_end_of_preamble(self): self.force_all_lazy_setfields_and_arrayitems() @@ -211,7 +212,14 @@ def emit_operation(self, op): self.emitting_operation(op) - self.next_optimization.propagate_forward(op) + if self.posponedop: + self.next_optimization.propagate_forward(self.posponedop) + self.posponedop = None + if (op.is_comparison() or op.getopnum() == rop.CALL_MAY_FORCE + or op.is_ovf()): + self.posponedop = op + else: + self.next_optimization.propagate_forward(op) def emitting_operation(self, op): if op.has_no_side_effect(): @@ -293,30 +301,6 @@ if indexvalue is None or indexvalue.intbound.contains(idx): cf.force_lazy_setfield(self, can_cache) - def fixup_guard_situation(self): - # hackish: reverse the order of the last two operations if it makes - # sense to avoid a situation like "int_eq/setfield_gc/guard_true", - # which the backend (at least the x86 backend) does not handle well. 
- newoperations = self.optimizer.newoperations - if len(newoperations) < 2: - return - lastop = newoperations[-1] - if (lastop.getopnum() != rop.SETFIELD_GC and - lastop.getopnum() != rop.SETARRAYITEM_GC): - return - # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" - # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" - # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - prevop = newoperations[-2] - opnum = prevop.getopnum() - if not (prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE - or prevop.is_ovf()): - return - if prevop.result in lastop.getarglist(): - return - newoperations[-2] = lastop - newoperations[-1] = prevop - def _assert_valid_cf(self, cf): # check that 'cf' is in cached_fields or cached_arrayitems if not we_are_translated(): @@ -362,7 +346,6 @@ fieldvalue.get_key_box(), itemindex)) else: cf.force_lazy_setfield(self) - self.fixup_guard_situation() return pendingfields def optimize_GETFIELD_GC(self, op): From noreply at buildbot.pypy.org Thu Sep 22 21:44:48 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 22 Sep 2011 21:44:48 +0200 (CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: started to move pure operation reusage into an optimization stage of its own Message-ID: <20110922194448.E9F1C820CF@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-optimizeopt-cleanups Changeset: r47406:8b3e60e6d037 Date: 2011-09-22 21:42 +0200 http://bitbucket.org/pypy/pypy/changeset/8b3e60e6d037/ Log: started to move pure operation reusage into an optimization stage of its own diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -7,6 +7,7 @@ from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall from pypy.jit.metainterp.optimizeopt.simplify 
import OptSimplify +from pypy.jit.metainterp.optimizeopt.pure import OptPure from pypy.rlib.jit import PARAMETERS from pypy.rlib.unroll import unrolling_iterable @@ -14,6 +15,7 @@ ('rewrite', OptRewrite), ('virtualize', OptVirtualize), ('string', OptString), + ('pure', OptPure), ('heap', OptHeap), ('ffi', None), ('unroll', None)] diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -165,6 +165,9 @@ def flush(self): self.force_all_lazy_setfields_and_arrayitems() + if self.posponedop: + self.next_optimization.propagate_forward(self.posponedop) + self.posponedop = None def new(self): return OptHeap() diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -566,48 +566,8 @@ args[n + 1] = op.getdescr() return args - @specialize.argtype(0) def optimize_default(self, op): - canfold = op.is_always_pure() - if op.is_ovf(): - self.posponedop = op - return - if self.posponedop: - nextop = op - op = self.posponedop - self.posponedop = None - canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW - else: - nextop = None - - if canfold: - for i in range(op.numargs()): - if self.get_constant_box(op.getarg(i)) is None: - break - else: - # all constant arguments: constant-fold away - resbox = self.constant_fold(op) - # note that INT_xxx_OVF is not done from here, and the - # overflows in the INT_xxx operations are ignored - self.make_constant(op.result, resbox) - return - - # did we do the exact same operation already? 
- args = self.make_args_key(op) - oldop = self.pure_operations.get(args, None) - if oldop is not None and oldop.getdescr() is op.getdescr(): - assert oldop.getopnum() == op.getopnum() - self.make_equal_to(op.result, self.getvalue(oldop.result), - True) - return - else: - self.pure_operations[args] = op - self.remember_emitting_pure(op) - - # otherwise, the operation remains self.emit_operation(op) - if nextop: - self.emit_operation(nextop) def remember_emitting_pure(self, op): pass diff --git a/pypy/jit/metainterp/optimizeopt/pure.py b/pypy/jit/metainterp/optimizeopt/pure.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/optimizeopt/pure.py @@ -0,0 +1,57 @@ +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization +from pypy.jit.metainterp.resoperation import rop, ResOperation + +class OptPure(Optimization): + def __init__(self): + self.posponedop = None + + def propagate_forward(self, op): + canfold = op.is_always_pure() + if op.is_ovf(): + self.posponedop = op + return + if self.posponedop: + nextop = op + op = self.posponedop + self.posponedop = None + canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW + else: + nextop = None + + if canfold: + for i in range(op.numargs()): + if self.get_constant_box(op.getarg(i)) is None: + break + else: + # all constant arguments: constant-fold away + resbox = self.optimizer.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.optimizer.make_constant(op.result, resbox) + return + + # did we do the exact same operation already? 
+ args = self.optimizer.make_args_key(op) + oldop = self.optimizer.pure_operations.get(args, None) + if oldop is not None and oldop.getdescr() is op.getdescr(): + assert oldop.getopnum() == op.getopnum() + self.optimizer.make_equal_to(op.result, self.getvalue(oldop.result), + True) + return + else: + self.optimizer.pure_operations[args] = op + self.optimizer.remember_emitting_pure(op) + + # otherwise, the operation remains + self.emit_operation(op) + if op.returns_bool_result(): + self.optimizer.bool_boxes[self.getvalue(op.result)] = None + if nextop: + self.emit_operation(nextop) + + def flush(self): + assert self.posponedop is None + + def new(self): + assert self.posponedop is None + return OptPure() diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -111,7 +111,7 @@ class BaseTestBasic(BaseTest): - enable_opts = "intbounds:rewrite:virtualize:string:heap" + enable_opts = "intbounds:rewrite:virtualize:string:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py @@ -36,7 +36,7 @@ class TestFfiCall(BaseTestBasic, LLtypeMixin): - enable_opts = "intbounds:rewrite:virtualize:string:heap:ffi" + enable_opts = "intbounds:rewrite:virtualize:string:pure:heap:ffi" class namespace: cpu = LLtypeMixin.cpu diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -68,7 +68,7 @@ class 
BaseTestWithUnroll(BaseTest): - enable_opts = "intbounds:rewrite:virtualize:string:heap:unroll" + enable_opts = "intbounds:rewrite:virtualize:string:pure:heap:unroll" def optimize_loop(self, ops, expected, expected_preamble=None, call_pure_results=None, expected_short=None): From noreply at buildbot.pypy.org Fri Sep 23 01:40:22 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Sep 2011 01:40:22 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a very rough talk outline Message-ID: <20110922234022.96022820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3902:76bda7cc8d34 Date: 2011-09-23 00:21 +0200 http://bitbucket.org/pypy/extradoc/changeset/76bda7cc8d34/ Log: Add a very rough talk outline diff --git a/talk/pyconar2011/talk.rst b/talk/pyconar2011/talk.rst new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/talk.rst @@ -0,0 +1,95 @@ + +Little things that PyPy makes possible +====================================== + +xxx + +Python +------ + +* Python is great + +|pause| + +* Python is a glue language + +|pause| + +* Python is slow + +Is it? +------ + +* xkcd [citation needed] + +Things you can do with PyPy +--------------------------- + +* XXX demos + +What is PyPy? +------------- + +* PyPy is many things + +* **just another python implementation** + +.. sourcecode:: bash + + pypy x.py + +What is PyPy (2)? +----------------- + +* Comes with a JIT compiler + +How fast is PyPy? +----------------- + +* XXX speed website + +* XXX django over time + +* XXX v8 vs pypy + +How fast is PyPy? +----------------- + +* pretty damn fast + +PyPy 1.6 - status +----------------- + +* Released on XXX + +* Python 2.7.1 + +* The most compatible alternative to CPython + +* Most programs just work + +* (C extensions might not) + +Contacts, Q/A +-------------- + +- http://pypy.org + +- blog: http://morepypy.blogspot.com + +- mailing list: pypy-dev at python.org + +- IRC: #pypy on freenode + +.. 
image:: ../ep2011/talk/question-mark.png + :scale: 10% + :align: center + +Shameless ad +------------ + +* Want to make run your software fast? + +* We can make it happen + +* fijall at gmail.com From noreply at buildbot.pypy.org Fri Sep 23 01:40:23 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Sep 2011 01:40:23 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: update and missing files Message-ID: <20110922234023.BCB12820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3903:6e1a2ccda6e0 Date: 2011-09-23 01:36 +0200 http://bitbucket.org/pypy/extradoc/changeset/6e1a2ccda6e0/ Log: update and missing files diff --git a/talk/pycon2011/whyslow/Makefile b/talk/pycon2011/whyslow/Makefile --- a/talk/pycon2011/whyslow/Makefile +++ b/talk/pycon2011/whyslow/Makefile @@ -7,4 +7,4 @@ pdflatex whyslow-talk.latex || exit view: whyslow-talk.pdf - evince whyslow-talk.pdf & \ No newline at end of file + evince whyslow-talk.pdf & diff --git a/talk/pyconar2011/Makefile b/talk/pyconar2011/Makefile new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/Makefile @@ -0,0 +1,13 @@ + + +talk.pdf: talk.rst author.latex title.latex stylesheet.latex + rst2beamer.py --input-encoding=utf-8 --output-encoding=utf-8 --stylesheet=stylesheet.latex --documentoptions=14pt --theme=Warsaw talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +clean: + rm talk.pdf talk.latex + +view: talk.pdf + evince talk.pdf & diff --git a/talk/pyconar2011/author.latex b/talk/pyconar2011/author.latex new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/author.latex @@ -0,0 +1,8 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[Little things that PyPy makes possible]{Little things that PyPy makes possible} +\author[fijal] +{Maciej Fijałkowski} + +\institute{Pycon Argentina 2011} +\date{Sep 23 2011} diff 
--git a/talk/pyconar2011/beamerdefs.txt b/talk/pyconar2011/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. 
|end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/pyconar2011/stylesheet.latex b/talk/pyconar2011/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/stylesheet.latex @@ -0,0 +1,12 @@ +\usepackage{ulem} +\usetheme{Boadilla} +\usecolortheme{whale} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/pyconar2011/talk.rst b/talk/pyconar2011/talk.rst --- a/talk/pyconar2011/talk.rst +++ b/talk/pyconar2011/talk.rst @@ -1,8 +1,8 @@ +.. include:: beamerdefs.txt -Little things that PyPy makes possible -====================================== - -xxx +============================= +Making little things possible +============================= Python ------ @@ -20,12 +20,24 @@ Is it? ------ -* xkcd [citation needed] +.. image:: wikipedian_protester.png + :scale: 700% + :align: center -Things you can do with PyPy ---------------------------- +Things you can do with PyPy (and not with CPython) +-------------------------------------------------- -* XXX demos +|pause| + +* real time video processing + +|pause| + +* software-rendered games + +|pause| + +* this is just the beginning! What is PyPy? 
------------- diff --git a/talk/pyconar2011/title.latex b/talk/pyconar2011/title.latex new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/title.latex @@ -0,0 +1,5 @@ +\begin{titlepage} +\begin{figure}[h] +\includegraphics[width=60px]{../img/py-web-new.png} +\end{figure} +\end{titlepage} diff --git a/talk/pyconar2011/wikipedian_protester.png b/talk/pyconar2011/wikipedian_protester.png new file mode 100644 index 0000000000000000000000000000000000000000..42203e81c752287c5555824eacdf901649ffb8fb GIT binary patch [cut] From noreply at buildbot.pypy.org Fri Sep 23 01:40:24 2011 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 23 Sep 2011 01:40:24 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20110922234024.DFCD3820CF@wyvern.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3904:4e77b8a47710 Date: 2011-09-23 01:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/4e77b8a47710/ Log: merge diff --git a/blog/draft/py3donate.rst b/blog/draft/py3donate.rst --- a/blog/draft/py3donate.rst +++ b/blog/draft/py3donate.rst @@ -2,11 +2,14 @@ Py3k for PyPy fundraiser ======================== -Hello +Hi, -We would like to announce a donation campaign for implementing python 3 in PyPy. -Please read the `detailed plan`_ for further details and donate using -`our website`_. +We would like to announce a donation campaign for implementing Python 3 in PyPy. +Please read our `detailed plan`_ for all the details and donate using the +button on that page! + +Thanks, +The PyPy Team .. _`detailed plan`: http://pypy.org/py3donate.html .. _`our website`: http://pypy.org From noreply at buildbot.pypy.org Fri Sep 23 02:01:42 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 Sep 2011 02:01:42 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: dead link. 
Message-ID: <20110923000142.46CCC820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3905:3574c17e185d Date: 2011-09-21 12:43 -0400 http://bitbucket.org/pypy/extradoc/changeset/3574c17e185d/ Log: dead link. diff --git a/blog/draft/py3donate.rst b/blog/draft/py3donate.rst --- a/blog/draft/py3donate.rst +++ b/blog/draft/py3donate.rst @@ -12,4 +12,3 @@ The PyPy Team .. _`detailed plan`: http://pypy.org/py3donate.html -.. _`our website`: http://pypy.org From noreply at buildbot.pypy.org Fri Sep 23 02:01:43 2011 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 Sep 2011 02:01:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merged upstream (what did I change?) Message-ID: <20110923000143.6B798820CF@wyvern.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r3906:c4fcced99b24 Date: 2011-09-22 20:01 -0400 http://bitbucket.org/pypy/extradoc/changeset/c4fcced99b24/ Log: merged upstream (what did I change?) diff --git a/talk/pycon2011/whyslow/Makefile b/talk/pycon2011/whyslow/Makefile --- a/talk/pycon2011/whyslow/Makefile +++ b/talk/pycon2011/whyslow/Makefile @@ -7,4 +7,4 @@ pdflatex whyslow-talk.latex || exit view: whyslow-talk.pdf - evince whyslow-talk.pdf & \ No newline at end of file + evince whyslow-talk.pdf & diff --git a/talk/pyconar2011/Makefile b/talk/pyconar2011/Makefile new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/Makefile @@ -0,0 +1,13 @@ + + +talk.pdf: talk.rst author.latex title.latex stylesheet.latex + rst2beamer.py --input-encoding=utf-8 --output-encoding=utf-8 --stylesheet=stylesheet.latex --documentoptions=14pt --theme=Warsaw talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +clean: + rm talk.pdf talk.latex + +view: talk.pdf + evince talk.pdf & diff --git a/talk/pyconar2011/author.latex b/talk/pyconar2011/author.latex new file mode 
100644 --- /dev/null +++ b/talk/pyconar2011/author.latex @@ -0,0 +1,8 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[Little things that PyPy makes possible]{Little things that PyPy makes possible} +\author[fijal] +{Maciej Fijałkowski} + +\institute{Pycon Argentina 2011} +\date{Sep 23 2011} diff --git a/talk/pyconar2011/beamerdefs.txt b/talk/pyconar2011/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. 
|end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/pyconar2011/stylesheet.latex b/talk/pyconar2011/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/stylesheet.latex @@ -0,0 +1,12 @@ +\usepackage{ulem} +\usetheme{Boadilla} +\usecolortheme{whale} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/pyconar2011/talk.rst b/talk/pyconar2011/talk.rst new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/talk.rst @@ -0,0 +1,107 @@ +.. include:: beamerdefs.txt + +============================= +Making little things possible +============================= + +Python +------ + +* Python is great + +|pause| + +* Python is a glue language + +|pause| + +* Python is slow + +Is it? +------ + +.. image:: wikipedian_protester.png + :scale: 700% + :align: center + +Things you can do with PyPy (and not with CPython) +-------------------------------------------------- + +|pause| + +* real time video processing + +|pause| + +* software-rendered games + +|pause| + +* this is just the beginning! + +What is PyPy? +------------- + +* PyPy is many things + +* **just another python implementation** + +.. sourcecode:: bash + + pypy x.py + +What is PyPy (2)? +----------------- + +* Comes with a JIT compiler + +How fast is PyPy? +----------------- + +* XXX speed website + +* XXX django over time + +* XXX v8 vs pypy + +How fast is PyPy? 
+----------------- + +* pretty damn fast + +PyPy 1.6 - status +----------------- + +* Released on XXX + +* Python 2.7.1 + +* The most compatible alternative to CPython + +* Most programs just work + +* (C extensions might not) + +Contacts, Q/A +-------------- + +- http://pypy.org + +- blog: http://morepypy.blogspot.com + +- mailing list: pypy-dev at python.org + +- IRC: #pypy on freenode + +.. image:: ../ep2011/talk/question-mark.png + :scale: 10% + :align: center + +Shameless ad +------------ + +* Want to make run your software fast? + +* We can make it happen + +* fijall at gmail.com diff --git a/talk/pyconar2011/title.latex b/talk/pyconar2011/title.latex new file mode 100644 --- /dev/null +++ b/talk/pyconar2011/title.latex @@ -0,0 +1,5 @@ +\begin{titlepage} +\begin{figure}[h] +\includegraphics[width=60px]{../img/py-web-new.png} +\end{figure} +\end{titlepage} diff --git a/talk/pyconar2011/wikipedian_protester.png b/talk/pyconar2011/wikipedian_protester.png new file mode 100644 index 0000000000000000000000000000000000000000..42203e81c752287c5555824eacdf901649ffb8fb GIT binary patch [cut] From noreply at buildbot.pypy.org Fri Sep 23 08:34:04 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 23 Sep 2011 08:34:04 +0200 (CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: Make forced strings emit operations using OptString.emit_operation. That way they will be inserted into the optimization chain at the point where the were removed in the first place. Message-ID: <20110923063404.0F2DC820D1@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-optimizeopt-cleanups Changeset: r47407:ed0c682b0f83 Date: 2011-09-23 08:33 +0200 http://bitbucket.org/pypy/pypy/changeset/ed0c682b0f83/ Log: Make forced strings emit operations using OptString.emit_operation. That way they will be inserted into the optimization chain at the point where the were removed in the first place. 
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -305,6 +305,9 @@ def produce_potential_short_preamble_ops(self, potential_ops): pass + def forget_numberings(self, box): + self.optimizer.forget_numberings(box) + class Optimizer(Optimization): def __init__(self, metainterp_sd, loop, optimizations=None, bridge=False): diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -5961,13 +5961,18 @@ escape(i0) jump(p1) """ - expected = """ + preamble = """ [p1] i0 = ptr_eq(p1, NULL) escape(i0) - jump(p1) - """ - self.optimize_strunicode_loop_extradescrs(ops, expected, expected) + jump(p1, i0) + """ + expected = """ + [p1, i0] + escape(i0) + jump(p1, i0) + """ + self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) def test_str_equal_none2(self): ops = """ @@ -5976,13 +5981,18 @@ escape(i0) jump(p1) """ - expected = """ + preamble = """ [p1] i0 = ptr_eq(p1, NULL) escape(i0) - jump(p1) - """ - self.optimize_strunicode_loop_extradescrs(ops, expected, expected) + jump(p1, i0) + """ + expected = """ + [p1, i0] + escape(i0) + jump(p1, i0) + """ + self.optimize_strunicode_loop_extradescrs(ops, expected, preamble) def test_str_equal_nonnull1(self): ops = """ diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py --- a/pypy/jit/metainterp/optimizeopt/vstring.py +++ b/pypy/jit/metainterp/optimizeopt/vstring.py @@ -43,7 +43,7 @@ class __extend__(optimizer.OptValue): """New methods added to the base class OptValue for this file.""" - def getstrlen(self, optimization, mode): + def getstrlen(self, string_optimizer, mode): if mode is mode_string: s = 
self.get_constant_string_spec(mode_string) if s is not None: @@ -52,12 +52,12 @@ s = self.get_constant_string_spec(mode_unicode) if s is not None: return ConstInt(len(s)) - if optimization is None: + if string_optimizer is None: return None self.ensure_nonnull() box = self.force_box() lengthbox = BoxInt() - optimization.propagate_forward(ResOperation(mode.STRLEN, [box], lengthbox)) + string_optimizer.emit_operation(ResOperation(mode.STRLEN, [box], lengthbox)) return lengthbox @specialize.arg(1) @@ -68,21 +68,21 @@ else: return None - def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode): # Copies the pointer-to-string 'self' into the target string # given by 'targetbox', at the specified offset. Returns the offset # at the end of the copy. - lengthbox = self.getstrlen(optimizer, mode) + lengthbox = self.getstrlen(string_optimizer, mode) srcbox = self.force_box() - return copy_str_content(optimizer, srcbox, targetbox, + return copy_str_content(string_optimizer, srcbox, targetbox, CONST_0, offsetbox, lengthbox, mode) class VAbstractStringValue(virtualize.AbstractVirtualValue): _attrs_ = ('mode',) - def __init__(self, optimizer, keybox, source_op, mode): - virtualize.AbstractVirtualValue.__init__(self, optimizer, keybox, + def __init__(self, string_optimizer, keybox, source_op, mode): + virtualize.AbstractVirtualValue.__init__(self, string_optimizer, keybox, source_op) self.mode = mode @@ -140,15 +140,15 @@ return mode.emptystr.join([mode.chr(c.box.getint()) for c in self._chars]) - def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode): for i in range(len(self._chars)): charbox = self._chars[i].force_box() if not (isinstance(charbox, Const) and charbox.same_constant(CONST_0)): - optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, - offsetbox, - charbox], + 
string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, + offsetbox, + charbox], None)) - offsetbox = _int_add(optimizer, offsetbox, CONST_1) + offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) return offsetbox def get_args_for_fail(self, modifier): @@ -182,16 +182,16 @@ self.left = left self.right = right - def getstrlen(self, optimizer, mode): + def getstrlen(self, string_optimizer, mode): if self.lengthbox is None: - len1box = self.left.getstrlen(optimizer, mode) + len1box = self.left.getstrlen(string_optimizer, mode) if len1box is None: return None - len2box = self.right.getstrlen(optimizer, mode) + len2box = self.right.getstrlen(string_optimizer, mode) if len2box is None: return None - self.lengthbox = _int_add(optimizer, len1box, len2box) - # ^^^ may still be None, if optimizer is None + self.lengthbox = _int_add(string_optimizer, len1box, len2box) + # ^^^ may still be None, if string_optimizer is None return self.lengthbox @specialize.arg(1) @@ -204,10 +204,10 @@ return None return s1 + s2 - def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): - offsetbox = self.left.string_copy_parts(optimizer, targetbox, + def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode): + offsetbox = self.left.string_copy_parts(string_optimizer, targetbox, offsetbox, mode) - offsetbox = self.right.string_copy_parts(optimizer, targetbox, + offsetbox = self.right.string_copy_parts(string_optimizer, targetbox, offsetbox, mode) return offsetbox @@ -262,9 +262,9 @@ return s1[start : start + length] return None - def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): - lengthbox = self.getstrlen(optimizer, mode) - return copy_str_content(optimizer, + def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode): + lengthbox = self.getstrlen(string_optimizer, mode) + return copy_str_content(string_optimizer, self.vstr.force_box(), targetbox, self.vstart.force_box(), offsetbox, lengthbox, mode) @@ -295,7 
+295,7 @@ return modifier.make_vstrslice(self.mode is mode_unicode) -def copy_str_content(optimizer, srcbox, targetbox, +def copy_str_content(string_optimizer, srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox, mode, need_next_offset=True): if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): M = 5 @@ -305,26 +305,26 @@ # up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM # instead of just a COPYSTRCONTENT. for i in range(lengthbox.value): - charbox = _strgetitem(optimizer, srcbox, srcoffsetbox, mode) - srcoffsetbox = _int_add(optimizer, srcoffsetbox, CONST_1) - optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, - offsetbox, - charbox], + charbox = _strgetitem(string_optimizer, srcbox, srcoffsetbox, mode) + srcoffsetbox = _int_add(string_optimizer, srcoffsetbox, CONST_1) + string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, + offsetbox, + charbox], None)) - offsetbox = _int_add(optimizer, offsetbox, CONST_1) + offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) else: if need_next_offset: - nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox) + nextoffsetbox = _int_add(string_optimizer, offsetbox, lengthbox) else: nextoffsetbox = None op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox], None) - optimizer.emit_operation(op) + string_optimizer.emit_operation(op) offsetbox = nextoffsetbox return offsetbox -def _int_add(optimizer, box1, box2): +def _int_add(string_optimizer, box1, box2): if isinstance(box1, ConstInt): if box1.value == 0: return box2 @@ -332,23 +332,23 @@ return ConstInt(box1.value + box2.value) elif isinstance(box2, ConstInt) and box2.value == 0: return box1 - if optimizer is None: + if string_optimizer is None: return None resbox = BoxInt() - optimizer.propagate_forward(ResOperation(rop.INT_ADD, [box1, box2], resbox)) + string_optimizer.emit_operation(ResOperation(rop.INT_ADD, [box1, box2], resbox)) return resbox -def 
_int_sub(optimizer, box1, box2): +def _int_sub(string_optimizer, box1, box2): if isinstance(box2, ConstInt): if box2.value == 0: return box1 if isinstance(box1, ConstInt): return ConstInt(box1.value - box2.value) resbox = BoxInt() - optimizer.propagate_forward(ResOperation(rop.INT_SUB, [box1, box2], resbox)) + string_optimizer.emit_operation(ResOperation(rop.INT_SUB, [box1, box2], resbox)) return resbox -def _strgetitem(optimizer, strbox, indexbox, mode): +def _strgetitem(string_optimizer, strbox, indexbox, mode): if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): if mode is mode_string: s = strbox.getref(lltype.Ptr(rstr.STR)) @@ -357,8 +357,8 @@ s = strbox.getref(lltype.Ptr(rstr.UNICODE)) return ConstInt(ord(s.chars[indexbox.getint()])) resbox = BoxInt() - optimizer.propagate_forward(ResOperation(mode.STRGETITEM, [strbox, indexbox], - resbox)) + string_optimizer.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox], + resbox)) return resbox @@ -370,17 +370,17 @@ return OptString() def make_vstring_plain(self, box, source_op, mode): - vvalue = VStringPlainValue(self.optimizer, box, source_op, mode) + vvalue = VStringPlainValue(self, box, source_op, mode) self.make_equal_to(box, vvalue) return vvalue def make_vstring_concat(self, box, source_op, mode): - vvalue = VStringConcatValue(self.optimizer, box, source_op, mode) + vvalue = VStringConcatValue(self, box, source_op, mode) self.make_equal_to(box, vvalue) return vvalue def make_vstring_slice(self, box, source_op, mode): - vvalue = VStringSliceValue(self.optimizer, box, source_op, mode) + vvalue = VStringSliceValue(self, box, source_op, mode) self.make_equal_to(box, vvalue) return vvalue @@ -430,7 +430,7 @@ value.ensure_nonnull() # if value.is_virtual() and isinstance(value, VStringSliceValue): - fullindexbox = _int_add(self.optimizer, + fullindexbox = _int_add(self, value.vstart.force_box(), vindex.force_box()) value = value.vstr @@ -440,7 +440,7 @@ if vindex.is_constant(): return 
value.getitem(vindex.box.getint()) # - resbox = _strgetitem(self.optimizer, value.force_box(), vindex.force_box(), mode) + resbox = _strgetitem(self, value.force_box(), vindex.force_box(), mode) return self.getvalue(resbox) def optimize_STRLEN(self, op): @@ -450,7 +450,7 @@ def _optimize_STRLEN(self, op, mode): value = self.getvalue(op.getarg(0)) - lengthbox = value.getstrlen(self.optimizer, mode) + lengthbox = value.getstrlen(self, mode) self.make_equal_to(op.result, self.getvalue(lengthbox)) def optimize_COPYSTRCONTENT(self, op): @@ -468,7 +468,7 @@ if length.is_constant() and length.box.getint() == 0: return - copy_str_content(self.optimizer, + copy_str_content(self, src.force_box(), dst.force_box(), srcstart.force_box(), @@ -538,14 +538,14 @@ return True # vstr.ensure_nonnull() - lengthbox = _int_sub(self.optimizer, vstop.force_box(), - vstart.force_box()) + lengthbox = _int_sub(self, vstop.force_box(), + vstart.force_box()) # if isinstance(vstr, VStringSliceValue): # double slicing s[i:j][k:l] vintermediate = vstr vstr = vintermediate.vstr - startbox = _int_add(self.optimizer, + startbox = _int_add(self, vintermediate.vstart.force_box(), vstart.force_box()) vstart = self.getvalue(startbox) @@ -594,7 +594,7 @@ l2box = v2.getstrlen(None, mode) if isinstance(l2box, ConstInt): if l2box.value == 0: - lengthbox = v1.getstrlen(self.optimizer, mode) + lengthbox = v1.getstrlen(self, mode) seo = self.optimizer.send_extra_operation seo(ResOperation(rop.INT_EQ, [lengthbox, CONST_0], resultbox)) return True @@ -629,7 +629,7 @@ op = ResOperation(rop.PTR_EQ, [v1.force_box(), llhelper.CONST_NULL], resultbox) - self.optimizer.emit_operation(op) + self.emit_operation(op) return True # return False @@ -666,7 +666,7 @@ calldescr, func = cic.callinfo_for_oopspec(oopspecindex) op = ResOperation(rop.CALL, [ConstInt(func)] + args, result, descr=calldescr) - self.optimizer.emit_operation(op) + self.emit_operation(op) def propagate_forward(self, op): if not self.enabled: From noreply 
at buildbot.pypy.org Fri Sep 23 09:00:43 2011 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 23 Sep 2011 09:00:43 +0200 (CEST) Subject: [pypy-commit] pypy jit-optimizeopt-cleanups: unskip test thats now passing thanx to 8dfeb716d0ab Message-ID: <20110923070043.3D27F820D1@wyvern.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: jit-optimizeopt-cleanups Changeset: r47408:dd6b6ecd9c7e Date: 2011-09-23 09:00 +0200 http://bitbucket.org/pypy/pypy/changeset/dd6b6ecd9c7e/ Log: unskip test thats now passing thanx to 8dfeb716d0ab diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2790,7 +2790,6 @@ self.optimize_loop(ops, expected, preamble) def test_remove_duplicate_pure_op_ovf_with_lazy_setfield(self): - py.test.skip('this optimization is not yet supprted') ops = """ [i1, p1] i3 = int_add_ovf(i1, 1) From noreply at buildbot.pypy.org Fri Sep 23 09:48:39 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 23 Sep 2011 09:48:39 +0200 (CEST) Subject: [pypy-commit] lang-io default: fix sequence asCapitalized to only capitalize first char and leave the rest unchanged Message-ID: <20110923074839.12217820D1@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r46:495612e30422 Date: 2011-09-23 09:28 +0200 http://bitbucket.org/pypy/lang-io/changeset/495612e30422/ Log: fix sequence asCapitalized to only capitalize first char and leave the rest unchanged diff --git a/io/sequence.py b/io/sequence.py --- a/io/sequence.py +++ b/io/sequence.py @@ -9,7 +9,7 @@ @register_method('Sequence', 'asCapitalized') def sequence_as_capitalized(space, w_target, w_message, w_context): - # c/p from pypy/objspace/std/stringobject.py + # based on pypy/objspace/std/stringobject.py input = w_target.value buffer = [' '] * len(input) if len(input) > 0: @@ -22,11 +22,7 
@@ for i in range(1, len(input)): ch = input[i] - if ch.isupper(): - o = ord(ch) + 32 - buffer[i] = chr(o) - else: - buffer[i] = ch + buffer[i] = ch s = space.w_sequence.clone() s.value = "".join(buffer) diff --git a/io/test/test_sequence.py b/io/test/test_sequence.py --- a/io/test/test_sequence.py +++ b/io/test/test_sequence.py @@ -16,3 +16,9 @@ def test_sequence_as_capitalized(): inp = '"asdf qerttz" asCapitalized' + res, space = interpret(inp) + assert res.value == "Asdf qerttz" + + inp = '"fooBar" asCapitalized' + res, space = interpret(inp) + assert res.value == "FooBar" From noreply at buildbot.pypy.org Fri Sep 23 09:48:40 2011 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 23 Sep 2011 09:48:40 +0200 (CEST) Subject: [pypy-commit] lang-io default: fix Object getSlot to not only look in the own slots but do a full lookup Message-ID: <20110923074840.241B8820D1@wyvern.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r47:912b15c364e0 Date: 2011-09-23 09:47 +0200 http://bitbucket.org/pypy/lang-io/changeset/912b15c364e0/ Log: fix Object getSlot to not only look in the own slots but do a full lookup diff --git a/io/object.py b/io/object.py --- a/io/object.py +++ b/io/object.py @@ -14,10 +14,10 @@ @register_method('Object', 'getSlot', unwrap_spec=[object, str]) def w_object_get_slot(space, w_target, name): - try: - return w_target.slots[name] - except KeyError: - return space.w_nil + w_value = w_target.lookup(name) + if w_value: + return w_value + return space.w_nil @register_method('Object', 'hasSlot', unwrap_spec=[object, str]) def w_object_has_slot(space, w_target, name): diff --git a/io/test/test_object.py b/io/test/test_object.py --- a/io/test/test_object.py +++ b/io/test/test_object.py @@ -32,6 +32,20 @@ res, space = interpret(inp) assert res.value == 'foo' +def test_object_get_proto_slot(): + inp = """ + a := Object clone + a someValue := 123 + b := a clone + b getSlot("someValue")""" + res, space = interpret(inp) + assert 
res.number_value == 123 + +def test_object_get_slot_fail(): + inp = 'Object getSlot("notHere")' + res, space = interpret(inp) + assert res is space.w_nil + def test_object_has_slot(): inp = 'Object hasSlot("foo")' res, space = interpret(inp) From noreply at buildbot.pypy.org Fri Sep 23 13:11:06 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:06 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: (l.diekmann, cfbolz): start to sketch the way we want things to look: introduce Message-ID: <20110923111106.90114820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47409:ec034fe3b9b8 Date: 2011-01-20 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/ec034fe3b9b8/ Log: (l.diekmann, cfbolz): start to sketch the way we want things to look: introduce list strategies (which are supposed to be singletons in the end) and use an opaque storage attribute on the list for the actual memory. next step: stop using the wrappeditems attribute. also needs more support from rlib.rerased, faking it for now. 
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -11,10 +11,23 @@ from pypy.rlib.listsort import TimSort from pypy.interpreter.argument import Signature +class cast_to_void_star(object): + # this will later be replaced by something in rlib.rerased + def __init__(self, content, from_where=""): + self._content = content + self._from_where = from_where + +def cast_from_void_star(wrapper, from_where=""): + # this will later be replaced by something in rlib.rerased + assert wrapper._from_where == from_where + return wrapper._content + class W_ListObject(W_Object): from pypy.objspace.std.listtype import list_typedef as typedef def __init__(w_self, wrappeditems): + w_self.strategy = ObjectListStrategy() + w_self.strategy.init_from_list_w(w_self, wrappeditems) w_self.wrappeditems = wrappeditems def __repr__(w_self): @@ -22,15 +35,57 @@ return "%s(%s)" % (w_self.__class__.__name__, w_self.wrappeditems) def unwrap(w_list, space): - items = [space.unwrap(w_item) for w_item in w_list.wrappeditems]# XXX generic mixed types unwrap + # for tests only! 
+ items = [space.unwrap(w_item) for w_item in w_list.wrappeditems] return list(items) def append(w_list, w_item): w_list.wrappeditems.append(w_item) + # ___________________________________________________ + + def length(self): + return self.strategy.length(self) + + def getitem(self, index): + return self.strategy.getitem(self, index) + registerimplementation(W_ListObject) +class ListStrategy(object): + def init_from_list_w(self, w_list, list_w): + raise NotImplementedError + + def length(self, w_list): + raise NotImplementedError + + def getitem(self, w_list, index): + raise NotImplementedError + +class EmptyListStrategy(ListStrategy): + def init_from_list_w(self, w_list, list_w): + assert len(list_w) == 0 + w_list.storage = cast_to_void_star(None) + + def length(self, w_list): + return 0 + + def getitem(self, w_list, index): + raise IndexError + +class ObjectListStrategy(ListStrategy): + def init_from_list_w(self, w_list, list_w): + w_list.storage = cast_to_void_star(list_w, "object") + + def length(self, w_list): + return len(cast_from_void_star(w_list.storage, "object")) + + def getitem(self, w_list, index): + return cast_from_void_star(w_list.storage, "object")[index] + + + init_signature = Signature(['sequence'], None, None) init_defaults = [None] @@ -60,12 +115,12 @@ items_w.append(w_item) def len__List(space, w_list): - result = len(w_list.wrappeditems) + result = w_list.length() return wrapint(space, result) def getitem__List_ANY(space, w_list, w_index): try: - return w_list.wrappeditems[get_list_index(space, w_index)] + return w_list.getitem(get_list_index(space, w_index)) except IndexError: raise OperationError(space.w_IndexError, space.wrap("list index out of range")) From noreply at buildbot.pypy.org Fri Sep 23 13:11:07 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:07 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: (l.diekmann, cfbolz): Implemented getslice Message-ID: 
<20110923111107.BFCFD820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47410:9adef7c602c2 Date: 2011-01-20 14:38 +0100 http://bitbucket.org/pypy/pypy/changeset/9adef7c602c2/ Log: (l.diekmann, cfbolz): Implemented getslice diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -26,6 +26,7 @@ from pypy.objspace.std.listtype import list_typedef as typedef def __init__(w_self, wrappeditems): + assert isinstance(wrappeditems, list) w_self.strategy = ObjectListStrategy() w_self.strategy.init_from_list_w(w_self, wrappeditems) w_self.wrappeditems = wrappeditems @@ -50,6 +51,9 @@ def getitem(self, index): return self.strategy.getitem(self, index) + def getslice(self, start, stop, step, length): + return self.strategy.getslice(self, start, stop, step, length) + registerimplementation(W_ListObject) @@ -63,6 +67,9 @@ def getitem(self, w_list, index): raise NotImplementedError + def getslice(self, w_list, start, stop, step, length): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -74,6 +81,9 @@ def getitem(self, w_list, index): raise IndexError + def getslice(self, w_list, start, stop, step, length): + return W_ListObject([]) + class ObjectListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -84,7 +94,15 @@ def getitem(self, w_list, index): return cast_from_void_star(w_list.storage, "object")[index] - + def getslice(self, w_list, start, stop, step, length): + if step == 1: + return W_ListObject(cast_from_void_star(w_list.storage, "object")[start:stop]) + else: + subitems_w = [None] * length + for i in range(length): + subitems_w[i] = w_list.getitem(start) + start += step + return W_ListObject(subitems_w) init_signature = Signature(['sequence'], None, None) 
init_defaults = [None] @@ -127,18 +145,10 @@ def getitem__List_Slice(space, w_list, w_slice): # XXX consider to extend rlist's functionality? - length = len(w_list.wrappeditems) + length = w_list.length() start, stop, step, slicelength = w_slice.indices4(space, length) assert slicelength >= 0 - if step == 1 and 0 <= start <= stop: - return W_ListObject(w_list.wrappeditems[start:stop]) - w_res = W_ListObject([None] * slicelength) - items_w = w_list.wrappeditems - subitems_w = w_res.wrappeditems - for i in range(slicelength): - subitems_w[i] = items_w[start] - start += step - return w_res + return w_list.getslice(start, stop, step, slicelength) def getslice__List_ANY_ANY(space, w_list, w_start, w_stop): length = len(w_list.wrappeditems) From noreply at buildbot.pypy.org Fri Sep 23 13:11:08 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:08 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: (l.diekmann, cfbolz): more slicing Message-ID: <20110923111108.EBDBB820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47411:e2e33e1676ab Date: 2011-01-20 14:41 +0100 http://bitbucket.org/pypy/pypy/changeset/e2e33e1676ab/ Log: (l.diekmann, cfbolz): more slicing diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -151,9 +151,9 @@ return w_list.getslice(start, stop, step, slicelength) def getslice__List_ANY_ANY(space, w_list, w_start, w_stop): - length = len(w_list.wrappeditems) + length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) - return W_ListObject(w_list.wrappeditems[start:stop]) + return w_list.getslice(start, stop, 1, stop - start) def setslice__List_ANY_ANY_ANY(space, w_list, w_start, w_stop, w_sequence): length = len(w_list.wrappeditems) From noreply at buildbot.pypy.org Fri Sep 23 13:11:10 2011 From: noreply at buildbot.pypy.org (l.diekmann) 
Date: Fri, 23 Sep 2011 13:11:10 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: (l.diekmann, cfbolz around): some more calls to length; changed contains to use strategies Message-ID: <20110923111110.235BF820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47412:a6dcfde63812 Date: 2011-01-20 14:57 +0100 http://bitbucket.org/pypy/pypy/changeset/a6dcfde63812/ Log: (l.diekmann, cfbolz around): some more calls to length; changed contains to use strategies diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -156,21 +156,20 @@ return w_list.getslice(start, stop, 1, stop - start) def setslice__List_ANY_ANY_ANY(space, w_list, w_start, w_stop, w_sequence): - length = len(w_list.wrappeditems) + length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) _setitem_slice_helper(space, w_list, start, 1, stop-start, w_sequence) def delslice__List_ANY_ANY(space, w_list, w_start, w_stop): - length = len(w_list.wrappeditems) + length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) _delitem_slice_helper(space, w_list.wrappeditems, start, 1, stop-start) def contains__List_ANY(space, w_list, w_obj): # needs to be safe against eq_w() mutating the w_list behind our back i = 0 - items_w = w_list.wrappeditems - while i < len(items_w): # intentionally always calling len! - if space.eq_w(items_w[i], w_obj): + while i < w_list.length(): # intentionally always calling len! 
+ if space.eq_w(w_list.getitem(i), w_obj): return space.w_True i += 1 return space.w_False From noreply at buildbot.pypy.org Fri Sep 23 13:11:11 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:11 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Replaced more w_list.wrappeditems by using the (temporary) method w_list.getitems() Message-ID: <20110923111111.4F0F5820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47413:3f7f05555c7b Date: 2011-01-26 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/3f7f05555c7b/ Log: Replaced more w_list.wrappeditems by using the (temporary) method w_list.getitems() diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -54,6 +54,9 @@ def getslice(self, start, stop, step, length): return self.strategy.getslice(self, start, stop, step, length) + def getitems(self): + return self.strategy.getitems(self) + registerimplementation(W_ListObject) @@ -70,6 +73,9 @@ def getslice(self, w_list, start, stop, step, length): raise NotImplementedError + def getitems(self, w_list): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -84,6 +90,9 @@ def getslice(self, w_list, start, stop, step, length): return W_ListObject([]) + def getitems(self, w_list): + return [] + class ObjectListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -104,6 +113,9 @@ start += step return W_ListObject(subitems_w) + def getitems(self, w_list): + return cast_from_void_star(w_list.storage, "object") + init_signature = Signature(['sequence'], None, None) init_defaults = [None] @@ -119,7 +131,7 @@ # This is commented out to avoid assigning a new RPython list to # 'wrappeditems', which defeats the W_FastSeqIterObject 
optimization. # - items_w = w_list.wrappeditems + items_w = w_list.getitems() del items_w[:] if w_iterable is not None: w_iterator = space.iter(w_iterable) @@ -163,7 +175,7 @@ def delslice__List_ANY_ANY(space, w_list, w_start, w_stop): length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) - _delitem_slice_helper(space, w_list.wrappeditems, start, 1, stop-start) + _delitem_slice_helper(space, w_list.getitems(), start, 1, stop-start) def contains__List_ANY(space, w_list, w_obj): # needs to be safe against eq_w() mutating the w_list behind our back @@ -176,10 +188,10 @@ def iter__List(space, w_list): from pypy.objspace.std import iterobject - return iterobject.W_FastListIterObject(w_list, w_list.wrappeditems) + return iterobject.W_FastListIterObject(w_list, w_list.getitems()) def add__List_List(space, w_list1, w_list2): - return W_ListObject(w_list1.wrappeditems + w_list2.wrappeditems) + return W_ListObject(w_list1.getitems() + w_list2.getitems()) def inplace_add__List_ANY(space, w_list1, w_iterable2): @@ -197,7 +209,7 @@ if e.match(space, space.w_TypeError): raise FailedToImplement raise - return W_ListObject(w_list.wrappeditems * times) + return W_ListObject(w_list.getitems() * times) def mul__List_ANY(space, w_list, w_times): return mul_list_times(space, w_list, w_times) @@ -217,8 +229,8 @@ def eq__List_List(space, w_list1, w_list2): # needs to be safe against eq_w() mutating the w_lists behind our back - items1_w = w_list1.wrappeditems - items2_w = w_list2.wrappeditems + items1_w = w_list1.getitems() + items2_w = w_list2.getitems() return equal_wrappeditems(space, items1_w, items2_w) def equal_wrappeditems(space, items1_w, items2_w): @@ -258,12 +270,12 @@ return space.newbool(len(items1_w) > len(items2_w)) def lt__List_List(space, w_list1, w_list2): - return lessthan_unwrappeditems(space, w_list1.wrappeditems, - w_list2.wrappeditems) + return lessthan_unwrappeditems(space, w_list1.getitems(), + w_list2.getitems()) def 
gt__List_List(space, w_list1, w_list2): - return greaterthan_unwrappeditems(space, w_list1.wrappeditems, - w_list2.wrappeditems) + return greaterthan_unwrappeditems(space, w_list1.getitems(), + w_list2.getitems()) def delitem__List_ANY(space, w_list, w_idx): idx = get_list_index(space, w_idx) From noreply at buildbot.pypy.org Fri Sep 23 13:11:12 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:12 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Choose ListStrategy matching items in list Message-ID: <20110923111112.80960820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47414:58cd6e224558 Date: 2011-02-01 13:07 +0100 http://bitbucket.org/pypy/pypy/changeset/58cd6e224558/ Log: Choose ListStrategy matching items in list (had to change .hgsub to do commits - otherwise got error: unknown subrepo source) diff --git a/.hgsub b/.hgsub --- a/.hgsub +++ b/.hgsub @@ -1,4 +1,4 @@ -greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c -testrunner = [svn]http://codespeak.net/svn/pypy/build/testrunner -lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl -lib_pypy/sqlite3 = [svn]http://codespeak.net/svn/pypy/pysqlite2 +#greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c +#testrunner = [svn]http://codespeak.net/svn/pypy/build/testrunner +#lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl +#lib_pypy/sqlite3 = [svn]http://codespeak.net/svn/pypy/pysqlite2 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -22,12 +22,18 @@ assert wrapper._from_where == from_where return wrapper._content +# don't know where to put this function, so it is global for now +def get_strategy_from_list_objects(list_w): + if list_w == []: + return EmptyListStrategy() + return ObjectListStrategy() + class W_ListObject(W_Object): from pypy.objspace.std.listtype 
import list_typedef as typedef def __init__(w_self, wrappeditems): assert isinstance(wrappeditems, list) - w_self.strategy = ObjectListStrategy() + w_self.strategy = get_strategy_from_list_objects(wrappeditems) w_self.strategy.init_from_list_w(w_self, wrappeditems) w_self.wrappeditems = wrappeditems diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -0,0 +1,8 @@ +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy +from pypy.objspace.std.test.test_listobject import TestW_ListObject + +class TestW_ListStrategies(TestW_ListObject): + + def test_check_strategy(self): + assert isinstance(W_ListObject([]).strategy, EmptyListStrategy) + assert isinstance(W_ListObject([1]).strategy, ObjectListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:11:13 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:13 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: test_listobject works again: changed init method to call W_List.append instead of wrapped_items.append Message-ID: <20110923111113.AF073820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47415:0da72c36ed08 Date: 2011-02-01 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/0da72c36ed08/ Log: test_listobject works again: changed init method to call W_List.append instead of wrapped_items.append diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -48,6 +48,9 @@ def append(w_list, w_item): w_list.wrappeditems.append(w_item) + if isinstance(w_list.strategy, EmptyListStrategy): + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) # ___________________________________________________ @@ -148,7 +151,8 @@ 
if not e.match(space, space.w_StopIteration): raise break # done - items_w.append(w_item) + #items_w.append(w_item) + w_list.append(w_item) def len__List(space, w_list): result = w_list.length() @@ -429,7 +433,8 @@ return space.w_None def list_append__List_ANY(space, w_list, w_any): - w_list.wrappeditems.append(w_any) + #w_list.wrappeditems.append(w_any) + w_list.append(w_any) return space.w_None def list_extend__List_List(space, w_list, w_other): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -5,4 +5,12 @@ def test_check_strategy(self): assert isinstance(W_ListObject([]).strategy, EmptyListStrategy) - assert isinstance(W_ListObject([1]).strategy, ObjectListStrategy) + assert isinstance(W_ListObject([self.space.wrap(1),self.space.wrap('a')]).strategy, ObjectListStrategy) + assert isinstance(W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]).strategy, ObjectListStrategy) + assert isinstance(W_ListObject([self.space.wrap('a'), self.space.wrap('b')]).strategy, ObjectListStrategy) + + def test_switch_strategy(self): + l = W_ListObject([]) + assert isinstance(l.strategy, EmptyListStrategy) + l.append(self.space.wrap(1)) + assert isinstance(l.strategy, ObjectListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:11:14 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:14 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Added Integer- and StringListStrategy Message-ID: <20110923111114.DB951820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47416:8743cea829a0 Date: 2011-02-01 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/8743cea829a0/ Log: Added Integer- and StringListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py 
+++ b/pypy/objspace/std/listobject.py @@ -5,7 +5,6 @@ from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.listtype import get_list_index from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice - from pypy.objspace.std import slicetype from pypy.interpreter import gateway, baseobjspace from pypy.rlib.listsort import TimSort @@ -26,8 +25,29 @@ def get_strategy_from_list_objects(list_w): if list_w == []: return EmptyListStrategy() + + # check for ints + for e in list_w: + if not is_W_IntObject(e): + break + if e is list_w[-1]: + return IntegerListStrategy() + + # check for ints + for e in list_w: + if not is_W_StringObject(e): + break + if e is list_w[-1]: + return StringListStrategy() + return ObjectListStrategy() +def is_W_IntObject(w_object): + return str(w_object.__class__) == "" + +def is_W_StringObject(w_object): + return str(w_object.__class__) == "" + class W_ListObject(W_Object): from pypy.objspace.std.listtype import list_typedef as typedef @@ -47,10 +67,7 @@ return list(items) def append(w_list, w_item): - w_list.wrappeditems.append(w_item) - if isinstance(w_list.strategy, EmptyListStrategy): - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) + w_list.strategy.append(w_list, w_item) # ___________________________________________________ @@ -85,6 +102,9 @@ def getitems(self, w_list): raise NotImplementedError + def append(self, w_list, w_item): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -102,6 +122,19 @@ def getitems(self, w_list): return [] + def append(self, w_list, w_item): + if is_W_IntObject(w_item): + w_list.strategy = IntegerListStrategy() + + elif is_W_StringObject(w_item): + w_list.strategy = StringListStrategy() + + else: + w_list.strategy = ObjectListStrategy() + + w_list.wrappeditems.append(w_item) + w_list.strategy.init_from_list_w(w_list, 
w_list.wrappeditems) + class ObjectListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -125,6 +158,66 @@ def getitems(self, w_list): return cast_from_void_star(w_list.storage, "object") + def append(self, w_list, w_item): + w_list.wrappeditems.append(w_item) + #list_w = cast_from_void_star(w_list.storage, 'object') + #list_w.append(w_item) + #w_list.storage = cast_to_void_star(list_w, 'object') + +class IntegerListStrategy(ListStrategy): + + def init_from_list_w(self, w_list, list_w): + w_list.storage = cast_to_void_star(list_w, "integer") + + def length(self, w_list): + return len(cast_from_void_star(w_list.storage, "integer")) + + def getitem(self, w_list, index): + return cast_from_void_star(w_list.storage, "integer")[index] + + def getslice(self, w_list, start, stop, step, length): + if step == 1: + return W_ListObject(cast_from_void_star(w_list.storage, "integer")[start:stop]) + else: + subitems_w = [None] * length + for i in range(length): + subitems_w[i] = w_list.getitem(start) + start += step + return W_ListObject(subitems_w) + + def getitems(self, w_list): + return cast_from_void_star(w_list.storage, "integer") + + def append(self, w_list, w_item): + w_list.wrappeditems.append(w_item) + +class StringListStrategy(ListStrategy): + + def init_from_list_w(self, w_list, list_w): + w_list.storage = cast_to_void_star(list_w, "string") + + def length(self, w_list): + return len(cast_from_void_star(w_list.storage, "string")) + + def getitem(self, w_list, index): + return cast_from_void_star(w_list.storage, "string")[index] + + def getslice(self, w_list, start, stop, step, length): + if step == 1: + return W_ListObject(cast_from_void_star(w_list.storage, "string")[start:stop]) + else: + subitems_w = [None] * length + for i in range(length): + subitems_w[i] = w_list.getitem(start) + start += step + return W_ListObject(subitems_w) + + def getitems(self, w_list): + return 
cast_from_void_star(w_list.storage, "string") + + def append(self, w_list, w_item): + w_list.wrappeditems.append(w_item) + init_signature = Signature(['sequence'], None, None) init_defaults = [None] diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, StringListStrategy from pypy.objspace.std.test.test_listobject import TestW_ListObject class TestW_ListStrategies(TestW_ListObject): @@ -6,11 +6,21 @@ def test_check_strategy(self): assert isinstance(W_ListObject([]).strategy, EmptyListStrategy) assert isinstance(W_ListObject([self.space.wrap(1),self.space.wrap('a')]).strategy, ObjectListStrategy) - assert isinstance(W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]).strategy, ObjectListStrategy) - assert isinstance(W_ListObject([self.space.wrap('a'), self.space.wrap('b')]).strategy, ObjectListStrategy) + assert isinstance(W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]).strategy, IntegerListStrategy) + assert isinstance(W_ListObject([self.space.wrap('a'), self.space.wrap('b')]).strategy, StringListStrategy) - def test_switch_strategy(self): + def test_empty_to_any(self): + l = W_ListObject([]) + assert isinstance(l.strategy, EmptyListStrategy) + l.append(self.space.wrap(1.)) + assert isinstance(l.strategy, ObjectListStrategy) + l = W_ListObject([]) assert isinstance(l.strategy, EmptyListStrategy) l.append(self.space.wrap(1)) - assert isinstance(l.strategy, ObjectListStrategy) + assert isinstance(l.strategy, IntegerListStrategy) + + l = W_ListObject([]) + assert isinstance(l.strategy, EmptyListStrategy) + l.append(self.space.wrap('a')) + assert 
isinstance(l.strategy, StringListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:11:16 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:16 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Convert ListStrategies to ObjectListStrategy when a differen element is added Message-ID: <20110923111116.13C12820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47417:79361ae2fe98 Date: 2011-02-02 11:14 +0100 http://bitbucket.org/pypy/pypy/changeset/79361ae2fe98/ Log: Convert ListStrategies to ObjectListStrategy when a differen element is added diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -160,9 +160,6 @@ def append(self, w_list, w_item): w_list.wrappeditems.append(w_item) - #list_w = cast_from_void_star(w_list.storage, 'object') - #list_w.append(w_item) - #w_list.storage = cast_to_void_star(list_w, 'object') class IntegerListStrategy(ListStrategy): @@ -191,6 +188,12 @@ def append(self, w_list, w_item): w_list.wrappeditems.append(w_item) + if is_W_IntObject(w_item): + return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) + class StringListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -218,6 +221,12 @@ def append(self, w_list, w_item): w_list.wrappeditems.append(w_item) + if is_W_StringObject(w_item): + return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) + init_signature = Signature(['sequence'], None, None) init_defaults = [None] diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -24,3 +24,22 @@ assert isinstance(l.strategy, EmptyListStrategy) 
l.append(self.space.wrap('a')) assert isinstance(l.strategy, StringListStrategy) + + def test_int_to_any(self): + l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.append(self.space.wrap(4)) + assert isinstance(l.strategy, IntegerListStrategy) + l.append(self.space.wrap('a')) + assert isinstance(l.strategy, ObjectListStrategy) + + def test_string_to_any(self): + l = W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + assert isinstance(l.strategy, StringListStrategy) + l.append(self.space.wrap('d')) + assert isinstance(l.strategy, StringListStrategy) + l.append(self.space.wrap(3)) + assert isinstance(l.strategy, ObjectListStrategy) + + + From noreply at buildbot.pypy.org Fri Sep 23 13:11:17 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:17 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Carl: Append with cast_to_void_star Message-ID: <20110923111117.4107E820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47418:f2f2a37ebb4c Date: 2011-02-14 10:19 +0100 http://bitbucket.org/pypy/pypy/changeset/f2f2a37ebb4c/ Log: Carl: Append with cast_to_void_star diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -43,10 +43,12 @@ return ObjectListStrategy() def is_W_IntObject(w_object): - return str(w_object.__class__) == "" + from pypy.objspace.std.intobject import W_IntObject + return type(w_object) is W_IntObject def is_W_StringObject(w_object): - return str(w_object.__class__) == "" + from pypy.objspace.std.stringobject import W_StringObject + return type(w_object) is W_StringObject class W_ListObject(W_Object): from pypy.objspace.std.listtype import list_typedef as typedef @@ -159,7 +161,7 @@ return cast_from_void_star(w_list.storage, "object") def append(self, w_list, 
w_item): - w_list.wrappeditems.append(w_item) + cast_from_void_star(w_list.storage, "object").append(w_item) class IntegerListStrategy(ListStrategy): @@ -186,13 +188,15 @@ return cast_from_void_star(w_list.storage, "integer") def append(self, w_list, w_item): - w_list.wrappeditems.append(w_item) if is_W_IntObject(w_item): + cast_from_void_star(w_list.storage, "integer").append(w_item) return + items_w = w_list.getitems() w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) + w_list.strategy.init_from_list_w(w_list, items_w) + w_list.append(w_item) class StringListStrategy(ListStrategy): @@ -219,13 +223,15 @@ return cast_from_void_star(w_list.storage, "string") def append(self, w_list, w_item): - w_list.wrappeditems.append(w_item) if is_W_StringObject(w_item): + cast_from_void_star(w_list.storage, "string").append(w_item) return + list_w = w_list.getitems() w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.append(w_item) init_signature = Signature(['sequence'], None, None) init_defaults = [None] From noreply at buildbot.pypy.org Fri Sep 23 13:11:18 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:18 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented inplace_mul in strategies Message-ID: <20110923111118.6D5C0820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47419:05ab11979465 Date: 2011-02-16 11:55 +0100 http://bitbucket.org/pypy/pypy/changeset/05ab11979465/ Log: Implemented inplace_mul in strategies diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -85,6 +85,11 @@ def getitems(self): return self.strategy.getitems(self) + # ___________________________________________________ + + def inplace_mul(self, 
times): + self.strategy.inplace_mul(self, times) + registerimplementation(W_ListObject) @@ -107,6 +112,9 @@ def append(self, w_list, w_item): raise NotImplementedError + def inplace_mul(self, w_list, times): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -137,6 +145,9 @@ w_list.wrappeditems.append(w_item) w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) + def inplace_mul(self, w_list, times): + return + class ObjectListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -163,6 +174,10 @@ def append(self, w_list, w_item): cast_from_void_star(w_list.storage, "object").append(w_item) + def inplace_mul(self, w_list, times): + list_w = cast_from_void_star(w_list.storage, "object") + list_w *= times + class IntegerListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -198,6 +213,10 @@ w_list.strategy.init_from_list_w(w_list, items_w) w_list.append(w_item) + def inplace_mul(self, w_list, times): + list_w = cast_from_void_star(w_list.storage, "integer") + list_w *= times + class StringListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -233,6 +252,12 @@ w_list.strategy.init_from_list_w(w_list, list_w) w_list.append(w_item) + def inplace_mul(self, w_list, times): + list_w = cast_from_void_star(w_list.storage, "string") + list_w *= times + +# _______________________________________________________ + init_signature = Signature(['sequence'], None, None) init_defaults = [None] @@ -342,7 +367,7 @@ if e.match(space, space.w_TypeError): raise FailedToImplement raise - w_list.wrappeditems *= times + w_list.inplace_mul(times) return w_list def eq__List_List(space, w_list1, w_list2): From noreply at buildbot.pypy.org Fri Sep 23 13:11:19 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:19 +0200 (CEST) Subject: [pypy-commit] pypy 
list-strategies: Implemented deleteitem Message-ID: <20110923111119.99C0A820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47420:5476d5e4a096 Date: 2011-02-16 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/5476d5e4a096/ Log: Implemented deleteitem diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -90,6 +90,9 @@ def inplace_mul(self, times): self.strategy.inplace_mul(self, times) + def deleteitem(self, index): + self.strategy.deleteitem(self, index) + registerimplementation(W_ListObject) @@ -115,6 +118,9 @@ def inplace_mul(self, w_list, times): raise NotImplementedError + def deleteitem(self, w_list, index): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -148,6 +154,9 @@ def inplace_mul(self, w_list, times): return + def deleteitem(self, w_list, index): + raise IndexError + class ObjectListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -178,6 +187,10 @@ list_w = cast_from_void_star(w_list.storage, "object") list_w *= times + def deleteitem(self, w_list, index): + list_w = cast_from_void_star(w_list.storage, "object") + del list_w[index] + class IntegerListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -217,6 +230,10 @@ list_w = cast_from_void_star(w_list.storage, "integer") list_w *= times + def deleteitem(self, w_list, index): + list_w = cast_from_void_star(w_list.storage, "integer") + del list_w[index] + class StringListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -256,6 +273,10 @@ list_w = cast_from_void_star(w_list.storage, "string") list_w *= times + def deleteitem(self, w_list, index): + list_w = cast_from_void_star(w_list.storage, "string") + del list_w[index] + # 
_______________________________________________________ init_signature = Signature(['sequence'], None, None) @@ -423,7 +444,7 @@ def delitem__List_ANY(space, w_list, w_idx): idx = get_list_index(space, w_idx) try: - del w_list.wrappeditems[idx] + w_list.deleteitem(idx) except IndexError: raise OperationError(space.w_IndexError, space.wrap("list deletion index out of range")) From noreply at buildbot.pypy.org Fri Sep 23 13:11:20 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:20 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented setitem Message-ID: <20110923111120.C553A820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47421:b55d9ead88af Date: 2011-02-16 12:44 +0100 http://bitbucket.org/pypy/pypy/changeset/b55d9ead88af/ Log: Implemented setitem diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -93,6 +93,9 @@ def deleteitem(self, index): self.strategy.deleteitem(self, index) + def setitem(self, index, w_item): + self.strategy.setitem(self, index, w_item) + registerimplementation(W_ListObject) @@ -121,6 +124,9 @@ def deleteitem(self, w_list, index): raise NotImplementedError + def setitem(self, w_list, index, w_item): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -157,6 +163,9 @@ def deleteitem(self, w_list, index): raise IndexError + def setitem(self, w_list, index, w_item): + raise IndexError + class ObjectListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -191,6 +200,10 @@ list_w = cast_from_void_star(w_list.storage, "object") del list_w[index] + def setitem(self, w_list, index, w_item): + list_w = cast_from_void_star(w_list.storage, "object") + list_w[index] = w_item + class 
IntegerListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -234,6 +247,10 @@ list_w = cast_from_void_star(w_list.storage, "integer") del list_w[index] + def setitem(self, w_list, index, w_item): + list_w = cast_from_void_star(w_list.storage, "integer") + list_w[index] = w_item + class StringListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -277,6 +294,10 @@ list_w = cast_from_void_star(w_list.storage, "string") del list_w[index] + def setitem(self, w_list, index, w_item): + list_w = cast_from_void_star(w_list.storage, "string") + list_w[index] = w_item + # _______________________________________________________ init_signature = Signature(['sequence'], None, None) @@ -490,7 +511,8 @@ def setitem__List_ANY_ANY(space, w_list, w_index, w_any): idx = get_list_index(space, w_index) try: - w_list.wrappeditems[idx] = w_any + #w_list.wrappeditems[idx] = w_any + w_list.setitem(idx, w_any) except IndexError: raise OperationError(space.w_IndexError, space.wrap("list index out of range")) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -41,5 +41,17 @@ l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) + def test_setitem(self): + # This should work if test_listobject.py passes + l = W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + assert self.space.eq_w(l.getitem(0), self.space.wrap('a')) + l.setitem(0, self.space.wrap('d')) + assert self.space.eq_w(l.getitem(0), self.space.wrap('d')) + # Test strategy change + l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.setitem(0, self.space.wrap('d')) + assert isinstance(l.strategy, ObjectListStrategy) + From noreply at buildbot.pypy.org Fri Sep 23 13:11:21 2011 From: noreply at 
buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:21 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Switch strategy depending on type of item that was set Message-ID: <20110923111121.F198D820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47422:3a9a1a59af86 Date: 2011-02-16 12:51 +0100 http://bitbucket.org/pypy/pypy/changeset/3a9a1a59af86/ Log: Switch strategy depending on type of item that was set diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -251,6 +251,10 @@ list_w = cast_from_void_star(w_list.storage, "integer") list_w[index] = w_item + if not is_W_IntObject(w_item): + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + class StringListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -298,6 +302,10 @@ list_w = cast_from_void_star(w_list.storage, "string") list_w[index] = w_item + if not is_W_StringObject(w_item): + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + # _______________________________________________________ init_signature = Signature(['sequence'], None, None) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -48,10 +48,17 @@ l.setitem(0, self.space.wrap('d')) assert self.space.eq_w(l.getitem(0), self.space.wrap('d')) - # Test strategy change + assert isinstance(l.strategy, StringListStrategy) + + # IntStrategy to ObjectStrategy l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setitem(0, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) + # StringStrategy to ObjectStrategy + l = 
W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + assert isinstance(l.strategy, StringListStrategy) + l.setitem(0, self.space.wrap(2)) + assert isinstance(l.strategy, ObjectListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:11:23 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:23 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: remove some comments; replaced len() by w_list.length() in repr Message-ID: <20110923111123.2E577820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47423:7cfc2472e868 Date: 2011-02-16 13:03 +0100 http://bitbucket.org/pypy/pypy/changeset/7cfc2472e868/ Log: remove some comments; replaced len() by w_list.length() in repr diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -519,7 +519,6 @@ def setitem__List_ANY_ANY(space, w_list, w_index, w_any): idx = get_list_index(space, w_index) try: - #w_list.wrappeditems[idx] = w_any w_list.setitem(idx, w_any) except IndexError: raise OperationError(space.w_IndexError, @@ -596,7 +595,7 @@ listrepr = app.interphook("listrepr") def repr__List(space, w_list): - if len(w_list.wrappeditems) == 0: + if w_list.length() == 0: return space.wrap('[]') ec = space.getexecutioncontext() w_currently_in_repr = ec._py_repr @@ -617,7 +616,6 @@ return space.w_None def list_append__List_ANY(space, w_list, w_any): - #w_list.wrappeditems.append(w_any) w_list.append(w_any) return space.w_None From noreply at buildbot.pypy.org Fri Sep 23 13:11:24 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:24 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented insert Message-ID: <20110923111124.5AEAB820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47424:5fca9acec5b7 Date: 2011-02-16 13:23 +0100 
http://bitbucket.org/pypy/pypy/changeset/5fca9acec5b7/ Log: Implemented insert diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -96,6 +96,9 @@ def setitem(self, index, w_item): self.strategy.setitem(self, index, w_item) + def insert(self, index, w_item): + self.strategy.insert(self, index, w_item) + registerimplementation(W_ListObject) @@ -127,6 +130,9 @@ def setitem(self, w_list, index, w_item): raise NotImplementedError + def insert(self, w_list, index, w_item): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -166,6 +172,9 @@ def setitem(self, w_list, index, w_item): raise IndexError + def insert(self, w_list, index, w_item): + self.append(w_list, w_item) + class ObjectListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -204,6 +213,11 @@ list_w = cast_from_void_star(w_list.storage, "object") list_w[index] = w_item + def insert(self, w_list, index, w_item): + list_w = cast_from_void_star(w_list.storage, "object") + list_w.insert(index, w_item) + + class IntegerListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -255,6 +269,15 @@ w_list.strategy = ObjectListStrategy() w_list.strategy.init_from_list_w(w_list, list_w) + def insert(self, w_list, index, w_item): + list_w = cast_from_void_star(w_list.storage, "integer") + list_w.insert(index, w_item) + + if not is_W_IntObject(w_item): + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + + class StringListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): @@ -306,6 +329,14 @@ w_list.strategy = ObjectListStrategy() w_list.strategy.init_from_list_w(w_list, list_w) + def insert(self, w_list, index, w_item): + list_w = cast_from_void_star(w_list.storage, "string") + 
list_w.insert(index, w_item) + + if not is_W_StringObject(w_item): + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + # _______________________________________________________ init_signature = Signature(['sequence'], None, None) @@ -605,14 +636,14 @@ def list_insert__List_ANY_ANY(space, w_list, w_where, w_any): where = space.int_w(w_where) - length = len(w_list.wrappeditems) + length = w_list.length() if where < 0: where += length if where < 0: where = 0 elif where > length: where = length - w_list.wrappeditems.insert(where, w_any) + w_list.insert(where, w_any) return space.w_None def list_append__List_ANY(space, w_list, w_any): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -62,3 +62,35 @@ l.setitem(0, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) + def test_insert(self): + # no change + l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.insert(3, self.space.wrap(4)) + assert isinstance(l.strategy, IntegerListStrategy) + + # StringStrategy + l = W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + assert isinstance(l.strategy, StringListStrategy) + l.insert(3, self.space.wrap(2)) + assert isinstance(l.strategy, ObjectListStrategy) + + # IntegerStrategy + l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.insert(3, self.space.wrap('d')) + assert isinstance(l.strategy, ObjectListStrategy) + + # EmptyStrategy + l = W_ListObject([]) + assert isinstance(l.strategy, EmptyListStrategy) + l.insert(0, self.space.wrap('a')) + assert isinstance(l.strategy, StringListStrategy) + + l = W_ListObject([]) + assert isinstance(l.strategy, EmptyListStrategy) + 
l.insert(0, self.space.wrap(2)) + assert isinstance(l.strategy, IntegerListStrategy) + + + From noreply at buildbot.pypy.org Fri Sep 23 13:11:25 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:25 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Fixed casting in insert and setitem (ListStrategies) Message-ID: <20110923111125.976A1820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47425:fbf3d9dfc40a Date: 2011-02-23 13:15 +0100 http://bitbucket.org/pypy/pypy/changeset/fbf3d9dfc40a/ Log: Fixed casting in insert and setitem (ListStrategies) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -173,6 +173,7 @@ raise IndexError def insert(self, w_list, index, w_item): + assert index == 0 self.append(w_list, w_item) class ObjectListStrategy(ListStrategy): @@ -263,19 +264,25 @@ def setitem(self, w_list, index, w_item): list_w = cast_from_void_star(w_list.storage, "integer") - list_w[index] = w_item - if not is_W_IntObject(w_item): - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + if is_W_IntObject(w_item): + list_w[index] = w_item + return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.setitem(index, w_item) def insert(self, w_list, index, w_item): list_w = cast_from_void_star(w_list.storage, "integer") - list_w.insert(index, w_item) - if not is_W_IntObject(w_item): - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + if is_W_IntObject(w_item): + list_w.insert(index, w_item) + return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.insert(index, w_item) class StringListStrategy(ListStrategy): @@ -323,19 +330,24 @@ def setitem(self, w_list, index, w_item): list_w = 
cast_from_void_star(w_list.storage, "string") - list_w[index] = w_item + if is_W_StringObject(w_item): + list_w[index] = w_item + return - if not is_W_StringObject(w_item): - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.setitem(index, w_item) def insert(self, w_list, index, w_item): list_w = cast_from_void_star(w_list.storage, "string") - list_w.insert(index, w_item) - if not is_W_StringObject(w_item): - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + if is_W_StringObject(w_item): + list_w.insert(index, w_item) + return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.insert(index, w_item) # _______________________________________________________ From noreply at buildbot.pypy.org Fri Sep 23 13:11:26 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:26 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Added base class AbstractUnwrappedStrategy (implemented append and getitem) Message-ID: <20110923111126.C28F6820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47426:0339252ceb1e Date: 2011-02-23 13:43 +0100 http://bitbucket.org/pypy/pypy/changeset/0339252ceb1e/ Log: Added base class AbstractUnwrappedStrategy (implemented append and getitem) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -219,7 +219,44 @@ list_w.insert(index, w_item) -class IntegerListStrategy(ListStrategy): +class AbstractUnwrappedStrategy(ListStrategy): + def unwrap(self, w_obj): + # XXX override later + return w_obj + + def wrap(self, item): + # XXX override later + return item + + def cast_from_void_star(self, storage): + raise NotImplementedError("abstract base 
class") + + def is_correct_type(self, w_obj): + raise NotImplementedError("abstract base class") + + + + def getitem(self, w_list, index): + return self.wrap(self.cast_from_void_star(w_list.storage)[index]) + + def append(self, w_list, w_item): + + if self.is_correct_type(w_item): + self.cast_from_void_star(w_list.storage).append(self.unwrap(w_item)) + return + + items_w = w_list.getitems() + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, items_w) + w_list.append(w_item) + +class IntegerListStrategy(AbstractUnwrappedStrategy): + + def cast_from_void_star(self, storage): + return cast_from_void_star(storage, "integer") + + def is_correct_type(self, w_obj): + return is_W_IntObject(w_obj) def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "integer") @@ -227,9 +264,6 @@ def length(self, w_list): return len(cast_from_void_star(w_list.storage, "integer")) - def getitem(self, w_list, index): - return cast_from_void_star(w_list.storage, "integer")[index] - def getslice(self, w_list, start, stop, step, length): if step == 1: return W_ListObject(cast_from_void_star(w_list.storage, "integer")[start:stop]) @@ -243,17 +277,6 @@ def getitems(self, w_list): return cast_from_void_star(w_list.storage, "integer") - def append(self, w_list, w_item): - - if is_W_IntObject(w_item): - cast_from_void_star(w_list.storage, "integer").append(w_item) - return - - items_w = w_list.getitems() - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, items_w) - w_list.append(w_item) - def inplace_mul(self, w_list, times): list_w = cast_from_void_star(w_list.storage, "integer") list_w *= times From noreply at buildbot.pypy.org Fri Sep 23 13:11:27 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:27 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented all remaining methods in AbstractUnwrappedStrategy Message-ID: 
<20110923111127.EE491820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47427:5658dc32c129 Date: 2011-02-23 13:57 +0100 http://bitbucket.org/pypy/pypy/changeset/5658dc32c129/ Log: Implemented all remaining methods in AbstractUnwrappedStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -235,10 +235,25 @@ raise NotImplementedError("abstract base class") + def length(self, w_list): + return len(self.cast_from_void_star(w_list.storage)) def getitem(self, w_list, index): return self.wrap(self.cast_from_void_star(w_list.storage)[index]) + def getitems(self, w_list): + return self.cast_from_void_star(w_list.storage) + + def getslice(self, w_list, start, stop, step, length): + if step == 1: + return W_ListObject(self.cast_from_void_star(w_list.storage)[start:stop]) + else: + subitems_w = [None] * length + for i in range(length): + subitems_w[i] = w_list.getitem(start) + start += step + return W_ListObject(subitems_w) + def append(self, w_list, w_item): if self.is_correct_type(w_item): @@ -250,6 +265,36 @@ w_list.strategy.init_from_list_w(w_list, items_w) w_list.append(w_item) + def insert(self, w_list, index, w_item): + list_w = self.cast_from_void_star(w_list.storage) + + if self.is_correct_type(w_item): + list_w.insert(index, w_item) + return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.insert(index, w_item) + + def setitem(self, w_list, index, w_item): + list_w = self.cast_from_void_star(w_list.storage) + + if self.is_correct_type(w_item): + list_w[index] = w_item + return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.setitem(index, w_item) + + def deleteitem(self, w_list, index): + list_w = self.cast_from_void_star(w_list.storage) + del list_w[index] + + def inplace_mul(self, w_list, times): + list_w 
= self.cast_from_void_star(w_list.storage) + list_w *= times + class IntegerListStrategy(AbstractUnwrappedStrategy): def cast_from_void_star(self, storage): @@ -261,53 +306,6 @@ def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "integer") - def length(self, w_list): - return len(cast_from_void_star(w_list.storage, "integer")) - - def getslice(self, w_list, start, stop, step, length): - if step == 1: - return W_ListObject(cast_from_void_star(w_list.storage, "integer")[start:stop]) - else: - subitems_w = [None] * length - for i in range(length): - subitems_w[i] = w_list.getitem(start) - start += step - return W_ListObject(subitems_w) - - def getitems(self, w_list): - return cast_from_void_star(w_list.storage, "integer") - - def inplace_mul(self, w_list, times): - list_w = cast_from_void_star(w_list.storage, "integer") - list_w *= times - - def deleteitem(self, w_list, index): - list_w = cast_from_void_star(w_list.storage, "integer") - del list_w[index] - - def setitem(self, w_list, index, w_item): - list_w = cast_from_void_star(w_list.storage, "integer") - - if is_W_IntObject(w_item): - list_w[index] = w_item - return - - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) - w_list.setitem(index, w_item) - - def insert(self, w_list, index, w_item): - list_w = cast_from_void_star(w_list.storage, "integer") - - if is_W_IntObject(w_item): - list_w.insert(index, w_item) - return - - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) - w_list.insert(index, w_item) - - class StringListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): From noreply at buildbot.pypy.org Fri Sep 23 13:11:29 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:29 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: StringListStartegy now inherits from AbstractUnwrappedStrategy too (removed unnecessary methods) Message-ID: 
<20110923111129.2906F820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47428:0f35f1ee48eb Date: 2011-02-23 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/0f35f1ee48eb/ Log: StringListStartegy now inherits from AbstractUnwrappedStrategy too (removed unnecessary methods) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -306,70 +306,17 @@ def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "integer") -class StringListStrategy(ListStrategy): +class StringListStrategy(AbstractUnwrappedStrategy): + + def cast_from_void_star(self, storage): + return cast_from_void_star(storage, "string") + + def is_correct_type(self, w_obj): + return is_W_StringObject(w_obj) def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "string") - def length(self, w_list): - return len(cast_from_void_star(w_list.storage, "string")) - - def getitem(self, w_list, index): - return cast_from_void_star(w_list.storage, "string")[index] - - def getslice(self, w_list, start, stop, step, length): - if step == 1: - return W_ListObject(cast_from_void_star(w_list.storage, "string")[start:stop]) - else: - subitems_w = [None] * length - for i in range(length): - subitems_w[i] = w_list.getitem(start) - start += step - return W_ListObject(subitems_w) - - def getitems(self, w_list): - return cast_from_void_star(w_list.storage, "string") - - def append(self, w_list, w_item): - - if is_W_StringObject(w_item): - cast_from_void_star(w_list.storage, "string").append(w_item) - return - - list_w = w_list.getitems() - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) - w_list.append(w_item) - - def inplace_mul(self, w_list, times): - list_w = cast_from_void_star(w_list.storage, "string") - list_w *= times - - def deleteitem(self, w_list, index): - list_w = 
cast_from_void_star(w_list.storage, "string") - del list_w[index] - - def setitem(self, w_list, index, w_item): - list_w = cast_from_void_star(w_list.storage, "string") - if is_W_StringObject(w_item): - list_w[index] = w_item - return - - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) - w_list.setitem(index, w_item) - - def insert(self, w_list, index, w_item): - list_w = cast_from_void_star(w_list.storage, "string") - - if is_W_StringObject(w_item): - list_w.insert(index, w_item) - return - - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) - w_list.insert(index, w_item) - # _______________________________________________________ init_signature = Signature(['sequence'], None, None) From noreply at buildbot.pypy.org Fri Sep 23 13:11:30 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:30 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: ObjectListStrategy inherits from AbstractUnwrappedStrategy (is_correct_type returns always True) Message-ID: <20110923111130.55C55820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47429:e8a8165c7468 Date: 2011-02-23 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/e8a8165c7468/ Log: ObjectListStrategy inherits from AbstractUnwrappedStrategy (is_correct_type returns always True) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -176,49 +176,6 @@ assert index == 0 self.append(w_list, w_item) -class ObjectListStrategy(ListStrategy): - def init_from_list_w(self, w_list, list_w): - w_list.storage = cast_to_void_star(list_w, "object") - - def length(self, w_list): - return len(cast_from_void_star(w_list.storage, "object")) - - def getitem(self, w_list, index): - return cast_from_void_star(w_list.storage, "object")[index] - - def getslice(self, w_list, start, stop, 
step, length): - if step == 1: - return W_ListObject(cast_from_void_star(w_list.storage, "object")[start:stop]) - else: - subitems_w = [None] * length - for i in range(length): - subitems_w[i] = w_list.getitem(start) - start += step - return W_ListObject(subitems_w) - - def getitems(self, w_list): - return cast_from_void_star(w_list.storage, "object") - - def append(self, w_list, w_item): - cast_from_void_star(w_list.storage, "object").append(w_item) - - def inplace_mul(self, w_list, times): - list_w = cast_from_void_star(w_list.storage, "object") - list_w *= times - - def deleteitem(self, w_list, index): - list_w = cast_from_void_star(w_list.storage, "object") - del list_w[index] - - def setitem(self, w_list, index, w_item): - list_w = cast_from_void_star(w_list.storage, "object") - list_w[index] = w_item - - def insert(self, w_list, index, w_item): - list_w = cast_from_void_star(w_list.storage, "object") - list_w.insert(index, w_item) - - class AbstractUnwrappedStrategy(ListStrategy): def unwrap(self, w_obj): # XXX override later @@ -295,6 +252,16 @@ list_w = self.cast_from_void_star(w_list.storage) list_w *= times +class ObjectListStrategy(AbstractUnwrappedStrategy): + def cast_from_void_star(self, storage): + return cast_from_void_star(storage, "object") + + def is_correct_type(self, w_obj): + return True + + def init_from_list_w(self, w_list, list_w): + w_list.storage = cast_to_void_star(list_w, "object") + class IntegerListStrategy(AbstractUnwrappedStrategy): def cast_from_void_star(self, storage): From noreply at buildbot.pypy.org Fri Sep 23 13:11:31 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:31 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented deleteslice; Move delitem_slice_helper to bytearrayobject Message-ID: <20110923111131.83B8E820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47430:2a6c4e1e444b Date: 2011-02-23 14:55 +0100 
http://bitbucket.org/pypy/pypy/changeset/2a6c4e1e444b/ Log: Implemented deleteslice; Move delitem_slice_helper to bytearrayobject diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -6,7 +6,6 @@ from pypy.rlib.rarithmetic import intmask from pypy.rlib.rstring import StringBuilder from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.listobject import _delitem_slice_helper from pypy.objspace.std.listtype import get_list_index from pypy.objspace.std.stringobject import W_StringObject from pypy.objspace.std.unicodeobject import W_UnicodeObject @@ -430,9 +429,36 @@ len(w_bytearray.data)) delitem_slice_helper(space, w_bytearray.data, start, step, slicelength) -# create new helper function with different list type specialisation -delitem_slice_helper = func_with_new_name(_delitem_slice_helper, - 'delitem_slice_helper') +def delitem_slice_helper(space, items, start, step, slicelength): + if slicelength==0: + return + + if step < 0: + start = start + step * (slicelength-1) + step = -step + + if step == 1: + assert start >= 0 + assert slicelength >= 0 + del items[start:start+slicelength] + else: + n = len(items) + i = start + + for discard in range(1, slicelength): + j = i+1 + i += step + while j < i: + items[j-discard] = items[j] + j += 1 + + j = i+1 + while j < n: + items[j-slicelength] = items[j] + j += 1 + start = n - slicelength + assert start >= 0 # annotator hint + del items[start:] def _setitem_helper(w_bytearray, start, stop, slicelength, data): assert start >= 0 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -93,6 +93,9 @@ def deleteitem(self, index): self.strategy.deleteitem(self, index) + def deleteslice(self, start, step, length): + self.strategy.deleteslice(self, start, step, length) + def setitem(self, 
index, w_item): self.strategy.setitem(self, index, w_item) @@ -248,6 +251,38 @@ list_w = self.cast_from_void_star(w_list.storage) del list_w[index] + def deleteslice(self, w_list, start, step, slicelength): + items = self.cast_from_void_star(w_list.storage) + if slicelength==0: + return + + if step < 0: + start = start + step * (slicelength-1) + step = -step + + if step == 1: + assert start >= 0 + assert slicelength >= 0 + del items[start:start+slicelength] + else: + n = len(items) + i = start + + for discard in range(1, slicelength): + j = i+1 + i += step + while j < i: + items[j-discard] = items[j] + j += 1 + + j = i+1 + while j < n: + items[j-slicelength] = items[j] + j += 1 + start = n - slicelength + assert start >= 0 # annotator hint + del items[start:] + def inplace_mul(self, w_list, times): list_w = self.cast_from_void_star(w_list.storage) list_w *= times @@ -346,7 +381,7 @@ def delslice__List_ANY_ANY(space, w_list, w_start, w_stop): length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) - _delitem_slice_helper(space, w_list.getitems(), start, 1, stop-start) + w_list.deleteslice(start, 1, stop-start) def contains__List_ANY(space, w_list, w_obj): # needs to be safe against eq_w() mutating the w_list behind our back @@ -459,40 +494,8 @@ def delitem__List_Slice(space, w_list, w_slice): - start, stop, step, slicelength = w_slice.indices4(space, - len(w_list.wrappeditems)) - _delitem_slice_helper(space, w_list.wrappeditems, start, step, slicelength) - -def _delitem_slice_helper(space, items, start, step, slicelength): - if slicelength==0: - return - - if step < 0: - start = start + step * (slicelength-1) - step = -step - - if step == 1: - assert start >= 0 - assert slicelength >= 0 - del items[start:start+slicelength] - else: - n = len(items) - i = start - - for discard in range(1, slicelength): - j = i+1 - i += step - while j < i: - items[j-discard] = items[j] - j += 1 - - j = i+1 - while j < n: - items[j-slicelength] = 
items[j] - j += 1 - start = n - slicelength - assert start >= 0 # annotator hint - del items[start:] + start, stop, step, slicelength = w_slice.indices4(space, w_list.length()) + w_list.deleteslice(start, step, slicelength) def setitem__List_ANY_ANY(space, w_list, w_index, w_any): idx = get_list_index(space, w_index) From noreply at buildbot.pypy.org Fri Sep 23 13:11:32 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:32 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented setslice Message-ID: <20110923111132.B84BA820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47431:37b25c13f634 Date: 2011-02-23 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/37b25c13f634/ Log: Implemented setslice diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -99,6 +99,9 @@ def setitem(self, index, w_item): self.strategy.setitem(self, index, w_item) + def setslice(self, start, step, slicelength, sequence_w): + self.strategy.setslice(self, start, step, slicelength, sequence_w) + def insert(self, index, w_item): self.strategy.insert(self, index, w_item) @@ -130,9 +133,15 @@ def deleteitem(self, w_list, index): raise NotImplementedError + def deleteslice(self, w_list, start, step, slicelength): + raise NotImplementedError + def setitem(self, w_list, index, w_item): raise NotImplementedError + def setslice(self, w_list, start, step, slicelength, sequence_w): + raise NotImplementedError + def insert(self, w_list, index, w_item): raise NotImplementedError @@ -172,9 +181,16 @@ def deleteitem(self, w_list, index): raise IndexError + def deleteslice(self, w_list, start, step, slicelength): + raise IndexError + def setitem(self, w_list, index, w_item): raise IndexError + def setslice(self, w_list, start, step, slicelength, sequence_w): + w_list.strategy = 
get_strategy_from_list_objects(sequence_w) + w_list.strategy.init_from_list_w(w_list, sequence_w) + def insert(self, w_list, index, w_item): assert index == 0 self.append(w_list, w_item) @@ -247,6 +263,52 @@ w_list.strategy.init_from_list_w(w_list, list_w) w_list.setitem(index, w_item) + def setslice(self, w_list, start, step, slicelength, sequence_w): + assert slicelength >= 0 + items = self.cast_from_void_star(w_list.storage) + oldsize = len(items) + len2 = len(sequence_w) + if step == 1: # Support list resizing for non-extended slices + delta = slicelength - len2 + if delta < 0: + delta = -delta + newsize = oldsize + delta + # XXX support this in rlist! + items += [None] * delta + lim = start+len2 + i = newsize - 1 + while i >= lim: + items[i] = items[i-delta] + i -= 1 + elif start >= 0: + del items[start:start+delta] + else: + assert delta==0 # start<0 is only possible with slicelength==0 + elif len2 != slicelength: # No resize for extended slices + raise operationerrfmt(space.w_ValueError, "attempt to " + "assign sequence of size %d to extended slice of size %d", + len2, slicelength) + + if sequence_w is items: + if step > 0: + # Always copy starting from the right to avoid + # having to make a shallow copy in the case where + # the source and destination lists are the same list. 
+ i = len2 - 1 + start += i*step + while i >= 0: + items[start] = sequence_w[i] + start -= step + i -= 1 + return + else: + # Make a shallow copy to more easily handle the reversal case + sequence_w = list(sequence_w) + for i in range(len2): + items[start] = sequence_w[i] + start += step + + def deleteitem(self, w_list, index): list_w = self.cast_from_void_star(w_list.storage) del list_w[index] @@ -373,10 +435,11 @@ start, stop = normalize_simple_slice(space, length, w_start, w_stop) return w_list.getslice(start, stop, 1, stop - start) -def setslice__List_ANY_ANY_ANY(space, w_list, w_start, w_stop, w_sequence): +def setslice__List_ANY_ANY_ANY(space, w_list, w_start, w_stop, w_iterable): length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) - _setitem_slice_helper(space, w_list, start, 1, stop-start, w_sequence) + sequence_w = space.listview(w_iterable) + w_list.setslice(start, 1, stop-start, sequence_w) def delslice__List_ANY_ANY(space, w_list, w_start, w_stop): length = w_list.length() @@ -507,55 +570,10 @@ return space.w_None def setitem__List_Slice_ANY(space, w_list, w_slice, w_iterable): - oldsize = len(w_list.wrappeditems) + oldsize = w_list.length() start, stop, step, slicelength = w_slice.indices4(space, oldsize) - _setitem_slice_helper(space, w_list, start, step, slicelength, w_iterable) - -def _setitem_slice_helper(space, w_list, start, step, slicelength, w_iterable): - sequence2 = space.listview(w_iterable) - assert slicelength >= 0 - items = w_list.wrappeditems - oldsize = len(items) - len2 = len(sequence2) - if step == 1: # Support list resizing for non-extended slices - delta = slicelength - len2 - if delta < 0: - delta = -delta - newsize = oldsize + delta - # XXX support this in rlist! 
- items += [None] * delta - lim = start+len2 - i = newsize - 1 - while i >= lim: - items[i] = items[i-delta] - i -= 1 - elif start >= 0: - del items[start:start+delta] - else: - assert delta==0 # start<0 is only possible with slicelength==0 - elif len2 != slicelength: # No resize for extended slices - raise operationerrfmt(space.w_ValueError, "attempt to " - "assign sequence of size %d to extended slice of size %d", - len2, slicelength) - - if sequence2 is items: - if step > 0: - # Always copy starting from the right to avoid - # having to make a shallow copy in the case where - # the source and destination lists are the same list. - i = len2 - 1 - start += i*step - while i >= 0: - items[start] = sequence2[i] - start -= step - i -= 1 - return - else: - # Make a shallow copy to more easily handle the reversal case - sequence2 = list(sequence2) - for i in range(len2): - items[start] = sequence2[i] - start += step + sequence_w = space.listview(w_iterable) + w_list.setslice(start, step, slicelength, sequence_w) app = gateway.applevel(""" def listrepr(currently_in_repr, l): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -92,5 +92,10 @@ l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) + def test_setslice(self): + l = W_ListObject([]) + assert isinstance(l.strategy, EmptyListStrategy) + l.setslice(0, 1, 2, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:11:33 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:33 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented extend Message-ID: <20110923111133.E0147820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47432:f6705d4e93a3 
Date: 2011-02-23 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/f6705d4e93a3/ Log: Implemented extend diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -105,6 +105,9 @@ def insert(self, index, w_item): self.strategy.insert(self, index, w_item) + def extend(self, items_w): + self.strategy.extend(self, items_w) + registerimplementation(W_ListObject) @@ -145,6 +148,9 @@ def insert(self, w_list, index, w_item): raise NotImplementedError + def extend(self, w_list, items_w): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -195,6 +201,11 @@ assert index == 0 self.append(w_list, w_item) + def extend(self, w_list, items_w): + #XXX: would be faster if items_w was a W_List and we could get its strategy + w_list.strategy = get_strategy_from_list_objects(items_w) + w_list.strategy.init_from_list_w(w_list, items_w) + class AbstractUnwrappedStrategy(ListStrategy): def unwrap(self, w_obj): # XXX override later @@ -210,6 +221,9 @@ def is_correct_type(self, w_obj): raise NotImplementedError("abstract base class") + def list_is_correct_type(self, w_list): + raise NotImplementedError("abstract base class") + def length(self, w_list): return len(self.cast_from_void_star(w_list.storage)) @@ -252,6 +266,16 @@ w_list.strategy.init_from_list_w(w_list, list_w) w_list.insert(index, w_item) + def extend(self, w_list, w_other): + list_w = self.cast_from_void_star(w_list.storage) + if self.list_is_correct_type(w_other): + list_w += w_other.getitems() # or self.cast_from_void_star(w_other.storage) ? 
+ return + + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + w_list.extend(w_other) + def setitem(self, w_list, index, w_item): list_w = self.cast_from_void_star(w_list.storage) @@ -356,6 +380,9 @@ def is_correct_type(self, w_obj): return True + def list_is_correct_type(self, w_list): + return True + def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -367,6 +394,9 @@ def is_correct_type(self, w_obj): return is_W_IntObject(w_obj) + def list_is_correct_type(self, w_list): + return type(self) == type(w_list.strategy) + def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "integer") @@ -378,6 +408,9 @@ def is_correct_type(self, w_obj): return is_W_StringObject(w_obj) + def list_is_correct_type(self, w_list): + return type(self) == type(w_list.strategy) + def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "string") @@ -619,11 +652,12 @@ return space.w_None def list_extend__List_List(space, w_list, w_other): - w_list.wrappeditems += w_other.wrappeditems + w_list.extend(w_other) return space.w_None def list_extend__List_ANY(space, w_list, w_any): - w_list.wrappeditems += space.listview(w_any) + w_other = W_ListObject(space.listview(w_any)) + w_list.extend(w_other) return space.w_None # note that the default value will come back wrapped!!! 
From noreply at buildbot.pypy.org Fri Sep 23 13:11:35 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:35 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Added tests for extend Message-ID: <20110923111135.19488820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47433:f34cf6b46a37 Date: 2011-02-23 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/f34cf6b46a37/ Log: Added tests for extend diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -201,10 +201,9 @@ assert index == 0 self.append(w_list, w_item) - def extend(self, w_list, items_w): - #XXX: would be faster if items_w was a W_List and we could get its strategy - w_list.strategy = get_strategy_from_list_objects(items_w) - w_list.strategy.init_from_list_w(w_list, items_w) + def extend(self, w_list, w_other): + w_list.strategy = w_other.strategy + w_list.strategy.init_from_list_w(w_list, w_other.getitems()) class AbstractUnwrappedStrategy(ListStrategy): def unwrap(self, w_obj): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -98,4 +98,19 @@ l.setslice(0, 1, 2, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) + def test_extend(self): + l = W_ListObject([]) + assert isinstance(l.strategy, EmptyListStrategy) + l.extend(W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(l.strategy, IntegerListStrategy) + l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.extend(W_ListObject([self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')])) + assert isinstance(l.strategy, 
ObjectListStrategy) + + l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.extend(W_ListObject([self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) + assert isinstance(l.strategy, IntegerListStrategy) + From noreply at buildbot.pypy.org Fri Sep 23 13:11:36 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:36 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: setslice may change strategy if necessary Message-ID: <20110923111136.454DF820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47434:f8f4c2997bac Date: 2011-02-23 17:09 +0100 http://bitbucket.org/pypy/pypy/changeset/f8f4c2997bac/ Log: setslice may change strategy if necessary diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -289,6 +289,13 @@ def setslice(self, w_list, start, step, slicelength, sequence_w): assert slicelength >= 0 items = self.cast_from_void_star(w_list.storage) + + if not self.is_correct_type(W_ListObject(sequence_w)): + w_list.strategy = ObjectListStrategy() + w_list.strategy.init_from_list_w(w_list, items) + w_list.setslice(start, step, slicelength, sequence_w) + return + oldsize = len(items) len2 = len(sequence_w) if step == 1: # Support list resizing for non-extended slices From noreply at buildbot.pypy.org Fri Sep 23 13:11:37 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:37 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Added tests for strategy changes in setslice and fixed is_correct_type error Message-ID: <20110923111137.734AD820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47435:750c8eed3a9d Date: 2011-02-23 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/750c8eed3a9d/ Log: Added tests for strategy changes in 
setslice and fixed is_correct_type error diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -290,7 +290,7 @@ assert slicelength >= 0 items = self.cast_from_void_star(w_list.storage) - if not self.is_correct_type(W_ListObject(sequence_w)): + if not self.list_is_correct_type(W_ListObject(sequence_w)): w_list.strategy = ObjectListStrategy() w_list.strategy.init_from_list_w(w_list, items) w_list.setslice(start, step, slicelength, sequence_w) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -98,6 +98,16 @@ l.setslice(0, 1, 2, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) + l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.setslice(0, 1, 2, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)]) + assert isinstance(l.strategy, IntegerListStrategy) + + l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.setslice(0, 1, 2, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) + assert isinstance(l.strategy, ObjectListStrategy) + def test_extend(self): l = W_ListObject([]) assert isinstance(l.strategy, EmptyListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:11:38 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:38 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: If list is empty after deletion (item, slice) switch to EmptyListStrategy Message-ID: <20110923111138.9FECA820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47436:3978ca76f7a3 Date: 2011-02-23 17:40 
+0100 http://bitbucket.org/pypy/pypy/changeset/3978ca76f7a3/ Log: If list is empty after deletion (item, slice) switch to EmptyListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -335,13 +335,16 @@ # Make a shallow copy to more easily handle the reversal case sequence_w = list(sequence_w) for i in range(len2): - items[start] = sequence_w[i] + items[start] = self.unwrap(sequence_w[i]) start += step def deleteitem(self, w_list, index): list_w = self.cast_from_void_star(w_list.storage) del list_w[index] + if len(list_w) == 0: + w_list.strategy = EmptyListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) def deleteslice(self, w_list, start, step, slicelength): items = self.cast_from_void_star(w_list.storage) @@ -375,6 +378,10 @@ assert start >= 0 # annotator hint del items[start:] + if len(items) == 0: + w_list.strategy = EmptyListStrategy() + w_list.strategy.init_from_list_w(w_list, items) + def inplace_mul(self, w_list, times): list_w = self.cast_from_void_star(w_list.storage) list_w *= times diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -92,6 +92,17 @@ l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) + def test_delete(self): + l = W_ListObject([self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.deleteitem(0) + assert isinstance(l.strategy, EmptyListStrategy) + + l = W_ListObject([self.space.wrap(1), self.space.wrap(2)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.deleteslice(0, 1, 2) + assert isinstance(l.strategy, EmptyListStrategy) + def test_setslice(self): l = W_ListObject([]) assert isinstance(l.strategy, EmptyListStrategy) @@ -103,6 +114,11 @@ l.setslice(0, 1, 2, [self.space.wrap(4), 
self.space.wrap(5), self.space.wrap(6)]) assert isinstance(l.strategy, IntegerListStrategy) + l = W_ListObject([self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) + assert isinstance(l.strategy, ObjectListStrategy) + l.setslice(0, 1, 2, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, ObjectListStrategy) + l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) From noreply at buildbot.pypy.org Fri Sep 23 13:11:39 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:39 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented pop Message-ID: <20110923111139.CEB60820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47437:91f2d172c7d6 Date: 2011-02-25 11:47 +0100 http://bitbucket.org/pypy/pypy/changeset/91f2d172c7d6/ Log: Implemented pop diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -96,6 +96,9 @@ def deleteslice(self, start, step, length): self.strategy.deleteslice(self, start, step, length) + def pop(self, index): + return self.strategy.pop(self, index) + def setitem(self, index, w_item): self.strategy.setitem(self, index, w_item) @@ -139,6 +142,9 @@ def deleteslice(self, w_list, start, step, slicelength): raise NotImplementedError + def pop(self, w_list, index): + raise NotImplementedError + def setitem(self, w_list, index, w_item): raise NotImplementedError @@ -190,6 +196,9 @@ def deleteslice(self, w_list, start, step, slicelength): raise IndexError + def pop(self, w_list, index): + raise IndexError + def setitem(self, w_list, index, w_item): raise IndexError @@ -382,6 +391,16 @@ w_list.strategy = EmptyListStrategy() w_list.strategy.init_from_list_w(w_list, 
items) + def pop(self, w_list, index): + list_w = self.cast_from_void_star(w_list.storage) + item_w = self.wrap(list_w.pop(index)) + + if len(list_w) == 0: + w_list.strategy = EmptyListStrategy() + w_list.strategy.init_from_list_w(w_list, list_w) + + return item_w + def inplace_mul(self, w_list, times): list_w = self.cast_from_void_star(w_list.storage) list_w *= times @@ -675,13 +694,12 @@ # note that the default value will come back wrapped!!! def list_pop__List_ANY(space, w_list, w_idx=-1): - items = w_list.wrappeditems - if len(items)== 0: + if w_list.length() == 0: raise OperationError(space.w_IndexError, space.wrap("pop from empty list")) idx = space.int_w(w_idx) try: - return items.pop(idx) + return w_list.pop(idx) except IndexError: raise OperationError(space.w_IndexError, space.wrap("pop index out of range")) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -92,7 +92,7 @@ l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) - def test_delete(self): + def test_list_empty_after_delete(self): l = W_ListObject([self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.deleteitem(0) @@ -103,6 +103,11 @@ l.deleteslice(0, 1, 2) assert isinstance(l.strategy, EmptyListStrategy) + l = W_ListObject([self.space.wrap(1)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.pop(-1) + assert isinstance(l.strategy, EmptyListStrategy) + def test_setslice(self): l = W_ListObject([]) assert isinstance(l.strategy, EmptyListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:11:41 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:41 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Replaced more 'wrappeditems' Message-ID: <20110923111141.06A72820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: 
list-strategies Changeset: r47438:5cc282e2b68e Date: 2011-02-25 12:08 +0100 http://bitbucket.org/pypy/pypy/changeset/5cc282e2b68e/ Log: Replaced more 'wrappeditems' diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -706,12 +706,13 @@ def list_remove__List_ANY(space, w_list, w_any): # needs to be safe against eq_w() mutating the w_list behind our back - items = w_list.wrappeditems i = 0 - while i < len(items): - if space.eq_w(items[i], w_any): - if i < len(items): # if this is wrong the list was changed - del items[i] + while i < w_list.length(): + #XXX: items will be wrapped. not necessary when liststrategies differ + if space.eq_w(w_list.getitem(i), w_any): + #XXX: change of w_list.storage shouldn't be possible from the outside + if i < w_list.length(): # if this is wrong the list was changed + w_list.deleteitem(i) return space.w_None i += 1 raise OperationError(space.w_ValueError, @@ -719,12 +720,11 @@ def list_index__List_ANY_ANY_ANY(space, w_list, w_any, w_start, w_stop): # needs to be safe against eq_w() mutating the w_list behind our back - items = w_list.wrappeditems - size = len(items) + size = w_list.length() i = slicetype.adapt_bound(space, size, w_start) stop = slicetype.adapt_bound(space, size, w_stop) - while i < stop and i < len(items): - if space.eq_w(items[i], w_any): + while i < stop and i < w_list.length(): + if space.eq_w(w_list.getitem(i), w_any): return space.wrap(i) i += 1 raise OperationError(space.w_ValueError, @@ -734,9 +734,8 @@ # needs to be safe against eq_w() mutating the w_list behind our back count = 0 i = 0 - items = w_list.wrappeditems - while i < len(items): - if space.eq_w(items[i], w_any): + while i < w_list.length(): + if space.eq_w(w_list.getitem(i), w_any): count += 1 i += 1 return space.wrap(count) From noreply at buildbot.pypy.org Fri Sep 23 13:11:42 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 
Sep 2011 13:11:42 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented reverse Message-ID: <20110923111142.3143E820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47439:82ea67038f92 Date: 2011-02-25 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/82ea67038f92/ Log: Implemented reverse diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -111,6 +111,9 @@ def extend(self, items_w): self.strategy.extend(self, items_w) + def reverse(self): + self.strategy.reverse(self) + registerimplementation(W_ListObject) @@ -157,6 +160,9 @@ def extend(self, w_list, items_w): raise NotImplementedError + def reverse(self, w_list): + raise NotImplementedError + class EmptyListStrategy(ListStrategy): def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 @@ -214,6 +220,9 @@ w_list.strategy = w_other.strategy w_list.strategy.init_from_list_w(w_list, w_other.getitems()) + def reverse(self, w_list): + pass + class AbstractUnwrappedStrategy(ListStrategy): def unwrap(self, w_obj): # XXX override later @@ -405,6 +414,9 @@ list_w = self.cast_from_void_star(w_list.storage) list_w *= times + def reverse(self, w_list): + self.cast_from_void_star(w_list.storage).reverse() + class ObjectListStrategy(AbstractUnwrappedStrategy): def cast_from_void_star(self, storage): return cast_from_void_star(storage, "object") @@ -741,7 +753,7 @@ return space.wrap(count) def list_reverse__List(space, w_list): - w_list.wrappeditems.reverse() + w_list.reverse() return space.w_None # ____________________________________________________________ From noreply at buildbot.pypy.org Fri Sep 23 13:11:43 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:43 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Removed wrappeditems form EmptyListStrategy.append and fixed related problems Message-ID: 
<20110923111143.5BB8F820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47440:bac867bdab47 Date: 2011-02-25 14:35 +0100 http://bitbucket.org/pypy/pypy/changeset/bac867bdab47/ Log: Removed wrappeditems form EmptyListStrategy.append and fixed related problems diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -61,11 +61,11 @@ def __repr__(w_self): """ representation for debugging purposes """ - return "%s(%s)" % (w_self.__class__.__name__, w_self.wrappeditems) + return "%s(%s)" % (w_self.__class__.__name__, w_self.getitems()) def unwrap(w_list, space): # for tests only! - items = [space.unwrap(w_item) for w_item in w_list.wrappeditems] + items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) def append(w_list, w_item): @@ -190,8 +190,7 @@ else: w_list.strategy = ObjectListStrategy() - w_list.wrappeditems.append(w_item) - w_list.strategy.init_from_list_w(w_list, w_list.wrappeditems) + w_list.strategy.init_from_list_w(w_list, [w_item]) def inplace_mul(self, w_list, times): return @@ -720,9 +719,7 @@ # needs to be safe against eq_w() mutating the w_list behind our back i = 0 while i < w_list.length(): - #XXX: items will be wrapped. 
not necessary when liststrategies differ if space.eq_w(w_list.getitem(i), w_any): - #XXX: change of w_list.storage shouldn't be possible from the outside if i < w_list.length(): # if this is wrong the list was changed w_list.deleteitem(i) return space.w_None diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -360,8 +360,8 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems[:] - elif isinstance(w_obj, W_ListObject): - t = w_obj.wrappeditems[:] + elif isinstance(w_obj, W_ListObject): # XXX enable fast path again + t = w_obj.getitems() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: @@ -375,7 +375,7 @@ if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_ListObject): - t = w_obj.wrappeditems[:] + t = w_obj.getitems() else: if unroll: return make_sure_not_resized(ObjSpace.unpackiterable_unroll( @@ -392,7 +392,7 @@ def listview(self, w_obj, expected_length=-1): if isinstance(w_obj, W_ListObject): - t = w_obj.wrappeditems + t = w_obj.getitems() elif isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems[:] else: From noreply at buildbot.pypy.org Fri Sep 23 13:11:44 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:44 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Replaced warppeditems in sort-method (this only moved the problem to W_List._overwrite) Message-ID: <20110923111144.8563F820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47441:29691b49fdf7 Date: 2011-02-25 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/29691b49fdf7/ Log: Replaced warppeditems in sort-method (this only moved the problem to W_List._overwrite) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- 
a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -68,6 +68,11 @@ items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) + def _overwrite(w_self, items_w): + w_self.strategy = get_strategy_from_list_objects(items_w) + w_self.strategy.init_from_list_w(w_self, items_w) + w_self.wrappeditems = items_w + def append(w_list, w_item): w_list.strategy.append(w_list, w_item) @@ -816,8 +821,7 @@ sorterclass = CustomKeySort else: sorterclass = SimpleSort - items = w_list.wrappeditems - sorter = sorterclass(items, len(items)) + sorter = sorterclass(w_list.getitems(), w_list.length()) sorter.space = space sorter.w_cmp = w_cmp @@ -826,7 +830,8 @@ # by comparison functions can't affect the slice of memory we're # sorting (allowing mutations during sorting is an IndexError or # core-dump factory, since wrappeditems may change). - w_list.wrappeditems = [] + w_list._overwrite([]) + #w_list.wrappeditems = [] # wrap each item in a KeyContainer if needed if has_key: @@ -856,10 +861,10 @@ sorter.list[i] = w_obj.w_item # check if the user mucked with the list during the sort - mucked = len(w_list.wrappeditems) > 0 + mucked = w_list.length() > 0 # put the items back into the list - w_list.wrappeditems = sorter.list + w_list._overwrite(sorter.list) if mucked: raise OperationError(space.w_ValueError, From noreply at buildbot.pypy.org Fri Sep 23 13:11:45 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:45 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Removed remaining wrappeditems Message-ID: <20110923111145.B0946820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47442:664e24c98b22 Date: 2011-02-25 16:13 +0100 http://bitbucket.org/pypy/pypy/changeset/664e24c98b22/ Log: Removed remaining wrappeditems diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py --- a/pypy/objspace/std/frame.py +++ b/pypy/objspace/std/frame.py @@ -58,7 +58,7 @@ w_1 
= f.popvalue() if type(w_1) is W_ListObject and type(w_2) is intobject.W_IntObject: try: - w_result = w_1.wrappeditems[w_2.intval] + w_result = w_1.getitem(w_2.intval) except IndexError: raise OperationError(f.space.w_IndexError, f.space.wrap("list index out of range")) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -57,7 +57,7 @@ assert isinstance(wrappeditems, list) w_self.strategy = get_strategy_from_list_objects(wrappeditems) w_self.strategy.init_from_list_w(w_self, wrappeditems) - w_self.wrappeditems = wrappeditems + #w_self.wrappeditems = wrappeditems def __repr__(w_self): """ representation for debugging purposes """ @@ -68,11 +68,6 @@ items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) - def _overwrite(w_self, items_w): - w_self.strategy = get_strategy_from_list_objects(items_w) - w_self.strategy.init_from_list_w(w_self, items_w) - w_self.wrappeditems = items_w - def append(w_list, w_item): w_list.strategy.append(w_list, w_item) @@ -250,7 +245,10 @@ return len(self.cast_from_void_star(w_list.storage)) def getitem(self, w_list, index): - return self.wrap(self.cast_from_void_star(w_list.storage)[index]) + try: + return self.wrap(self.cast_from_void_star(w_list.storage)[index]) + except IndexError: # make RPython raise the exception + raise def getitems(self, w_list): return self.cast_from_void_star(w_list.storage) @@ -806,6 +804,7 @@ return CustomCompareSort.lt(self, a.w_key, b.w_key) def list_sort__List_ANY_ANY_ANY(space, w_list, w_cmp, w_keyfunc, w_reverse): + #XXX so far sorting always wraps list has_cmp = not space.is_w(w_cmp, space.w_None) has_key = not space.is_w(w_keyfunc, space.w_None) has_reverse = space.is_true(w_reverse) @@ -830,8 +829,7 @@ # by comparison functions can't affect the slice of memory we're # sorting (allowing mutations during sorting is an IndexError or # core-dump factory, since wrappeditems 
may change). - w_list._overwrite([]) - #w_list.wrappeditems = [] + w_list.__init__([]) # wrap each item in a KeyContainer if needed if has_key: @@ -864,7 +862,7 @@ mucked = w_list.length() > 0 # put the items back into the list - w_list._overwrite(sorter.list) + w_list.__init__(sorter.list) if mucked: raise OperationError(space.w_ValueError, From noreply at buildbot.pypy.org Fri Sep 23 13:11:46 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:46 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Refactored EmptyListStrategy Message-ID: <20110923111146.D9737820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47443:7c904798d50d Date: 2011-02-25 16:19 +0100 http://bitbucket.org/pypy/pypy/changeset/7c904798d50d/ Log: Refactored EmptyListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -57,7 +57,6 @@ assert isinstance(wrappeditems, list) w_self.strategy = get_strategy_from_list_objects(wrappeditems) w_self.strategy.init_from_list_w(w_self, wrappeditems) - #w_self.wrappeditems = wrappeditems def __repr__(w_self): """ representation for debugging purposes """ @@ -181,16 +180,7 @@ return [] def append(self, w_list, w_item): - if is_W_IntObject(w_item): - w_list.strategy = IntegerListStrategy() - - elif is_W_StringObject(w_item): - w_list.strategy = StringListStrategy() - - else: - w_list.strategy = ObjectListStrategy() - - w_list.strategy.init_from_list_w(w_list, [w_item]) + w_list.__init__([w_item]) def inplace_mul(self, w_list, times): return @@ -208,14 +198,14 @@ raise IndexError def setslice(self, w_list, start, step, slicelength, sequence_w): - w_list.strategy = get_strategy_from_list_objects(sequence_w) - w_list.strategy.init_from_list_w(w_list, sequence_w) + w_list.__init__(sequence_w) def insert(self, w_list, index, w_item): assert index == 0 self.append(w_list, 
w_item) def extend(self, w_list, w_other): + #XXX items are wrapped and unwrapped again w_list.strategy = w_other.strategy w_list.strategy.init_from_list_w(w_list, w_other.getitems()) From noreply at buildbot.pypy.org Fri Sep 23 13:11:48 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:48 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Refactored switching to Object-/EmptyListStrategy Message-ID: <20110923111148.0F47D820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47444:a5b007459cfc Date: 2011-02-25 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/a5b007459cfc/ Log: Refactored switching to Object-/EmptyListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -67,11 +67,20 @@ items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) + def switch_to_object_strategy(self, items_w): + self.strategy = ObjectListStrategy() + self.strategy.init_from_list_w(self, items_w) + + def check_empty_strategy(self, items_w): + if len(items_w) == 0: + self.strategy = EmptyListStrategy() + self.strategy.init_from_list_w(self, items_w) + + # ___________________________________________________ + def append(w_list, w_item): w_list.strategy.append(w_list, w_item) - # ___________________________________________________ - def length(self): return self.strategy.length(self) @@ -259,9 +268,7 @@ self.cast_from_void_star(w_list.storage).append(self.unwrap(w_item)) return - items_w = w_list.getitems() - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, items_w) + w_list.switch_to_object_strategy(w_list.getitems()) w_list.append(w_item) def insert(self, w_list, index, w_item): @@ -271,8 +278,7 @@ list_w.insert(index, w_item) return - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + 
w_list.switch_to_object_strategy(list_w) w_list.insert(index, w_item) def extend(self, w_list, w_other): @@ -281,8 +287,7 @@ list_w += w_other.getitems() # or self.cast_from_void_star(w_other.storage) ? return - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + w_list.switch_to_object_strategy(list_w) w_list.extend(w_other) def setitem(self, w_list, index, w_item): @@ -292,8 +297,7 @@ list_w[index] = w_item return - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + w_list.switch_to_object_strategy(list_w) w_list.setitem(index, w_item) def setslice(self, w_list, start, step, slicelength, sequence_w): @@ -301,8 +305,7 @@ items = self.cast_from_void_star(w_list.storage) if not self.list_is_correct_type(W_ListObject(sequence_w)): - w_list.strategy = ObjectListStrategy() - w_list.strategy.init_from_list_w(w_list, items) + w_list.switch_to_object_strategy(items) w_list.setslice(start, step, slicelength, sequence_w) return @@ -352,9 +355,7 @@ def deleteitem(self, w_list, index): list_w = self.cast_from_void_star(w_list.storage) del list_w[index] - if len(list_w) == 0: - w_list.strategy = EmptyListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) + w_list.check_empty_strategy(list_w) def deleteslice(self, w_list, start, step, slicelength): items = self.cast_from_void_star(w_list.storage) @@ -388,18 +389,13 @@ assert start >= 0 # annotator hint del items[start:] - if len(items) == 0: - w_list.strategy = EmptyListStrategy() - w_list.strategy.init_from_list_w(w_list, items) + w_list.check_empty_strategy(items) def pop(self, w_list, index): list_w = self.cast_from_void_star(w_list.storage) item_w = self.wrap(list_w.pop(index)) - if len(list_w) == 0: - w_list.strategy = EmptyListStrategy() - w_list.strategy.init_from_list_w(w_list, list_w) - + w_list.check_empty_strategy(list_w) return item_w def inplace_mul(self, w_list, times): From noreply at buildbot.pypy.org Fri Sep 23 13:11:49 
2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:49 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Preparation for wrapping/unwrapping in Strategies Message-ID: <20110923111149.39460820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47445:12e339ff3b4a Date: 2011-03-01 13:49 +0100 http://bitbucket.org/pypy/pypy/changeset/12e339ff3b4a/ Log: Preparation for wrapping/unwrapping in Strategies diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -22,25 +22,25 @@ return wrapper._content # don't know where to put this function, so it is global for now -def get_strategy_from_list_objects(list_w): +def get_strategy_from_list_objects(space, list_w): if list_w == []: - return EmptyListStrategy() + return EmptyListStrategy(space) # check for ints for e in list_w: if not is_W_IntObject(e): break if e is list_w[-1]: - return IntegerListStrategy() + return IntegerListStrategy(space) # check for ints for e in list_w: if not is_W_StringObject(e): break if e is list_w[-1]: - return StringListStrategy() + return StringListStrategy(space) - return ObjectListStrategy() + return ObjectListStrategy(space) def is_W_IntObject(w_object): from pypy.objspace.std.intobject import W_IntObject @@ -53,9 +53,9 @@ class W_ListObject(W_Object): from pypy.objspace.std.listtype import list_typedef as typedef - def __init__(w_self, wrappeditems): + def __init__(w_self, space, wrappeditems): assert isinstance(wrappeditems, list) - w_self.strategy = get_strategy_from_list_objects(wrappeditems) + w_self.strategy = get_strategy_from_list_objects(space, wrappeditems) w_self.strategy.init_from_list_w(w_self, wrappeditems) def __repr__(w_self): @@ -126,6 +126,10 @@ class ListStrategy(object): + + def __init__(self, space): + self.space = space + def init_from_list_w(self, w_list, list_w): raise NotImplementedError @@ 
-222,13 +226,12 @@ pass class AbstractUnwrappedStrategy(ListStrategy): - def unwrap(self, w_obj): - # XXX override later - return w_obj - def wrap(self, item): - # XXX override later - return item + def wrap(self, unwrapped): + raise NotImplementedError + + def unwrap(self, wrapped): + raise NotImplementedError def cast_from_void_star(self, storage): raise NotImplementedError("abstract base class") @@ -406,6 +409,12 @@ self.cast_from_void_star(w_list.storage).reverse() class ObjectListStrategy(AbstractUnwrappedStrategy): + def unwrap(self, w_obj): + return w_obj + + def wrap(self, item): + return item + def cast_from_void_star(self, storage): return cast_from_void_star(storage, "object") @@ -420,6 +429,12 @@ class IntegerListStrategy(AbstractUnwrappedStrategy): + def wrap(self, intval): + return self.space.wrap(intval) + + def unwrap(self, w_int): + return self.space.int_w(w_int) + def cast_from_void_star(self, storage): return cast_from_void_star(storage, "integer") @@ -434,6 +449,12 @@ class StringListStrategy(AbstractUnwrappedStrategy): + def wrap(self, stringval): + return self.space.wrap(stringval) + + def unwrap(self, w_string): + return self.space.str_w(w_string) + def cast_from_void_star(self, storage): return cast_from_void_star(storage, "string") From noreply at buildbot.pypy.org Fri Sep 23 13:11:50 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:11:50 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: (l.diekmann, cfbolz): store list as unwrapped data Message-ID: <20110923111150.6AB49820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47446:71cbd6ccc1b1 Date: 2011-03-01 16:36 +0100 http://bitbucket.org/pypy/pypy/changeset/71cbd6ccc1b1/ Log: (l.diekmann, cfbolz): store list as unwrapped data diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -55,6 +55,7 @@ def 
__init__(w_self, space, wrappeditems): assert isinstance(wrappeditems, list) + w_self.space = space w_self.strategy = get_strategy_from_list_objects(space, wrappeditems) w_self.strategy.init_from_list_w(w_self, wrappeditems) @@ -67,14 +68,15 @@ items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) - def switch_to_object_strategy(self, items_w): - self.strategy = ObjectListStrategy() - self.strategy.init_from_list_w(self, items_w) + def switch_to_object_strategy(self): + list_w = self.getitems() + self.strategy = ObjectListStrategy(self.space) + self.strategy.init_from_list_w(self, list_w) - def check_empty_strategy(self, items_w): - if len(items_w) == 0: - self.strategy = EmptyListStrategy() - self.strategy.init_from_list_w(self, items_w) + def check_empty_strategy(self): + if self.length() == 0: + self.strategy = EmptyListStrategy(self.space) + self.strategy.init_from_list_w(self, []) # ___________________________________________________ @@ -176,6 +178,7 @@ raise NotImplementedError class EmptyListStrategy(ListStrategy): + def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 w_list.storage = cast_to_void_star(None) @@ -187,13 +190,13 @@ raise IndexError def getslice(self, w_list, start, stop, step, length): - return W_ListObject([]) + return W_ListObject(self.space, []) def getitems(self, w_list): return [] def append(self, w_list, w_item): - w_list.__init__([w_item]) + w_list.__init__(self.space, [w_item]) def inplace_mul(self, w_list, times): return @@ -211,7 +214,7 @@ raise IndexError def setslice(self, w_list, start, step, slicelength, sequence_w): - w_list.__init__(sequence_w) + w_list.__init__(self.space, sequence_w) def insert(self, w_list, index, w_item): assert index == 0 @@ -242,6 +245,9 @@ def list_is_correct_type(self, w_list): raise NotImplementedError("abstract base class") + def init_from_list_w(self, w_list, list_w): + l = [self.unwrap(w_item) for w_item in list_w] + w_list.storage = 
self.cast_to_void_star(l) def length(self, w_list): return len(self.cast_from_void_star(w_list.storage)) @@ -253,17 +259,20 @@ raise def getitems(self, w_list): - return self.cast_from_void_star(w_list.storage) + return [self.wrap(item) for item in self.cast_from_void_star(w_list.storage)] def getslice(self, w_list, start, stop, step, length): if step == 1: - return W_ListObject(self.cast_from_void_star(w_list.storage)[start:stop]) + # XXX ineffecient cause items are wrapped and unwrapped again + # later: W_ListObject constructor for unwrapped items + l = w_list.getitems() + return W_ListObject(self.space, l[start:stop]) else: subitems_w = [None] * length for i in range(length): subitems_w[i] = w_list.getitem(start) start += step - return W_ListObject(subitems_w) + return W_ListObject(self.space, subitems_w) def append(self, w_list, w_item): @@ -271,44 +280,52 @@ self.cast_from_void_star(w_list.storage).append(self.unwrap(w_item)) return - w_list.switch_to_object_strategy(w_list.getitems()) + w_list.switch_to_object_strategy() w_list.append(w_item) def insert(self, w_list, index, w_item): - list_w = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.storage) if self.is_correct_type(w_item): - list_w.insert(index, w_item) + l.insert(index, self.unwrap(w_item)) return - w_list.switch_to_object_strategy(list_w) + w_list.switch_to_object_strategy() w_list.insert(index, w_item) def extend(self, w_list, w_other): - list_w = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.storage) if self.list_is_correct_type(w_other): - list_w += w_other.getitems() # or self.cast_from_void_star(w_other.storage) ? 
+ l += self.cast_from_void_star(w_other.storage) return - w_list.switch_to_object_strategy(list_w) + #XXX unnecessary copy if w_other is ObjectList + list_w = w_other.getitems() + w_other = W_ListObject(self.space, list_w) + w_other.switch_to_object_strategy() + + w_list.switch_to_object_strategy() w_list.extend(w_other) def setitem(self, w_list, index, w_item): - list_w = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.storage) if self.is_correct_type(w_item): - list_w[index] = w_item + l[index] = self.unwrap(w_item) return - w_list.switch_to_object_strategy(list_w) + w_list.switch_to_object_strategy() w_list.setitem(index, w_item) def setslice(self, w_list, start, step, slicelength, sequence_w): + #XXX inefficient assert slicelength >= 0 items = self.cast_from_void_star(w_list.storage) - if not self.list_is_correct_type(W_ListObject(sequence_w)): - w_list.switch_to_object_strategy(items) + if (type(self) is not ObjectListStrategy and + not self.list_is_correct_type(W_ListObject(self.space, sequence_w)) and + len(sequence_w) != 0): + w_list.switch_to_object_strategy() w_list.setslice(start, step, slicelength, sequence_w) return @@ -343,22 +360,22 @@ i = len2 - 1 start += i*step while i >= 0: - items[start] = sequence_w[i] + items[start] = self.unwrap(sequence_w[i]) start -= step i -= 1 return else: # Make a shallow copy to more easily handle the reversal case + # XXX why is this needed ??? 
sequence_w = list(sequence_w) for i in range(len2): items[start] = self.unwrap(sequence_w[i]) start += step - def deleteitem(self, w_list, index): - list_w = self.cast_from_void_star(w_list.storage) - del list_w[index] - w_list.check_empty_strategy(list_w) + l = self.cast_from_void_star(w_list.storage) + del l[index] + w_list.check_empty_strategy() def deleteslice(self, w_list, start, step, slicelength): items = self.cast_from_void_star(w_list.storage) @@ -392,18 +409,18 @@ assert start >= 0 # annotator hint del items[start:] - w_list.check_empty_strategy(items) + w_list.check_empty_strategy() def pop(self, w_list, index): - list_w = self.cast_from_void_star(w_list.storage) - item_w = self.wrap(list_w.pop(index)) + l = self.cast_from_void_star(w_list.storage) + w_item = self.wrap(l.pop(index)) - w_list.check_empty_strategy(list_w) - return item_w + w_list.check_empty_strategy() + return w_item def inplace_mul(self, w_list, times): - list_w = self.cast_from_void_star(w_list.storage) - list_w *= times + l = self.cast_from_void_star(w_list.storage) + l *= times def reverse(self, w_list): self.cast_from_void_star(w_list.storage).reverse() @@ -422,7 +439,7 @@ return True def list_is_correct_type(self, w_list): - return True + return ObjectListStrategy is type(w_list.strategy) def init_from_list_w(self, w_list, list_w): w_list.storage = cast_to_void_star(list_w, "object") @@ -438,14 +455,14 @@ def cast_from_void_star(self, storage): return cast_from_void_star(storage, "integer") + def cast_to_void_star(self, l): + return cast_to_void_star(l, "integer") + def is_correct_type(self, w_obj): return is_W_IntObject(w_obj) def list_is_correct_type(self, w_list): - return type(self) == type(w_list.strategy) - - def init_from_list_w(self, w_list, list_w): - w_list.storage = cast_to_void_star(list_w, "integer") + return IntegerListStrategy is type(w_list.strategy) class StringListStrategy(AbstractUnwrappedStrategy): @@ -458,14 +475,14 @@ def cast_from_void_star(self, storage): 
return cast_from_void_star(storage, "string") + def cast_to_void_star(self, l): + return cast_to_void_star(l, "string") + def is_correct_type(self, w_obj): return is_W_StringObject(w_obj) def list_is_correct_type(self, w_list): - return type(self) == type(w_list.strategy) - - def init_from_list_w(self, w_list, list_w): - w_list.storage = cast_to_void_star(list_w, "string") + return StringListStrategy is type(w_list.strategy) # _______________________________________________________ @@ -484,8 +501,7 @@ # This is commented out to avoid assigning a new RPython list to # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. # - items_w = w_list.getitems() - del items_w[:] + w_list.__init__(space, []) if w_iterable is not None: w_iterator = space.iter(w_iterable) while True: @@ -546,7 +562,7 @@ return iterobject.W_FastListIterObject(w_list, w_list.getitems()) def add__List_List(space, w_list1, w_list2): - return W_ListObject(w_list1.getitems() + w_list2.getitems()) + return W_ListObject(space, w_list1.getitems() + w_list2.getitems()) def inplace_add__List_ANY(space, w_list1, w_iterable2): @@ -564,7 +580,7 @@ if e.match(space, space.w_TypeError): raise FailedToImplement raise - return W_ListObject(w_list.getitems() * times) + return W_ListObject(space, w_list.getitems() * times) def mul__List_ANY(space, w_list, w_times): return mul_list_times(space, w_list, w_times) @@ -709,8 +725,8 @@ return space.w_None def list_extend__List_ANY(space, w_list, w_any): - w_other = W_ListObject(space.listview(w_any)) - w_list.extend(w_other) + w_other = W_ListObject(space, space.listview(w_any)) + w_list.extend(w_other) return space.w_None # note that the default value will come back wrapped!!! @@ -836,7 +852,7 @@ # by comparison functions can't affect the slice of memory we're # sorting (allowing mutations during sorting is an IndexError or # core-dump factory, since wrappeditems may change). 
- w_list.__init__([]) + w_list.__init__(space, []) # wrap each item in a KeyContainer if needed if has_key: @@ -869,7 +885,7 @@ mucked = w_list.length() > 0 # put the items back into the list - w_list.__init__(sorter.list) + w_list.__init__(space, sorter.list) if mucked: raise OperationError(space.w_ValueError, diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py --- a/pypy/objspace/std/listtype.py +++ b/pypy/objspace/std/listtype.py @@ -43,7 +43,7 @@ def descr__new__(space, w_listtype, __args__): from pypy.objspace.std.listobject import W_ListObject w_obj = space.allocate_instance(W_ListObject, w_listtype) - W_ListObject.__init__(w_obj, []) + W_ListObject.__init__(w_obj, space, []) return w_obj # ____________________________________________________________ diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -274,7 +274,7 @@ return wraptuple(self, list_w) def newlist(self, list_w): - return W_ListObject(list_w) + return W_ListObject(self, list_w) def newdict(self, module=False, instance=False, classofinstance=None, from_strdict_shared=None, strdict=False): diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -9,25 +9,25 @@ def test_is_true(self): w = self.space.wrap - w_list = W_ListObject([]) + w_list = W_ListObject(self.space, []) assert self.space.is_true(w_list) == False - w_list = W_ListObject([w(5)]) + w_list = W_ListObject(self.space, [w(5)]) assert self.space.is_true(w_list) == True - w_list = W_ListObject([w(5), w(3)]) + w_list = W_ListObject(self.space, [w(5), w(3)]) assert self.space.is_true(w_list) == True def test_len(self): w = self.space.wrap - w_list = W_ListObject([]) + w_list = W_ListObject(self.space, []) assert self.space.eq_w(self.space.len(w_list), w(0)) - w_list = 
W_ListObject([w(5)]) + w_list = W_ListObject(self.space, [w(5)]) assert self.space.eq_w(self.space.len(w_list), w(1)) - w_list = W_ListObject([w(5), w(3), w(99)]*111) + w_list = W_ListObject(self.space, [w(5), w(3), w(99)]*111) assert self.space.eq_w(self.space.len(w_list), w(333)) def test_getitem(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3)]) + w_list = W_ListObject(self.space, [w(5), w(3)]) assert self.space.eq_w(self.space.getitem(w_list, w(0)), w(5)) assert self.space.eq_w(self.space.getitem(w_list, w(1)), w(3)) assert self.space.eq_w(self.space.getitem(w_list, w(-2)), w(5)) @@ -42,7 +42,7 @@ def test_random_getitem(self): w = self.space.wrap s = list('qedx387tn3uixhvt 7fh387fymh3dh238 dwd-wq.dwq9') - w_list = W_ListObject(map(w, s)) + w_list = W_ListObject(self.space, map(w, s)) keys = range(-len(s)-5, len(s)+5) choices = keys + [None]*12 stepchoices = [None, None, None, 1, 1, -1, -1, 2, -2, @@ -65,7 +65,7 @@ def test_iter(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3), w(99)]) + w_list = W_ListObject(self.space, [w(5), w(3), w(99)]) w_iter = self.space.iter(w_list) assert self.space.eq_w(self.space.next(w_iter), w(5)) assert self.space.eq_w(self.space.next(w_iter), w(3)) @@ -75,7 +75,7 @@ def test_contains(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3), w(99)]) + w_list = W_ListObject(self.space, [w(5), w(3), w(99)]) assert self.space.eq_w(self.space.contains(w_list, w(5)), self.space.w_True) assert self.space.eq_w(self.space.contains(w_list, w(99)), @@ -90,7 +90,7 @@ def test1(testlist, start, stop, step, expected): w_slice = self.space.newslice(w(start), w(stop), w(step)) - w_list = W_ListObject([w(i) for i in testlist]) + w_list = W_ListObject(self.space, [w(i) for i in testlist]) w_result = self.space.getitem(w_list, w_slice) assert self.space.unwrap(w_result) == expected @@ -111,8 +111,8 @@ def test1(lhslist, start, stop, rhslist, expected): w_slice = self.space.newslice(w(start), w(stop), w(1)) - 
w_lhslist = W_ListObject([w(i) for i in lhslist]) - w_rhslist = W_ListObject([w(i) for i in rhslist]) + w_lhslist = W_ListObject(self.space, [w(i) for i in lhslist]) + w_rhslist = W_ListObject(self.space, [w(i) for i in rhslist]) self.space.setitem(w_lhslist, w_slice, w_rhslist) assert self.space.unwrap(w_lhslist) == expected @@ -126,14 +126,14 @@ def test_add(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(-7)] * 111) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(-7)] * 111) assert self.space.eq_w(self.space.add(w_list1, w_list1), - W_ListObject([w(5), w(3), w(99), + W_ListObject(self.space, [w(5), w(3), w(99), w(5), w(3), w(99)])) assert self.space.eq_w(self.space.add(w_list1, w_list2), - W_ListObject([w(5), w(3), w(99)] + + W_ListObject(self.space, [w(5), w(3), w(99)] + [w(-7)] * 111)) assert self.space.eq_w(self.space.add(w_list1, w_list0), w_list1) assert self.space.eq_w(self.space.add(w_list0, w_list2), w_list2) @@ -143,8 +143,8 @@ w = self.space.wrap arg = w(2) n = 3 - w_lis = W_ListObject([arg]) - w_lis3 = W_ListObject([arg]*n) + w_lis = W_ListObject(self.space, [arg]) + w_lis3 = W_ListObject(self.space, [arg]*n) w_res = self.space.mul(w_lis, w(n)) assert self.space.eq_w(w_lis3, w_res) # commute @@ -153,9 +153,9 @@ def test_setitem(self): w = self.space.wrap - w_list = W_ListObject([w(5), w(3)]) - w_exp1 = W_ListObject([w(5), w(7)]) - w_exp2 = W_ListObject([w(8), w(7)]) + w_list = W_ListObject(self.space, [w(5), w(3)]) + w_exp1 = W_ListObject(self.space, [w(5), w(7)]) + w_exp2 = W_ListObject(self.space, [w(8), w(7)]) self.space.setitem(w_list, w(1), w(7)) assert self.space.eq_w(w_exp1, w_list) self.space.setitem(w_list, w(-2), w(8)) @@ -168,7 +168,7 @@ def test_random_setitem_delitem(self): w = self.space.wrap s = range(39) - w_list = W_ListObject(map(w, s)) + w_list = 
W_ListObject(self.space, map(w, s)) expected = list(s) keys = range(-len(s)-5, len(s)+5) choices = keys + [None]*12 @@ -184,7 +184,7 @@ for key in keys: if random.random() < 0.15: random.shuffle(s) - w_list = W_ListObject(map(w, s)) + w_list = W_ListObject(self.space, map(w, s)) expected = list(s) try: value = expected[key] @@ -220,10 +220,10 @@ def test_eq(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) assert self.space.eq_w(self.space.eq(w_list0, w_list1), self.space.w_False) @@ -238,10 +238,10 @@ def test_ne(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) assert self.space.eq_w(self.space.ne(w_list0, w_list1), self.space.w_True) @@ -256,11 +256,11 @@ def test_lt(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert 
self.space.eq_w(self.space.lt(w_list0, w_list1), self.space.w_True) @@ -278,11 +278,11 @@ def test_ge(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert self.space.eq_w(self.space.ge(w_list0, w_list1), self.space.w_False) @@ -300,11 +300,11 @@ def test_gt(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert self.space.eq_w(self.space.gt(w_list0, w_list1), self.space.w_False) @@ -322,11 +322,11 @@ def test_le(self): w = self.space.wrap - w_list0 = W_ListObject([]) - w_list1 = W_ListObject([w(5), w(3), w(99)]) - w_list2 = W_ListObject([w(5), w(3), w(99)]) - w_list3 = W_ListObject([w(5), w(3), w(99), w(-1)]) - w_list4 = W_ListObject([w(5), w(3), w(9), w(-1)]) + w_list0 = W_ListObject(self.space, []) + w_list1 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list2 = W_ListObject(self.space, [w(5), w(3), w(99)]) + w_list3 = W_ListObject(self.space, [w(5), w(3), w(99), w(-1)]) + w_list4 = W_ListObject(self.space, [w(5), w(3), w(9), w(-1)]) assert self.space.eq_w(self.space.le(w_list0, w_list1), 
self.space.w_True) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -4,29 +4,29 @@ class TestW_ListStrategies(TestW_ListObject): def test_check_strategy(self): - assert isinstance(W_ListObject([]).strategy, EmptyListStrategy) - assert isinstance(W_ListObject([self.space.wrap(1),self.space.wrap('a')]).strategy, ObjectListStrategy) - assert isinstance(W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]).strategy, IntegerListStrategy) - assert isinstance(W_ListObject([self.space.wrap('a'), self.space.wrap('b')]).strategy, StringListStrategy) + assert isinstance(W_ListObject(self.space, []).strategy, EmptyListStrategy) + assert isinstance(W_ListObject(self.space, [self.space.wrap(1),self.space.wrap('a')]).strategy, ObjectListStrategy) + assert isinstance(W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]).strategy, IntegerListStrategy) + assert isinstance(W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b')]).strategy, StringListStrategy) def test_empty_to_any(self): - l = W_ListObject([]) + l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(self.space.wrap(1.)) assert isinstance(l.strategy, ObjectListStrategy) - l = W_ListObject([]) + l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(self.space.wrap(1)) assert isinstance(l.strategy, IntegerListStrategy) - l = W_ListObject([]) + l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(self.space.wrap('a')) assert isinstance(l.strategy, StringListStrategy) def test_int_to_any(self): - l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert 
isinstance(l.strategy, IntegerListStrategy) l.append(self.space.wrap(4)) assert isinstance(l.strategy, IntegerListStrategy) @@ -34,7 +34,7 @@ assert isinstance(l.strategy, ObjectListStrategy) def test_string_to_any(self): - l = W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) assert isinstance(l.strategy, StringListStrategy) l.append(self.space.wrap('d')) assert isinstance(l.strategy, StringListStrategy) @@ -43,7 +43,7 @@ def test_setitem(self): # This should work if test_listobject.py passes - l = W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) assert self.space.eq_w(l.getitem(0), self.space.wrap('a')) l.setitem(0, self.space.wrap('d')) assert self.space.eq_w(l.getitem(0), self.space.wrap('d')) @@ -51,97 +51,97 @@ assert isinstance(l.strategy, StringListStrategy) # IntStrategy to ObjectStrategy - l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setitem(0, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) # StringStrategy to ObjectStrategy - l = W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) assert isinstance(l.strategy, StringListStrategy) l.setitem(0, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) def test_insert(self): # no change - l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.insert(3, 
self.space.wrap(4)) assert isinstance(l.strategy, IntegerListStrategy) # StringStrategy - l = W_ListObject([self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) + l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) assert isinstance(l.strategy, StringListStrategy) l.insert(3, self.space.wrap(2)) assert isinstance(l.strategy, ObjectListStrategy) # IntegerStrategy - l = W_ListObject([self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1),self.space.wrap(2),self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.insert(3, self.space.wrap('d')) assert isinstance(l.strategy, ObjectListStrategy) # EmptyStrategy - l = W_ListObject([]) + l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.insert(0, self.space.wrap('a')) assert isinstance(l.strategy, StringListStrategy) - l = W_ListObject([]) + l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.insert(0, self.space.wrap(2)) assert isinstance(l.strategy, IntegerListStrategy) def test_list_empty_after_delete(self): - l = W_ListObject([self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.deleteitem(0) assert isinstance(l.strategy, EmptyListStrategy) - l = W_ListObject([self.space.wrap(1), self.space.wrap(2)]) + l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2)]) assert isinstance(l.strategy, IntegerListStrategy) l.deleteslice(0, 1, 2) assert isinstance(l.strategy, EmptyListStrategy) - l = W_ListObject([self.space.wrap(1)]) + l = W_ListObject(self.space, [self.space.wrap(1)]) assert isinstance(l.strategy, IntegerListStrategy) l.pop(-1) assert isinstance(l.strategy, EmptyListStrategy) def test_setslice(self): - l = W_ListObject([]) + l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) l.setslice(0, 1, 2, 
[self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) - l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)]) assert isinstance(l.strategy, IntegerListStrategy) - l = W_ListObject([self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) assert isinstance(l.strategy, ObjectListStrategy) l.setslice(0, 1, 2, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, ObjectListStrategy) - l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) l.setslice(0, 1, 2, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) assert isinstance(l.strategy, ObjectListStrategy) def test_extend(self): - l = W_ListObject([]) + l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) - l.extend(W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + l.extend(W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) - l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) - l.extend(W_ListObject([self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')])) + l.extend(W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')])) assert 
isinstance(l.strategy, ObjectListStrategy) - l = W_ListObject([self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) - l.extend(W_ListObject([self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) + l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -495,7 +495,7 @@ assert "".join([]) == "" assert "-".join(['a', 'b']) == 'a-b' text = 'text' - assert "".join([text]) is text + assert "".join([text]) == text raises(TypeError, ''.join, 1) raises(TypeError, ''.join, [1]) raises(TypeError, ''.join, [[1]]) From noreply at buildbot.pypy.org Fri Sep 23 13:12:02 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:02 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: merge default Message-ID: <20110923111202.9CFDD820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47447:4b06aa516880 Date: 2011-03-01 18:36 +0100 http://bitbucket.org/pypy/pypy/changeset/4b06aa516880/ Log: merge default diff too long, truncating to 10000 out of 101175 lines diff --git a/.gitignore b/.gitignore new file mode 100644 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +.hg +.svn + +*.pyc +*.pyo +*~ + +bin/pypy-c +include/*.h +lib_pypy/ctypes_config_cache/_[^_]*_*.py +pypy/_cache +pypy/translator/goal/pypy-c +pypy/translator/goal/target*-c +release/ \ No newline at end of file diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -16,6 +16,7 @@ ^pypy/doc/.+\.html$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ +^pypy/translator/c/src/dtoa.o$ 
^pypy/translator/c/src/libffi_msvc/.+\.obj$ ^pypy/translator/c/src/libffi_msvc/.+\.dll$ ^pypy/translator/c/src/libffi_msvc/.+\.lib$ @@ -59,3 +60,4 @@ ^pypy/doc/image/lattice3\.png$ ^pypy/doc/image/stackless_informal\.png$ ^pypy/doc/image/parsing_example.+\.png$ +^compiled diff --git a/.hgsub b/.hgsub --- a/.hgsub +++ b/.hgsub @@ -1,4 +1,3 @@ -#greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c -#testrunner = [svn]http://codespeak.net/svn/pypy/build/testrunner -#lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl -#lib_pypy/sqlite3 = [svn]http://codespeak.net/svn/pypy/pysqlite2 +greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c +testrunner = [svn]http://codespeak.net/svn/pypy/build/testrunner +lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl diff --git a/.hgsubstate b/.hgsubstate --- a/.hgsubstate +++ b/.hgsubstate @@ -1,4 +1,3 @@ 80037 greenlet -80037 lib_pypy/pyrepl -80037 lib_pypy/sqlite3 +80348 lib_pypy/pyrepl 80037 testrunner diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,7 +127,7 @@ def spawn_local_handler(): if hasattr(sys, 'pypy_objspaceclass'): - python = 'python' + python = '/usr/bin/python' else: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] diff --git a/lib-python/2.7.0/email/test/data/msg_26.txt b/lib-python/2.7.0/email/test/data/msg_26.txt --- a/lib-python/2.7.0/email/test/data/msg_26.txt +++ b/lib-python/2.7.0/email/test/data/msg_26.txt @@ -1,45 +1,45 @@ -Received: from xcar [192.168.0.2] by jeeves.wooster.local - (SMTPD32-7.07 EVAL) id AFF92F0214; Sun, 12 May 2002 08:55:37 +0100 -Date: Sun, 12 May 2002 08:56:15 +0100 -From: Father Time -To: timbo at jeeves.wooster.local -Subject: IMAP file test -Message-ID: <6df65d354b.father.time at rpc.wooster.local> -X-Organization: Home -User-Agent: Messenger-Pro/2.50a (MsgServe/1.50) (RISC-OS/4.02) POPstar/2.03 -MIME-Version: 1.0 
-Content-Type: multipart/mixed; boundary="1618492860--2051301190--113853680" -Status: R -X-UIDL: 319998302 - -This message is in MIME format which your mailer apparently does not support. -You either require a newer version of your software which supports MIME, or -a separate MIME decoding utility. Alternatively, ask the sender of this -message to resend it in a different format. - ---1618492860--2051301190--113853680 -Content-Type: text/plain; charset=us-ascii - -Simple email with attachment. - - ---1618492860--2051301190--113853680 -Content-Type: application/riscos; name="clock.bmp,69c"; type=BMP; load=&fff69c4b; exec=&355dd4d1; access=&03 -Content-Disposition: attachment; filename="clock.bmp" -Content-Transfer-Encoding: base64 - -Qk12AgAAAAAAAHYAAAAoAAAAIAAAACAAAAABAAQAAAAAAAAAAADXDQAA1w0AAAAAAAAA -AAAAAAAAAAAAiAAAiAAAAIiIAIgAAACIAIgAiIgAALu7uwCIiIgAERHdACLuIgAz//8A -zAAAAN0R3QDu7iIA////AAAAAAAAAAAAAAAAAAAAAAAAAAi3AAAAAAAAADeAAAAAAAAA -C3ADMzMzMANwAAAAAAAAAAAHMAAAAANwAAAAAAAAAACAMAd3zPfwAwgAAAAAAAAIAwd/ -f8x/f3AwgAAAAAAAgDB0x/f3//zPAwgAAAAAAAcHfM9////8z/AwAAAAAAiwd/f3//// -////A4AAAAAAcEx/f///////zAMAAAAAiwfM9////3///8zwOAAAAAcHf3////B///// -8DAAAAALB/f3///wd3d3//AwAAAABwTPf//wCQAAD/zAMAAAAAsEx/f///B////8wDAA -AAAHB39////wf/////AwAAAACwf39///8H/////wMAAAAIcHfM9///B////M8DgAAAAA -sHTH///wf///xAMAAAAACHB3f3//8H////cDgAAAAAALB3zH//D//M9wMAAAAAAAgLB0 -z39///xHAwgAAAAAAAgLB3d3RHd3cDCAAAAAAAAAgLAHd0R3cAMIAAAAAAAAgAgLcAAA -AAMwgAgAAAAACDAAAAu7t7cwAAgDgAAAAABzcIAAAAAAAAgDMwAAAAAAN7uwgAAAAAgH -MzMAAAAACH97tzAAAAALu3c3gAAAAAAL+7tzDABAu7f7cAAAAAAACA+3MA7EQAv/sIAA -AAAAAAAIAAAAAAAAAIAAAAAA - ---1618492860--2051301190--113853680-- +Received: from xcar [192.168.0.2] by jeeves.wooster.local + (SMTPD32-7.07 EVAL) id AFF92F0214; Sun, 12 May 2002 08:55:37 +0100 +Date: Sun, 12 May 2002 08:56:15 +0100 +From: Father Time +To: timbo at jeeves.wooster.local +Subject: IMAP file test +Message-ID: <6df65d354b.father.time at rpc.wooster.local> +X-Organization: Home +User-Agent: 
Messenger-Pro/2.50a (MsgServe/1.50) (RISC-OS/4.02) POPstar/2.03 +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="1618492860--2051301190--113853680" +Status: R +X-UIDL: 319998302 + +This message is in MIME format which your mailer apparently does not support. +You either require a newer version of your software which supports MIME, or +a separate MIME decoding utility. Alternatively, ask the sender of this +message to resend it in a different format. + +--1618492860--2051301190--113853680 +Content-Type: text/plain; charset=us-ascii + +Simple email with attachment. + + +--1618492860--2051301190--113853680 +Content-Type: application/riscos; name="clock.bmp,69c"; type=BMP; load=&fff69c4b; exec=&355dd4d1; access=&03 +Content-Disposition: attachment; filename="clock.bmp" +Content-Transfer-Encoding: base64 + +Qk12AgAAAAAAAHYAAAAoAAAAIAAAACAAAAABAAQAAAAAAAAAAADXDQAA1w0AAAAAAAAA +AAAAAAAAAAAAiAAAiAAAAIiIAIgAAACIAIgAiIgAALu7uwCIiIgAERHdACLuIgAz//8A +zAAAAN0R3QDu7iIA////AAAAAAAAAAAAAAAAAAAAAAAAAAi3AAAAAAAAADeAAAAAAAAA +C3ADMzMzMANwAAAAAAAAAAAHMAAAAANwAAAAAAAAAACAMAd3zPfwAwgAAAAAAAAIAwd/ +f8x/f3AwgAAAAAAAgDB0x/f3//zPAwgAAAAAAAcHfM9////8z/AwAAAAAAiwd/f3//// +////A4AAAAAAcEx/f///////zAMAAAAAiwfM9////3///8zwOAAAAAcHf3////B///// +8DAAAAALB/f3///wd3d3//AwAAAABwTPf//wCQAAD/zAMAAAAAsEx/f///B////8wDAA +AAAHB39////wf/////AwAAAACwf39///8H/////wMAAAAIcHfM9///B////M8DgAAAAA +sHTH///wf///xAMAAAAACHB3f3//8H////cDgAAAAAALB3zH//D//M9wMAAAAAAAgLB0 +z39///xHAwgAAAAAAAgLB3d3RHd3cDCAAAAAAAAAgLAHd0R3cAMIAAAAAAAAgAgLcAAA +AAMwgAgAAAAACDAAAAu7t7cwAAgDgAAAAABzcIAAAAAAAAgDMwAAAAAAN7uwgAAAAAgH +MzMAAAAACH97tzAAAAALu3c3gAAAAAAL+7tzDABAu7f7cAAAAAAACA+3MA7EQAv/sIAA +AAAAAAAIAAAAAAAAAIAAAAAA + +--1618492860--2051301190--113853680-- diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -8,30 +8,32 @@ Probably easy tasks ------------------- -- New complex syntax (see test_complex.py):: - - assert complex("(1+2j)") == (1+2j) - assert complex("(1-2j)") == 
(1-2j) - assert complex("1e500") == complex(INF, 0.0) - - (unicode|bytearray).(index|find) should accept None as indices (see test_unicode.py) -- Fix fcntl.fcntl(fd, fcntl.F_NOTIFY, fcntl.DN_MULTISHOT) on 32bit platform. - -- missing functions in itertools: combinations, product... - -- in test_os.py, fix posix.setregid(-1, -1), posix.setreuid(-1, -1). This - proably requires to use the git_t typedef instead of rffi.INT. - -- missing posix.(confstr|pathconf|fpathconf)(_names)? (see +- missing posix.confstr and posix.confstr_names - remove code duplication: bit_length() and _count_bits() in rlib/rbigint.py, objspace/std/longobject.py and objspace/std/longtype.py. -- Add missing methods to bytearray (see test_bytes.py). Also ideally refactor - stringobject.py and unicodeobject.py to allow more code reuse between these - (now) three types. +- missing module pyexpat.errors + +- support for PYTHONIOENCODING, this needs a way to update file.encoding + +- implement format__Complex_ANY() in pypy/objspace/std/complexobject.py + +- Code like this does not work, for two reasons:: + + \ + from __future__ import (with_statement, + unicode_literals) + assert type("") is unicode + +- Code like:: + + assert(x is not None, "error message") + + should emit a SyntaxWarning when compiled (the tuple is always true) Medium tasks @@ -40,25 +42,18 @@ - Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: test_pickle() -- add 'unicode' in ObjSpace.MethodTable + probably a default implementation that - falls back to space.str(). (WHY?) 
- - socket module has a couple of changes (including AF_TIPC packet range) -- implement _io.open() (currently it delegates to _pyio.open) - -- module/unicodedata/generate_unicodedb.py should parse LineBreaks.txt - see http://svn.python.org/view?view=rev&revision=79494 +- (test_lib2to3) When a "for" loop runs a generator function, if the loop is + exited before the end, the "finally" clause of the generator is not called + until the next gc collection. In our case, in lib2to3/pytree.py, + WildcardPattern.match_seq() does not exhaust the generate_matches() generator, + and stderr is not restored. Longer tasks ------------ -- many features are missing from the _ssl module - -- "Shorter float representation" (see pypy/translator/c/test/test_dtoa.py) to - format/parse floats. Enable this with a translation option. - - Fix usage of __cmp__ in subclasses:: class badint(int): @@ -74,10 +69,6 @@ a = A() a.__class__ = B -- Remove "unbound builtin methods": some code in pprint.py, _threading_local.py - relies on comparisons like "str.__init__ is object.__init__", or - "type(x).__repr__ is dict.__repr__" - - Show a ResourceWarning when a file/socket is not explicitely closed, like CPython did for 3.2: http://svn.python.org/view?view=rev&revision=85920 in PyPy this should be enabled by default @@ -100,4 +91,20 @@ * In test_codecs, commented out various items in `all_unicode_encodings`. +- Error messages about ill-formed calls (like "argument after ** must be a + mapping") don't always show the function name. That's hard to fix for + the case of errors raised when the Argument object is created (as opposed + to when parsing for a given target function, which occurs later). + * Some "..." were added to doctests in test_extcall.py + +- CPython's builtin methods are both functions and unbound methods (for + example, `str.upper is dict(str.__dict__)['upper']`). This is not the case + in pypy, and assertions like `object.__str__ is object.__str__` are False + with pypy. 
Use the `==` operator instead. + + * pprint.py, _threading_local.py + +- When importing a nested module fails, the ImportError message mentions the + name of the package up to the component that could not be imported (CPython + prefers to display the names starting with the failing part). diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -36,7 +36,9 @@ group.addoption('--pypy', action="store", type="string", dest="pypy", help="use given pypy executable to run lib-python tests. " "This will run the tests directly (i.e. not through py.py)") - + group.addoption('--filter', action="store", type="string", default=None, + dest="unittest_filter", help="Similar to -k, XXX") + option = py.test.config.option def gettimeout(): @@ -193,7 +195,7 @@ RegrTest('test_dbm.py'), RegrTest('test_decimal.py'), RegrTest('test_decorators.py', core=True), - RegrTest('test_deque.py', core=True), + RegrTest('test_deque.py', core=True, usemodules='_collections'), RegrTest('test_descr.py', core=True, usemodules='_weakref'), RegrTest('test_descrtut.py', core=True), RegrTest('test_dict.py', core=True), @@ -420,7 +422,7 @@ RegrTest('test_sundry.py'), RegrTest('test_symtable.py', skip="implementation detail"), RegrTest('test_syntax.py', core=True), - RegrTest('test_sys.py', core=True), + RegrTest('test_sys.py', core=True, usemodules='struct'), RegrTest('test_sys_settrace.py', core=True), RegrTest('test_sys_setprofile.py', core=True), RegrTest('test_sysconfig.py'), @@ -496,8 +498,8 @@ RegrTest('test_coding.py'), RegrTest('test_complex_args.py'), RegrTest('test_contextlib.py', usemodules="thread"), - RegrTest('test_ctypes.py', usemodules="_rawffi"), - RegrTest('test_defaultdict.py'), + RegrTest('test_ctypes.py', usemodules="_rawffi thread"), + RegrTest('test_defaultdict.py', usemodules='_collections'), RegrTest('test_email_renamed.py'), RegrTest('test_exception_variations.py'), RegrTest('test_float.py'), @@ -517,8 +519,8 @@ 
RegrTest('test_with.py'), RegrTest('test_wsgiref.py'), RegrTest('test_xdrlib.py'), - RegrTest('test_xml_etree.py', skip="unsupported ext module"), - RegrTest('test_xml_etree_c.py', skip="unsupported ext module"), + RegrTest('test_xml_etree.py'), + RegrTest('test_xml_etree_c.py'), RegrTest('test_zipfile64.py'), ] @@ -688,7 +690,16 @@ else: status = 'abnormal termination 0x%x' % status else: - status = os.system("%s >>%s 2>>%s" %(cmd, stdout, stderr)) + if self.config.option.unittest_filter is not None: + cmd += ' --filter %s' % self.config.option.unittest_filter + if self.config.option.usepdb: + cmd += ' --pdb' + if self.config.option.capture == 'no': + status = os.system(cmd) + stdout.write('') + stderr.write('') + else: + status = os.system("%s >>%s 2>>%s" %(cmd, stdout, stderr)) if os.WIFEXITED(status): status = os.WEXITSTATUS(status) else: @@ -705,8 +716,10 @@ if test_stderr.rfind(26*"=" + "skipped" + 26*"=") != -1: skipped = True outcome = 'OK' - if not exit_status: - if 'FAIL' in test_stdout or re.search('[^:]ERROR', test_stderr): + if not exit_status: + # match "FAIL" but not e.g. 
"FAILURE", which is in the output of a + # test in test_zipimport_support.py + if re.search(r'\bFAIL\b', test_stdout) or re.search('[^:]ERROR', test_stderr): outcome = 'FAIL' exit_status = 2 elif timedout: diff --git a/lib-python/2.7.0/UserDict.py b/lib-python/modified-2.7.0/UserDict.py copy from lib-python/2.7.0/UserDict.py copy to lib-python/modified-2.7.0/UserDict.py --- a/lib-python/2.7.0/UserDict.py +++ b/lib-python/modified-2.7.0/UserDict.py @@ -1,5 +1,10 @@ """A more or less complete user-defined wrapper around dictionary objects.""" +# XXX This is a bit of a hack (as usual :-)) +# the actual content of the file is not changed, but we put it here to make +# virtualenv happy (because its internal logic expects at least one of the +# REQUIRED_MODULES to be in modified-*) + class UserDict: def __init__(self, dict=None, **kwargs): self.data = {} diff --git a/lib-python/2.7.0/_threading_local.py b/lib-python/modified-2.7.0/_threading_local.py copy from lib-python/2.7.0/_threading_local.py copy to lib-python/modified-2.7.0/_threading_local.py --- a/lib-python/2.7.0/_threading_local.py +++ b/lib-python/modified-2.7.0/_threading_local.py @@ -155,7 +155,7 @@ object.__setattr__(self, '_local__args', (args, kw)) object.__setattr__(self, '_local__lock', RLock()) - if (args or kw) and (cls.__init__ is object.__init__): + if (args or kw) and (cls.__init__ == object.__init__): raise TypeError("Initialization arguments are not supported") # We need to create the thread dict in anticipation of diff --git a/lib-python/modified-2.7.0/ctypes/test/__init__.py b/lib-python/modified-2.7.0/ctypes/test/__init__.py --- a/lib-python/modified-2.7.0/ctypes/test/__init__.py +++ b/lib-python/modified-2.7.0/ctypes/test/__init__.py @@ -211,9 +211,9 @@ """ Poor's man xfail: remove it when all the failures have been fixed """ - def new_method(self): + def new_method(self, *args, **kwds): try: - method(self) + method(self, *args, **kwds) except: pass else: diff --git 
a/lib-python/modified-2.7.0/ctypes/test/test_bitfields.py b/lib-python/modified-2.7.0/ctypes/test/test_bitfields.py --- a/lib-python/modified-2.7.0/ctypes/test/test_bitfields.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_bitfields.py @@ -4,7 +4,6 @@ import ctypes import _ctypes_test -from ctypes.test import xfail class BITS(Structure): _fields_ = [("A", c_int, 1), @@ -113,21 +112,24 @@ return self.get_except(type(Structure), "X", (), {"_fields_": fields}) - @xfail def test_nonint_types(self): # bit fields are not allowed on non-integer types. result = self.fail_fields(("a", c_char_p, 1)) - self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char_p')) + self.assertEqual(result[0], TypeError) + self.assertIn('bit fields not allowed for type', result[1]) result = self.fail_fields(("a", c_void_p, 1)) - self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_void_p')) + self.assertEqual(result[0], TypeError) + self.assertIn('bit fields not allowed for type', result[1]) if c_int != c_long: result = self.fail_fields(("a", POINTER(c_int), 1)) - self.assertEqual(result, (TypeError, 'bit fields not allowed for type LP_c_int')) + self.assertEqual(result[0], TypeError) + self.assertIn('bit fields not allowed for type', result[1]) result = self.fail_fields(("a", c_char, 1)) - self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char')) + self.assertEqual(result[0], TypeError) + self.assertIn('bit fields not allowed for type', result[1]) try: c_wchar @@ -135,15 +137,16 @@ pass else: result = self.fail_fields(("a", c_wchar, 1)) - self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_wchar')) + self.assertEqual(result[0], TypeError) + self.assertIn('bit fields not allowed for type', result[1]) class Dummy(Structure): _fields_ = [] result = self.fail_fields(("a", Dummy, 1)) - self.assertEqual(result, (TypeError, 'bit fields not allowed for type Dummy')) + self.assertEqual(result[0], TypeError) + 
self.assertIn('bit fields not allowed for type', result[1]) - @xfail def test_single_bitfield_size(self): for c_typ in int_types: result = self.fail_fields(("a", c_typ, -1)) diff --git a/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py b/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py --- a/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py @@ -1,7 +1,6 @@ import unittest import ctypes import gc -from ctypes.test import xfail MyCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int) OtherCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_ulonglong) @@ -83,7 +82,6 @@ self.assertEqual(grc(func), 2) class AnotherLeak(unittest.TestCase): - @xfail def test_callback(self): import sys diff --git a/lib-python/modified-2.7.0/ctypes/test/test_strings.py b/lib-python/modified-2.7.0/ctypes/test/test_strings.py --- a/lib-python/modified-2.7.0/ctypes/test/test_strings.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_strings.py @@ -1,7 +1,6 @@ import unittest from ctypes import * from test import test_support -from ctypes.test import xfail class StringArrayTestCase(unittest.TestCase): def test(self): @@ -26,27 +25,26 @@ self.assertRaises(ValueError, setattr, buf, "value", "aaaaaaaa") self.assertRaises(TypeError, setattr, buf, "value", 42) - @xfail def test_c_buffer_value(self, memoryview=memoryview): buf = c_buffer(32) buf.value = "Hello, World" self.assertEqual(buf.value, "Hello, World") - self.assertRaises(TypeError, setattr, buf, "value", memoryview("Hello, World")) - self.assertRaises(TypeError, setattr, buf, "value", memoryview("abc")) + if test_support.check_impl_detail(): + self.assertRaises(TypeError, setattr, buf, "value", memoryview("Hello, World")) + self.assertRaises(TypeError, setattr, buf, "value", memoryview("abc")) self.assertRaises(ValueError, setattr, buf, "raw", memoryview("x" * 100)) - @xfail def test_c_buffer_raw(self, memoryview=memoryview): buf = c_buffer(32) 
buf.raw = memoryview("Hello, World") self.assertEqual(buf.value, "Hello, World") - self.assertRaises(TypeError, setattr, buf, "value", memoryview("abc")) + if test_support.check_impl_detail(): + self.assertRaises(TypeError, setattr, buf, "value", memoryview("abc")) self.assertRaises(ValueError, setattr, buf, "raw", memoryview("x" * 100)) - @xfail def test_c_buffer_deprecated(self): # Compatibility with 2.x with test_support.check_py3k_warnings(): diff --git a/lib-python/modified-2.7.0/distutils/command/install.py b/lib-python/modified-2.7.0/distutils/command/install.py --- a/lib-python/modified-2.7.0/distutils/command/install.py +++ b/lib-python/modified-2.7.0/distutils/command/install.py @@ -83,6 +83,13 @@ 'scripts': '$userbase/bin', 'data' : '$userbase', }, + 'pypy': { + 'purelib': '$base/site-packages', + 'platlib': '$base/site-packages', + 'headers': '$base/include', + 'scripts': '$base/bin', + 'data' : '$base', + }, } # The keys to an installation scheme; if any new types of files are to be @@ -467,6 +474,8 @@ def select_scheme (self, name): # it's the caller's problem if they supply a bad name! 
+ if hasattr(sys, 'pypy_version_info'): + name = 'pypy' scheme = INSTALL_SCHEMES[name] for key in SCHEME_KEYS: attrname = 'install_' + key diff --git a/lib-python/modified-2.7.0/distutils/sysconfig.py b/lib-python/modified-2.7.0/distutils/sysconfig.py --- a/lib-python/modified-2.7.0/distutils/sysconfig.py +++ b/lib-python/modified-2.7.0/distutils/sysconfig.py @@ -22,6 +22,6 @@ from distutils.sysconfig_pypy import _config_vars # needed by setuptools else: from distutils.sysconfig_cpython import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools + from distutils.sysconfig_cpython import _config_vars # needed by setuptools diff --git a/lib-python/modified-2.7.0/distutils/tests/test_build_ext.py b/lib-python/modified-2.7.0/distutils/tests/test_build_ext.py --- a/lib-python/modified-2.7.0/distutils/tests/test_build_ext.py +++ b/lib-python/modified-2.7.0/distutils/tests/test_build_ext.py @@ -292,7 +292,7 @@ finally: os.chdir(old_wd) self.assert_(os.path.exists(so_file)) - self.assertEquals(os.path.splitext(so_file)[-1], + self.assertEquals(so_file[so_file.index(os.path.extsep):], sysconfig.get_config_var('SO')) so_dir = os.path.dirname(so_file) self.assertEquals(so_dir, other_tmp_dir) @@ -301,7 +301,7 @@ cmd.run() so_file = cmd.get_outputs()[0] self.assert_(os.path.exists(so_file)) - self.assertEquals(os.path.splitext(so_file)[-1], + self.assertEquals(so_file[so_file.index(os.path.extsep):], sysconfig.get_config_var('SO')) so_dir = os.path.dirname(so_file) self.assertEquals(so_dir, cmd.build_lib) diff --git a/lib-python/modified-2.7.0/distutils/tests/test_install.py b/lib-python/modified-2.7.0/distutils/tests/test_install.py --- a/lib-python/modified-2.7.0/distutils/tests/test_install.py +++ b/lib-python/modified-2.7.0/distutils/tests/test_install.py @@ -2,6 +2,7 @@ import os import unittest +from test import test_support from distutils.command.install import install from distutils.core import Distribution @@ -38,14 +39,15 @@ expected = 
os.path.normpath(expected) self.assertEqual(got, expected) - libdir = os.path.join(destination, "lib", "python") - check_path(cmd.install_lib, libdir) - check_path(cmd.install_platlib, libdir) - check_path(cmd.install_purelib, libdir) - check_path(cmd.install_headers, - os.path.join(destination, "include", "python", "foopkg")) - check_path(cmd.install_scripts, os.path.join(destination, "bin")) - check_path(cmd.install_data, destination) + if test_support.check_impl_detail(): + libdir = os.path.join(destination, "lib", "python") + check_path(cmd.install_lib, libdir) + check_path(cmd.install_platlib, libdir) + check_path(cmd.install_purelib, libdir) + check_path(cmd.install_headers, + os.path.join(destination, "include", "python", "foopkg")) + check_path(cmd.install_scripts, os.path.join(destination, "bin")) + check_path(cmd.install_data, destination) def test_suite(): diff --git a/lib-python/2.7.0/email/__init__.py b/lib-python/modified-2.7.0/email/__init__.py copy from lib-python/2.7.0/email/__init__.py copy to lib-python/modified-2.7.0/email/__init__.py diff --git a/lib-python/2.7.0/email/_parseaddr.py b/lib-python/modified-2.7.0/email/_parseaddr.py copy from lib-python/2.7.0/email/_parseaddr.py copy to lib-python/modified-2.7.0/email/_parseaddr.py diff --git a/lib-python/2.7.0/email/base64mime.py b/lib-python/modified-2.7.0/email/base64mime.py copy from lib-python/2.7.0/email/base64mime.py copy to lib-python/modified-2.7.0/email/base64mime.py diff --git a/lib-python/2.7.0/email/charset.py b/lib-python/modified-2.7.0/email/charset.py copy from lib-python/2.7.0/email/charset.py copy to lib-python/modified-2.7.0/email/charset.py diff --git a/lib-python/2.7.0/email/encoders.py b/lib-python/modified-2.7.0/email/encoders.py copy from lib-python/2.7.0/email/encoders.py copy to lib-python/modified-2.7.0/email/encoders.py diff --git a/lib-python/2.7.0/email/errors.py b/lib-python/modified-2.7.0/email/errors.py copy from lib-python/2.7.0/email/errors.py copy to 
lib-python/modified-2.7.0/email/errors.py diff --git a/lib-python/2.7.0/email/feedparser.py b/lib-python/modified-2.7.0/email/feedparser.py copy from lib-python/2.7.0/email/feedparser.py copy to lib-python/modified-2.7.0/email/feedparser.py diff --git a/lib-python/2.7.0/email/generator.py b/lib-python/modified-2.7.0/email/generator.py copy from lib-python/2.7.0/email/generator.py copy to lib-python/modified-2.7.0/email/generator.py diff --git a/lib-python/2.7.0/email/header.py b/lib-python/modified-2.7.0/email/header.py copy from lib-python/2.7.0/email/header.py copy to lib-python/modified-2.7.0/email/header.py diff --git a/lib-python/2.7.0/email/iterators.py b/lib-python/modified-2.7.0/email/iterators.py copy from lib-python/2.7.0/email/iterators.py copy to lib-python/modified-2.7.0/email/iterators.py diff --git a/lib-python/2.7.0/email/message.py b/lib-python/modified-2.7.0/email/message.py copy from lib-python/2.7.0/email/message.py copy to lib-python/modified-2.7.0/email/message.py diff --git a/lib-python/2.7.0/email/mime/__init__.py b/lib-python/modified-2.7.0/email/mime/__init__.py copy from lib-python/2.7.0/email/mime/__init__.py copy to lib-python/modified-2.7.0/email/mime/__init__.py diff --git a/lib-python/2.7.0/email/mime/application.py b/lib-python/modified-2.7.0/email/mime/application.py copy from lib-python/2.7.0/email/mime/application.py copy to lib-python/modified-2.7.0/email/mime/application.py diff --git a/lib-python/2.7.0/email/mime/audio.py b/lib-python/modified-2.7.0/email/mime/audio.py copy from lib-python/2.7.0/email/mime/audio.py copy to lib-python/modified-2.7.0/email/mime/audio.py diff --git a/lib-python/2.7.0/email/mime/base.py b/lib-python/modified-2.7.0/email/mime/base.py copy from lib-python/2.7.0/email/mime/base.py copy to lib-python/modified-2.7.0/email/mime/base.py diff --git a/lib-python/2.7.0/email/mime/image.py b/lib-python/modified-2.7.0/email/mime/image.py copy from lib-python/2.7.0/email/mime/image.py copy to 
lib-python/modified-2.7.0/email/mime/image.py diff --git a/lib-python/2.7.0/email/mime/message.py b/lib-python/modified-2.7.0/email/mime/message.py copy from lib-python/2.7.0/email/mime/message.py copy to lib-python/modified-2.7.0/email/mime/message.py diff --git a/lib-python/2.7.0/email/mime/multipart.py b/lib-python/modified-2.7.0/email/mime/multipart.py copy from lib-python/2.7.0/email/mime/multipart.py copy to lib-python/modified-2.7.0/email/mime/multipart.py diff --git a/lib-python/2.7.0/email/mime/nonmultipart.py b/lib-python/modified-2.7.0/email/mime/nonmultipart.py copy from lib-python/2.7.0/email/mime/nonmultipart.py copy to lib-python/modified-2.7.0/email/mime/nonmultipart.py diff --git a/lib-python/2.7.0/email/mime/text.py b/lib-python/modified-2.7.0/email/mime/text.py copy from lib-python/2.7.0/email/mime/text.py copy to lib-python/modified-2.7.0/email/mime/text.py diff --git a/lib-python/2.7.0/email/parser.py b/lib-python/modified-2.7.0/email/parser.py copy from lib-python/2.7.0/email/parser.py copy to lib-python/modified-2.7.0/email/parser.py diff --git a/lib-python/2.7.0/email/quoprimime.py b/lib-python/modified-2.7.0/email/quoprimime.py copy from lib-python/2.7.0/email/quoprimime.py copy to lib-python/modified-2.7.0/email/quoprimime.py diff --git a/lib-python/2.7.0/email/test/__init__.py b/lib-python/modified-2.7.0/email/test/__init__.py copy from lib-python/2.7.0/email/test/__init__.py copy to lib-python/modified-2.7.0/email/test/__init__.py diff --git a/lib-python/2.7.0/email/test/data/PyBanner048.gif b/lib-python/modified-2.7.0/email/test/data/PyBanner048.gif copy from lib-python/2.7.0/email/test/data/PyBanner048.gif copy to lib-python/modified-2.7.0/email/test/data/PyBanner048.gif diff --git a/lib-python/2.7.0/email/test/data/audiotest.au b/lib-python/modified-2.7.0/email/test/data/audiotest.au copy from lib-python/2.7.0/email/test/data/audiotest.au copy to lib-python/modified-2.7.0/email/test/data/audiotest.au diff --git 
a/lib-python/2.7.0/email/test/data/msg_01.txt b/lib-python/modified-2.7.0/email/test/data/msg_01.txt copy from lib-python/2.7.0/email/test/data/msg_01.txt copy to lib-python/modified-2.7.0/email/test/data/msg_01.txt diff --git a/lib-python/2.7.0/email/test/data/msg_02.txt b/lib-python/modified-2.7.0/email/test/data/msg_02.txt copy from lib-python/2.7.0/email/test/data/msg_02.txt copy to lib-python/modified-2.7.0/email/test/data/msg_02.txt diff --git a/lib-python/2.7.0/email/test/data/msg_03.txt b/lib-python/modified-2.7.0/email/test/data/msg_03.txt copy from lib-python/2.7.0/email/test/data/msg_03.txt copy to lib-python/modified-2.7.0/email/test/data/msg_03.txt diff --git a/lib-python/2.7.0/email/test/data/msg_04.txt b/lib-python/modified-2.7.0/email/test/data/msg_04.txt copy from lib-python/2.7.0/email/test/data/msg_04.txt copy to lib-python/modified-2.7.0/email/test/data/msg_04.txt diff --git a/lib-python/2.7.0/email/test/data/msg_05.txt b/lib-python/modified-2.7.0/email/test/data/msg_05.txt copy from lib-python/2.7.0/email/test/data/msg_05.txt copy to lib-python/modified-2.7.0/email/test/data/msg_05.txt diff --git a/lib-python/2.7.0/email/test/data/msg_06.txt b/lib-python/modified-2.7.0/email/test/data/msg_06.txt copy from lib-python/2.7.0/email/test/data/msg_06.txt copy to lib-python/modified-2.7.0/email/test/data/msg_06.txt diff --git a/lib-python/2.7.0/email/test/data/msg_07.txt b/lib-python/modified-2.7.0/email/test/data/msg_07.txt copy from lib-python/2.7.0/email/test/data/msg_07.txt copy to lib-python/modified-2.7.0/email/test/data/msg_07.txt diff --git a/lib-python/2.7.0/email/test/data/msg_08.txt b/lib-python/modified-2.7.0/email/test/data/msg_08.txt copy from lib-python/2.7.0/email/test/data/msg_08.txt copy to lib-python/modified-2.7.0/email/test/data/msg_08.txt diff --git a/lib-python/2.7.0/email/test/data/msg_09.txt b/lib-python/modified-2.7.0/email/test/data/msg_09.txt copy from lib-python/2.7.0/email/test/data/msg_09.txt copy to 
lib-python/modified-2.7.0/email/test/data/msg_09.txt diff --git a/lib-python/2.7.0/email/test/data/msg_10.txt b/lib-python/modified-2.7.0/email/test/data/msg_10.txt copy from lib-python/2.7.0/email/test/data/msg_10.txt copy to lib-python/modified-2.7.0/email/test/data/msg_10.txt diff --git a/lib-python/2.7.0/email/test/data/msg_11.txt b/lib-python/modified-2.7.0/email/test/data/msg_11.txt copy from lib-python/2.7.0/email/test/data/msg_11.txt copy to lib-python/modified-2.7.0/email/test/data/msg_11.txt diff --git a/lib-python/2.7.0/email/test/data/msg_12.txt b/lib-python/modified-2.7.0/email/test/data/msg_12.txt copy from lib-python/2.7.0/email/test/data/msg_12.txt copy to lib-python/modified-2.7.0/email/test/data/msg_12.txt diff --git a/lib-python/2.7.0/email/test/data/msg_12a.txt b/lib-python/modified-2.7.0/email/test/data/msg_12a.txt copy from lib-python/2.7.0/email/test/data/msg_12a.txt copy to lib-python/modified-2.7.0/email/test/data/msg_12a.txt diff --git a/lib-python/2.7.0/email/test/data/msg_13.txt b/lib-python/modified-2.7.0/email/test/data/msg_13.txt copy from lib-python/2.7.0/email/test/data/msg_13.txt copy to lib-python/modified-2.7.0/email/test/data/msg_13.txt diff --git a/lib-python/2.7.0/email/test/data/msg_14.txt b/lib-python/modified-2.7.0/email/test/data/msg_14.txt copy from lib-python/2.7.0/email/test/data/msg_14.txt copy to lib-python/modified-2.7.0/email/test/data/msg_14.txt diff --git a/lib-python/2.7.0/email/test/data/msg_15.txt b/lib-python/modified-2.7.0/email/test/data/msg_15.txt copy from lib-python/2.7.0/email/test/data/msg_15.txt copy to lib-python/modified-2.7.0/email/test/data/msg_15.txt diff --git a/lib-python/2.7.0/email/test/data/msg_16.txt b/lib-python/modified-2.7.0/email/test/data/msg_16.txt copy from lib-python/2.7.0/email/test/data/msg_16.txt copy to lib-python/modified-2.7.0/email/test/data/msg_16.txt diff --git a/lib-python/2.7.0/email/test/data/msg_17.txt b/lib-python/modified-2.7.0/email/test/data/msg_17.txt copy from 
lib-python/2.7.0/email/test/data/msg_17.txt copy to lib-python/modified-2.7.0/email/test/data/msg_17.txt diff --git a/lib-python/2.7.0/email/test/data/msg_18.txt b/lib-python/modified-2.7.0/email/test/data/msg_18.txt copy from lib-python/2.7.0/email/test/data/msg_18.txt copy to lib-python/modified-2.7.0/email/test/data/msg_18.txt diff --git a/lib-python/2.7.0/email/test/data/msg_19.txt b/lib-python/modified-2.7.0/email/test/data/msg_19.txt copy from lib-python/2.7.0/email/test/data/msg_19.txt copy to lib-python/modified-2.7.0/email/test/data/msg_19.txt diff --git a/lib-python/2.7.0/email/test/data/msg_20.txt b/lib-python/modified-2.7.0/email/test/data/msg_20.txt copy from lib-python/2.7.0/email/test/data/msg_20.txt copy to lib-python/modified-2.7.0/email/test/data/msg_20.txt diff --git a/lib-python/2.7.0/email/test/data/msg_21.txt b/lib-python/modified-2.7.0/email/test/data/msg_21.txt copy from lib-python/2.7.0/email/test/data/msg_21.txt copy to lib-python/modified-2.7.0/email/test/data/msg_21.txt diff --git a/lib-python/2.7.0/email/test/data/msg_22.txt b/lib-python/modified-2.7.0/email/test/data/msg_22.txt copy from lib-python/2.7.0/email/test/data/msg_22.txt copy to lib-python/modified-2.7.0/email/test/data/msg_22.txt diff --git a/lib-python/2.7.0/email/test/data/msg_23.txt b/lib-python/modified-2.7.0/email/test/data/msg_23.txt copy from lib-python/2.7.0/email/test/data/msg_23.txt copy to lib-python/modified-2.7.0/email/test/data/msg_23.txt diff --git a/lib-python/2.7.0/email/test/data/msg_24.txt b/lib-python/modified-2.7.0/email/test/data/msg_24.txt copy from lib-python/2.7.0/email/test/data/msg_24.txt copy to lib-python/modified-2.7.0/email/test/data/msg_24.txt diff --git a/lib-python/2.7.0/email/test/data/msg_25.txt b/lib-python/modified-2.7.0/email/test/data/msg_25.txt copy from lib-python/2.7.0/email/test/data/msg_25.txt copy to lib-python/modified-2.7.0/email/test/data/msg_25.txt diff --git a/lib-python/2.7.0/email/test/data/msg_26.txt 
b/lib-python/modified-2.7.0/email/test/data/msg_26.txt copy from lib-python/2.7.0/email/test/data/msg_26.txt copy to lib-python/modified-2.7.0/email/test/data/msg_26.txt --- a/lib-python/2.7.0/email/test/data/msg_26.txt +++ b/lib-python/modified-2.7.0/email/test/data/msg_26.txt @@ -1,45 +1,45 @@ -Received: from xcar [192.168.0.2] by jeeves.wooster.local - (SMTPD32-7.07 EVAL) id AFF92F0214; Sun, 12 May 2002 08:55:37 +0100 -Date: Sun, 12 May 2002 08:56:15 +0100 -From: Father Time -To: timbo at jeeves.wooster.local -Subject: IMAP file test -Message-ID: <6df65d354b.father.time at rpc.wooster.local> -X-Organization: Home -User-Agent: Messenger-Pro/2.50a (MsgServe/1.50) (RISC-OS/4.02) POPstar/2.03 -MIME-Version: 1.0 -Content-Type: multipart/mixed; boundary="1618492860--2051301190--113853680" -Status: R -X-UIDL: 319998302 - -This message is in MIME format which your mailer apparently does not support. -You either require a newer version of your software which supports MIME, or -a separate MIME decoding utility. Alternatively, ask the sender of this -message to resend it in a different format. - ---1618492860--2051301190--113853680 -Content-Type: text/plain; charset=us-ascii - -Simple email with attachment. 
- - ---1618492860--2051301190--113853680 -Content-Type: application/riscos; name="clock.bmp,69c"; type=BMP; load=&fff69c4b; exec=&355dd4d1; access=&03 -Content-Disposition: attachment; filename="clock.bmp" -Content-Transfer-Encoding: base64 - -Qk12AgAAAAAAAHYAAAAoAAAAIAAAACAAAAABAAQAAAAAAAAAAADXDQAA1w0AAAAAAAAA -AAAAAAAAAAAAiAAAiAAAAIiIAIgAAACIAIgAiIgAALu7uwCIiIgAERHdACLuIgAz//8A -zAAAAN0R3QDu7iIA////AAAAAAAAAAAAAAAAAAAAAAAAAAi3AAAAAAAAADeAAAAAAAAA -C3ADMzMzMANwAAAAAAAAAAAHMAAAAANwAAAAAAAAAACAMAd3zPfwAwgAAAAAAAAIAwd/ -f8x/f3AwgAAAAAAAgDB0x/f3//zPAwgAAAAAAAcHfM9////8z/AwAAAAAAiwd/f3//// -////A4AAAAAAcEx/f///////zAMAAAAAiwfM9////3///8zwOAAAAAcHf3////B///// -8DAAAAALB/f3///wd3d3//AwAAAABwTPf//wCQAAD/zAMAAAAAsEx/f///B////8wDAA -AAAHB39////wf/////AwAAAACwf39///8H/////wMAAAAIcHfM9///B////M8DgAAAAA -sHTH///wf///xAMAAAAACHB3f3//8H////cDgAAAAAALB3zH//D//M9wMAAAAAAAgLB0 -z39///xHAwgAAAAAAAgLB3d3RHd3cDCAAAAAAAAAgLAHd0R3cAMIAAAAAAAAgAgLcAAA -AAMwgAgAAAAACDAAAAu7t7cwAAgDgAAAAABzcIAAAAAAAAgDMwAAAAAAN7uwgAAAAAgH -MzMAAAAACH97tzAAAAALu3c3gAAAAAAL+7tzDABAu7f7cAAAAAAACA+3MA7EQAv/sIAA -AAAAAAAIAAAAAAAAAIAAAAAA - ---1618492860--2051301190--113853680-- +Received: from xcar [192.168.0.2] by jeeves.wooster.local + (SMTPD32-7.07 EVAL) id AFF92F0214; Sun, 12 May 2002 08:55:37 +0100 +Date: Sun, 12 May 2002 08:56:15 +0100 +From: Father Time +To: timbo at jeeves.wooster.local +Subject: IMAP file test +Message-ID: <6df65d354b.father.time at rpc.wooster.local> +X-Organization: Home +User-Agent: Messenger-Pro/2.50a (MsgServe/1.50) (RISC-OS/4.02) POPstar/2.03 +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="1618492860--2051301190--113853680" +Status: R +X-UIDL: 319998302 + +This message is in MIME format which your mailer apparently does not support. +You either require a newer version of your software which supports MIME, or +a separate MIME decoding utility. Alternatively, ask the sender of this +message to resend it in a different format. 
+ +--1618492860--2051301190--113853680 +Content-Type: text/plain; charset=us-ascii + +Simple email with attachment. + + +--1618492860--2051301190--113853680 +Content-Type: application/riscos; name="clock.bmp,69c"; type=BMP; load=&fff69c4b; exec=&355dd4d1; access=&03 +Content-Disposition: attachment; filename="clock.bmp" +Content-Transfer-Encoding: base64 + +Qk12AgAAAAAAAHYAAAAoAAAAIAAAACAAAAABAAQAAAAAAAAAAADXDQAA1w0AAAAAAAAA +AAAAAAAAAAAAiAAAiAAAAIiIAIgAAACIAIgAiIgAALu7uwCIiIgAERHdACLuIgAz//8A +zAAAAN0R3QDu7iIA////AAAAAAAAAAAAAAAAAAAAAAAAAAi3AAAAAAAAADeAAAAAAAAA +C3ADMzMzMANwAAAAAAAAAAAHMAAAAANwAAAAAAAAAACAMAd3zPfwAwgAAAAAAAAIAwd/ +f8x/f3AwgAAAAAAAgDB0x/f3//zPAwgAAAAAAAcHfM9////8z/AwAAAAAAiwd/f3//// +////A4AAAAAAcEx/f///////zAMAAAAAiwfM9////3///8zwOAAAAAcHf3////B///// +8DAAAAALB/f3///wd3d3//AwAAAABwTPf//wCQAAD/zAMAAAAAsEx/f///B////8wDAA +AAAHB39////wf/////AwAAAACwf39///8H/////wMAAAAIcHfM9///B////M8DgAAAAA +sHTH///wf///xAMAAAAACHB3f3//8H////cDgAAAAAALB3zH//D//M9wMAAAAAAAgLB0 +z39///xHAwgAAAAAAAgLB3d3RHd3cDCAAAAAAAAAgLAHd0R3cAMIAAAAAAAAgAgLcAAA +AAMwgAgAAAAACDAAAAu7t7cwAAgDgAAAAABzcIAAAAAAAAgDMwAAAAAAN7uwgAAAAAgH +MzMAAAAACH97tzAAAAALu3c3gAAAAAAL+7tzDABAu7f7cAAAAAAACA+3MA7EQAv/sIAA +AAAAAAAIAAAAAAAAAIAAAAAA + +--1618492860--2051301190--113853680-- diff --git a/lib-python/2.7.0/email/test/data/msg_27.txt b/lib-python/modified-2.7.0/email/test/data/msg_27.txt copy from lib-python/2.7.0/email/test/data/msg_27.txt copy to lib-python/modified-2.7.0/email/test/data/msg_27.txt diff --git a/lib-python/2.7.0/email/test/data/msg_28.txt b/lib-python/modified-2.7.0/email/test/data/msg_28.txt copy from lib-python/2.7.0/email/test/data/msg_28.txt copy to lib-python/modified-2.7.0/email/test/data/msg_28.txt diff --git a/lib-python/2.7.0/email/test/data/msg_29.txt b/lib-python/modified-2.7.0/email/test/data/msg_29.txt copy from lib-python/2.7.0/email/test/data/msg_29.txt copy to lib-python/modified-2.7.0/email/test/data/msg_29.txt diff --git 
a/lib-python/2.7.0/email/test/data/msg_30.txt b/lib-python/modified-2.7.0/email/test/data/msg_30.txt copy from lib-python/2.7.0/email/test/data/msg_30.txt copy to lib-python/modified-2.7.0/email/test/data/msg_30.txt diff --git a/lib-python/2.7.0/email/test/data/msg_31.txt b/lib-python/modified-2.7.0/email/test/data/msg_31.txt copy from lib-python/2.7.0/email/test/data/msg_31.txt copy to lib-python/modified-2.7.0/email/test/data/msg_31.txt diff --git a/lib-python/2.7.0/email/test/data/msg_32.txt b/lib-python/modified-2.7.0/email/test/data/msg_32.txt copy from lib-python/2.7.0/email/test/data/msg_32.txt copy to lib-python/modified-2.7.0/email/test/data/msg_32.txt diff --git a/lib-python/2.7.0/email/test/data/msg_33.txt b/lib-python/modified-2.7.0/email/test/data/msg_33.txt copy from lib-python/2.7.0/email/test/data/msg_33.txt copy to lib-python/modified-2.7.0/email/test/data/msg_33.txt diff --git a/lib-python/2.7.0/email/test/data/msg_34.txt b/lib-python/modified-2.7.0/email/test/data/msg_34.txt copy from lib-python/2.7.0/email/test/data/msg_34.txt copy to lib-python/modified-2.7.0/email/test/data/msg_34.txt diff --git a/lib-python/2.7.0/email/test/data/msg_35.txt b/lib-python/modified-2.7.0/email/test/data/msg_35.txt copy from lib-python/2.7.0/email/test/data/msg_35.txt copy to lib-python/modified-2.7.0/email/test/data/msg_35.txt diff --git a/lib-python/2.7.0/email/test/data/msg_36.txt b/lib-python/modified-2.7.0/email/test/data/msg_36.txt copy from lib-python/2.7.0/email/test/data/msg_36.txt copy to lib-python/modified-2.7.0/email/test/data/msg_36.txt diff --git a/lib-python/2.7.0/email/test/data/msg_37.txt b/lib-python/modified-2.7.0/email/test/data/msg_37.txt copy from lib-python/2.7.0/email/test/data/msg_37.txt copy to lib-python/modified-2.7.0/email/test/data/msg_37.txt diff --git a/lib-python/2.7.0/email/test/data/msg_38.txt b/lib-python/modified-2.7.0/email/test/data/msg_38.txt copy from lib-python/2.7.0/email/test/data/msg_38.txt copy to 
lib-python/modified-2.7.0/email/test/data/msg_38.txt diff --git a/lib-python/2.7.0/email/test/data/msg_39.txt b/lib-python/modified-2.7.0/email/test/data/msg_39.txt copy from lib-python/2.7.0/email/test/data/msg_39.txt copy to lib-python/modified-2.7.0/email/test/data/msg_39.txt diff --git a/lib-python/2.7.0/email/test/data/msg_40.txt b/lib-python/modified-2.7.0/email/test/data/msg_40.txt copy from lib-python/2.7.0/email/test/data/msg_40.txt copy to lib-python/modified-2.7.0/email/test/data/msg_40.txt diff --git a/lib-python/2.7.0/email/test/data/msg_41.txt b/lib-python/modified-2.7.0/email/test/data/msg_41.txt copy from lib-python/2.7.0/email/test/data/msg_41.txt copy to lib-python/modified-2.7.0/email/test/data/msg_41.txt diff --git a/lib-python/2.7.0/email/test/data/msg_42.txt b/lib-python/modified-2.7.0/email/test/data/msg_42.txt copy from lib-python/2.7.0/email/test/data/msg_42.txt copy to lib-python/modified-2.7.0/email/test/data/msg_42.txt diff --git a/lib-python/2.7.0/email/test/data/msg_43.txt b/lib-python/modified-2.7.0/email/test/data/msg_43.txt copy from lib-python/2.7.0/email/test/data/msg_43.txt copy to lib-python/modified-2.7.0/email/test/data/msg_43.txt diff --git a/lib-python/2.7.0/email/test/data/msg_44.txt b/lib-python/modified-2.7.0/email/test/data/msg_44.txt copy from lib-python/2.7.0/email/test/data/msg_44.txt copy to lib-python/modified-2.7.0/email/test/data/msg_44.txt diff --git a/lib-python/2.7.0/email/test/data/msg_45.txt b/lib-python/modified-2.7.0/email/test/data/msg_45.txt copy from lib-python/2.7.0/email/test/data/msg_45.txt copy to lib-python/modified-2.7.0/email/test/data/msg_45.txt diff --git a/lib-python/2.7.0/email/test/data/msg_46.txt b/lib-python/modified-2.7.0/email/test/data/msg_46.txt copy from lib-python/2.7.0/email/test/data/msg_46.txt copy to lib-python/modified-2.7.0/email/test/data/msg_46.txt diff --git a/lib-python/2.7.0/email/test/test_email.py b/lib-python/modified-2.7.0/email/test/test_email.py copy from 
lib-python/2.7.0/email/test/test_email.py copy to lib-python/modified-2.7.0/email/test/test_email.py --- a/lib-python/2.7.0/email/test/test_email.py +++ b/lib-python/modified-2.7.0/email/test/test_email.py @@ -31,7 +31,7 @@ from email import base64MIME from email import quopriMIME -from test.test_support import findfile, run_unittest +from test.test_support import findfile, run_unittest, impl_detail from email.test import __file__ as landmark @@ -564,6 +564,7 @@ msg = MIMEText('hello \xf8 world', _charset='iso-8859-1') eq(msg['content-transfer-encoding'], 'quoted-printable') + @impl_detail("PyPy has no cjkc codec yet", pypy=False) def test_encode7or8bit(self): # Make sure a charset whose input character set is 8bit but # whose output character set is 7bit gets a transfer-encoding diff --git a/lib-python/2.7.0/email/test/test_email_codecs.py b/lib-python/modified-2.7.0/email/test/test_email_codecs.py copy from lib-python/2.7.0/email/test/test_email_codecs.py copy to lib-python/modified-2.7.0/email/test/test_email_codecs.py diff --git a/lib-python/2.7.0/email/test/test_email_codecs_renamed.py b/lib-python/modified-2.7.0/email/test/test_email_codecs_renamed.py copy from lib-python/2.7.0/email/test/test_email_codecs_renamed.py copy to lib-python/modified-2.7.0/email/test/test_email_codecs_renamed.py diff --git a/lib-python/2.7.0/email/test/test_email_renamed.py b/lib-python/modified-2.7.0/email/test/test_email_renamed.py copy from lib-python/2.7.0/email/test/test_email_renamed.py copy to lib-python/modified-2.7.0/email/test/test_email_renamed.py diff --git a/lib-python/2.7.0/email/test/test_email_torture.py b/lib-python/modified-2.7.0/email/test/test_email_torture.py copy from lib-python/2.7.0/email/test/test_email_torture.py copy to lib-python/modified-2.7.0/email/test/test_email_torture.py diff --git a/lib-python/2.7.0/email/utils.py b/lib-python/modified-2.7.0/email/utils.py copy from lib-python/2.7.0/email/utils.py copy to lib-python/modified-2.7.0/email/utils.py 
diff --git a/lib-python/2.7.0/json/__init__.py b/lib-python/modified-2.7.0/json/__init__.py copy from lib-python/2.7.0/json/__init__.py copy to lib-python/modified-2.7.0/json/__init__.py diff --git a/lib-python/2.7.0/json/decoder.py b/lib-python/modified-2.7.0/json/decoder.py copy from lib-python/2.7.0/json/decoder.py copy to lib-python/modified-2.7.0/json/decoder.py --- a/lib-python/2.7.0/json/decoder.py +++ b/lib-python/modified-2.7.0/json/decoder.py @@ -161,6 +161,12 @@ nextchar = s[end:end + 1] # Trivial empty object if nextchar == '}': + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + pairs = {} + if object_hook is not None: + pairs = object_hook(pairs) return pairs, end + 1 elif nextchar != '"': raise ValueError(errmsg("Expecting property name", s, end)) diff --git a/lib-python/2.7.0/json/encoder.py b/lib-python/modified-2.7.0/json/encoder.py copy from lib-python/2.7.0/json/encoder.py copy to lib-python/modified-2.7.0/json/encoder.py diff --git a/lib-python/2.7.0/json/scanner.py b/lib-python/modified-2.7.0/json/scanner.py copy from lib-python/2.7.0/json/scanner.py copy to lib-python/modified-2.7.0/json/scanner.py diff --git a/lib-python/2.7.0/json/tests/__init__.py b/lib-python/modified-2.7.0/json/tests/__init__.py copy from lib-python/2.7.0/json/tests/__init__.py copy to lib-python/modified-2.7.0/json/tests/__init__.py diff --git a/lib-python/2.7.0/json/tests/test_check_circular.py b/lib-python/modified-2.7.0/json/tests/test_check_circular.py copy from lib-python/2.7.0/json/tests/test_check_circular.py copy to lib-python/modified-2.7.0/json/tests/test_check_circular.py diff --git a/lib-python/2.7.0/json/tests/test_decode.py b/lib-python/modified-2.7.0/json/tests/test_decode.py copy from lib-python/2.7.0/json/tests/test_decode.py copy to lib-python/modified-2.7.0/json/tests/test_decode.py --- a/lib-python/2.7.0/json/tests/test_decode.py +++ b/lib-python/modified-2.7.0/json/tests/test_decode.py @@ -23,6 +23,14 @@ 
rval = json.loads('{ "key" : "value" , "k":"v" }') self.assertEquals(rval, {"key":"value", "k":"v"}) + def test_empty_objects(self): + s = '{}' + self.assertEqual(json.loads(s), eval(s)) + s = '[]' + self.assertEqual(json.loads(s), eval(s)) + s = '""' + self.assertEqual(json.loads(s), eval(s)) + def test_object_pairs_hook(self): s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4), diff --git a/lib-python/2.7.0/json/tests/test_default.py b/lib-python/modified-2.7.0/json/tests/test_default.py copy from lib-python/2.7.0/json/tests/test_default.py copy to lib-python/modified-2.7.0/json/tests/test_default.py diff --git a/lib-python/2.7.0/json/tests/test_dump.py b/lib-python/modified-2.7.0/json/tests/test_dump.py copy from lib-python/2.7.0/json/tests/test_dump.py copy to lib-python/modified-2.7.0/json/tests/test_dump.py diff --git a/lib-python/2.7.0/json/tests/test_encode_basestring_ascii.py b/lib-python/modified-2.7.0/json/tests/test_encode_basestring_ascii.py copy from lib-python/2.7.0/json/tests/test_encode_basestring_ascii.py copy to lib-python/modified-2.7.0/json/tests/test_encode_basestring_ascii.py diff --git a/lib-python/2.7.0/json/tests/test_fail.py b/lib-python/modified-2.7.0/json/tests/test_fail.py copy from lib-python/2.7.0/json/tests/test_fail.py copy to lib-python/modified-2.7.0/json/tests/test_fail.py diff --git a/lib-python/2.7.0/json/tests/test_float.py b/lib-python/modified-2.7.0/json/tests/test_float.py copy from lib-python/2.7.0/json/tests/test_float.py copy to lib-python/modified-2.7.0/json/tests/test_float.py diff --git a/lib-python/2.7.0/json/tests/test_indent.py b/lib-python/modified-2.7.0/json/tests/test_indent.py copy from lib-python/2.7.0/json/tests/test_indent.py copy to lib-python/modified-2.7.0/json/tests/test_indent.py diff --git a/lib-python/2.7.0/json/tests/test_pass1.py b/lib-python/modified-2.7.0/json/tests/test_pass1.py copy from 
lib-python/2.7.0/json/tests/test_pass1.py copy to lib-python/modified-2.7.0/json/tests/test_pass1.py diff --git a/lib-python/2.7.0/json/tests/test_pass2.py b/lib-python/modified-2.7.0/json/tests/test_pass2.py copy from lib-python/2.7.0/json/tests/test_pass2.py copy to lib-python/modified-2.7.0/json/tests/test_pass2.py diff --git a/lib-python/2.7.0/json/tests/test_pass3.py b/lib-python/modified-2.7.0/json/tests/test_pass3.py copy from lib-python/2.7.0/json/tests/test_pass3.py copy to lib-python/modified-2.7.0/json/tests/test_pass3.py diff --git a/lib-python/2.7.0/json/tests/test_recursion.py b/lib-python/modified-2.7.0/json/tests/test_recursion.py copy from lib-python/2.7.0/json/tests/test_recursion.py copy to lib-python/modified-2.7.0/json/tests/test_recursion.py diff --git a/lib-python/2.7.0/json/tests/test_scanstring.py b/lib-python/modified-2.7.0/json/tests/test_scanstring.py copy from lib-python/2.7.0/json/tests/test_scanstring.py copy to lib-python/modified-2.7.0/json/tests/test_scanstring.py --- a/lib-python/2.7.0/json/tests/test_scanstring.py +++ b/lib-python/modified-2.7.0/json/tests/test_scanstring.py @@ -1,6 +1,7 @@ import sys import decimal from unittest import TestCase +from test import test_support import json import json.decoder @@ -9,6 +10,7 @@ def test_py_scanstring(self): self._test_scanstring(json.decoder.py_scanstring) + @test_support.impl_detail() def test_c_scanstring(self): self._test_scanstring(json.decoder.c_scanstring) diff --git a/lib-python/2.7.0/json/tests/test_separators.py b/lib-python/modified-2.7.0/json/tests/test_separators.py copy from lib-python/2.7.0/json/tests/test_separators.py copy to lib-python/modified-2.7.0/json/tests/test_separators.py diff --git a/lib-python/2.7.0/json/tests/test_speedups.py b/lib-python/modified-2.7.0/json/tests/test_speedups.py copy from lib-python/2.7.0/json/tests/test_speedups.py copy to lib-python/modified-2.7.0/json/tests/test_speedups.py --- a/lib-python/2.7.0/json/tests/test_speedups.py +++ 
b/lib-python/modified-2.7.0/json/tests/test_speedups.py @@ -1,22 +1,27 @@ import decimal from unittest import TestCase +from test import test_support from json import decoder, encoder, scanner class TestSpeedups(TestCase): + @test_support.impl_detail() def test_scanstring(self): self.assertEquals(decoder.scanstring.__module__, "_json") self.assertTrue(decoder.scanstring is decoder.c_scanstring) + @test_support.impl_detail() def test_encode_basestring_ascii(self): self.assertEquals(encoder.encode_basestring_ascii.__module__, "_json") self.assertTrue(encoder.encode_basestring_ascii is encoder.c_encode_basestring_ascii) class TestDecode(TestCase): + @test_support.impl_detail() def test_make_scanner(self): self.assertRaises(AttributeError, scanner.c_make_scanner, 1) + @test_support.impl_detail() def test_make_encoder(self): self.assertRaises(TypeError, encoder.c_make_encoder, None, diff --git a/lib-python/2.7.0/json/tests/test_unicode.py b/lib-python/modified-2.7.0/json/tests/test_unicode.py copy from lib-python/2.7.0/json/tests/test_unicode.py copy to lib-python/modified-2.7.0/json/tests/test_unicode.py diff --git a/lib-python/2.7.0/json/tool.py b/lib-python/modified-2.7.0/json/tool.py copy from lib-python/2.7.0/json/tool.py copy to lib-python/modified-2.7.0/json/tool.py diff --git a/lib-python/2.7.0/pydoc.py b/lib-python/modified-2.7.0/pydoc.py copy from lib-python/2.7.0/pydoc.py copy to lib-python/modified-2.7.0/pydoc.py --- a/lib-python/2.7.0/pydoc.py +++ b/lib-python/modified-2.7.0/pydoc.py @@ -620,7 +620,9 @@ head, '#ffffff', '#7799ee', 'index
    ' + filelink + docloc) - modules = inspect.getmembers(object, inspect.ismodule) + def isnonbuiltinmodule(obj): + return inspect.ismodule(obj) and obj is not __builtin__ + modules = inspect.getmembers(object, isnonbuiltinmodule) classes, cdict = [], {} for key, value in inspect.getmembers(object, inspect.isclass): diff --git a/lib-python/2.7.0/site.py b/lib-python/modified-2.7.0/site.py copy from lib-python/2.7.0/site.py copy to lib-python/modified-2.7.0/site.py --- a/lib-python/2.7.0/site.py +++ b/lib-python/modified-2.7.0/site.py @@ -74,7 +74,6 @@ USER_SITE = None USER_BASE = None - def makepath(*paths): dir = os.path.join(*paths) try: @@ -90,7 +89,10 @@ if hasattr(m, '__loader__'): continue # don't mess with a PEP 302-supplied __file__ try: - m.__file__ = os.path.abspath(m.__file__) + prev = m.__file__ + new = os.path.abspath(m.__file__) + if prev != new: + m.__file__ = new except (AttributeError, OSError): pass @@ -279,6 +281,7 @@ will find its `site-packages` subdirectory depending on the system environment, and will return a list of full paths. 
""" + is_pypy = '__pypy__' in sys.builtin_module_names sitepackages = [] seen = set() @@ -289,6 +292,10 @@ if sys.platform in ('os2emx', 'riscos'): sitepackages.append(os.path.join(prefix, "Lib", "site-packages")) + elif is_pypy: + from distutils.sysconfig import get_python_lib + sitedir = get_python_lib(standard_lib=False, prefix=prefix) + sitepackages.append(sitedir) elif os.sep == '/': sitepackages.append(os.path.join(prefix, "lib", "python" + sys.version[:3], @@ -425,22 +432,33 @@ if key == 'q': break +##def setcopyright(): +## """Set 'copyright' and 'credits' in __builtin__""" +## __builtin__.copyright = _Printer("copyright", sys.copyright) +## if sys.platform[:4] == 'java': +## __builtin__.credits = _Printer( +## "credits", +## "Jython is maintained by the Jython developers (www.jython.org).") +## else: +## __builtin__.credits = _Printer("credits", """\ +## Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands +## for supporting Python development. See www.python.org for more information.""") +## here = os.path.dirname(os.__file__) +## __builtin__.license = _Printer( +## "license", "See http://www.python.org/%.3s/license.html" % sys.version, +## ["LICENSE.txt", "LICENSE"], +## [os.path.join(here, os.pardir), here, os.curdir]) + def setcopyright(): - """Set 'copyright' and 'credits' in __builtin__""" + # XXX this is the PyPy-specific version. Should be unified with the above. __builtin__.copyright = _Printer("copyright", sys.copyright) - if sys.platform[:4] == 'java': - __builtin__.credits = _Printer( - "credits", - "Jython is maintained by the Jython developers (www.jython.org).") - else: - __builtin__.credits = _Printer("credits", """\ - Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands - for supporting Python development. 
See www.python.org for more information.""") - here = os.path.dirname(os.__file__) + __builtin__.credits = _Printer( + "credits", + "PyPy is maintained by the PyPy developers: http://codespeak.net/pypy") __builtin__.license = _Printer( - "license", "See http://www.python.org/%.3s/license.html" % sys.version, - ["LICENSE.txt", "LICENSE"], - [os.path.join(here, os.pardir), here, os.curdir]) + "license", + "See http://codespeak.net/svn/pypy/dist/LICENSE") + class _Helper(object): @@ -466,7 +484,7 @@ if sys.platform == 'win32': import locale, codecs enc = locale.getdefaultlocale()[1] - if enc.startswith('cp'): # "cp***" ? + if enc is not None and enc.startswith('cp'): # "cp***" ? try: codecs.lookup(enc) except LookupError: diff --git a/lib-python/modified-2.7.0/socket.py b/lib-python/modified-2.7.0/socket.py --- a/lib-python/modified-2.7.0/socket.py +++ b/lib-python/modified-2.7.0/socket.py @@ -46,8 +46,6 @@ import _socket from _socket import * -from functools import partial -from types import MethodType try: import _ssl @@ -159,11 +157,6 @@ if sys.platform == "riscos": _socketmethods = _socketmethods + ('sleeptaskw',) -# All the method names that must be delegated to either the real socket -# object or the _closedsocket object. 
-_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into", - "send", "sendto") - class _closedsocket(object): __slots__ = [] def _dummy(*args): @@ -180,25 +173,43 @@ __doc__ = _realsocket.__doc__ - __slots__ = ["_sock", "__weakref__", "_io_refs", "_closed" - ] + list(_delegate_methods) - def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) self._sock = _sock self._io_refs = 0 self._closed = False - for method in _delegate_methods: - setattr(self, method, getattr(_sock, method)) - def close(self, _closedsocket=_closedsocket, - _delegate_methods=_delegate_methods, setattr=setattr): + def send(self, data, flags=0): + return self._sock.send(data, flags=flags) + send.__doc__ = _realsocket.send.__doc__ + + def recv(self, buffersize, flags=0): + return self._sock.recv(buffersize, flags=flags) + recv.__doc__ = _realsocket.recv.__doc__ + + def recv_into(self, buffer, nbytes=0, flags=0): + return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + recv_into.__doc__ = _realsocket.recv_into.__doc__ + + def recvfrom(self, buffersize, flags=0): + return self._sock.recvfrom(buffersize, flags=flags) + recvfrom.__doc__ = _realsocket.recvfrom.__doc__ + + def recvfrom_into(self, buffer, nbytes=0, flags=0): + return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ + + def sendto(self, data, param2, param3=None): + if param3 is None: + return self._sock.sendto(data, param2) + else: + return self._sock.sendto(data, param2, param3) + sendto.__doc__ = _realsocket.sendto.__doc__ + + def close(self): # This function should not reference any globals. See issue #808164. 
self._sock = _closedsocket() - dummy = self._sock._dummy - for method in _delegate_methods: - setattr(self, method, dummy) close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -240,15 +251,26 @@ type = property(lambda self: self._sock.type, doc="the socket type") proto = property(lambda self: self._sock.proto, doc="the socket protocol") -def meth(name,self,*args): - return getattr(self._sock,name)(*args) + # Delegate many calls to the raw socket object. + _s = ("def %(name)s(self, %(args)s): return self._sock.%(name)s(%(args)s)\n\n" + "%(name)s.__doc__ = _realsocket.%(name)s.__doc__\n") + for _m in _socketmethods: + # yupi! we're on pypy, all code objects have this interface + argcount = getattr(_realsocket, _m).im_func.func_code.co_argcount - 1 + exec _s % {'name': _m, 'args': ', '.join('arg%d' % i for i in range(argcount))} + del _m, _s, argcount -for _m in _socketmethods: - p = partial(meth,_m) - p.__name__ = _m - p.__doc__ = getattr(_realsocket,_m).__doc__ - m = MethodType(p,None,_socketobject) - setattr(_socketobject,_m,m) + # Delegation methods with default arguments, that the code above + # cannot handle correctly + def sendall(self, data, flags=0): + self._sock.sendall(data, flags) + sendall.__doc__ = _realsocket.sendall.__doc__ + + def getsockopt(self, level, optname, buflen=None): + if buflen is None: + return self._sock.getsockopt(level, optname) + return self._sock.getsockopt(level, optname, buflen) + getsockopt.__doc__ = _realsocket.getsockopt.__doc__ socket = SocketType = _socketobject diff --git a/lib-python/2.7.0/sqlite3/__init__.py b/lib-python/modified-2.7.0/sqlite3/__init__.py copy from lib-python/2.7.0/sqlite3/__init__.py copy to lib-python/modified-2.7.0/sqlite3/__init__.py diff --git a/lib-python/2.7.0/sqlite3/dbapi2.py b/lib-python/modified-2.7.0/sqlite3/dbapi2.py copy from lib-python/2.7.0/sqlite3/dbapi2.py copy to lib-python/modified-2.7.0/sqlite3/dbapi2.py diff --git a/lib-python/2.7.0/sqlite3/dump.py 
b/lib-python/modified-2.7.0/sqlite3/dump.py copy from lib-python/2.7.0/sqlite3/dump.py copy to lib-python/modified-2.7.0/sqlite3/dump.py diff --git a/lib-python/2.7.0/sqlite3/test/__init__.py b/lib-python/modified-2.7.0/sqlite3/test/__init__.py copy from lib-python/2.7.0/sqlite3/test/__init__.py copy to lib-python/modified-2.7.0/sqlite3/test/__init__.py diff --git a/lib-python/2.7.0/sqlite3/test/dbapi.py b/lib-python/modified-2.7.0/sqlite3/test/dbapi.py copy from lib-python/2.7.0/sqlite3/test/dbapi.py copy to lib-python/modified-2.7.0/sqlite3/test/dbapi.py --- a/lib-python/2.7.0/sqlite3/test/dbapi.py +++ b/lib-python/modified-2.7.0/sqlite3/test/dbapi.py @@ -1,4 +1,4 @@ -#-*- coding: ISO-8859-1 -*- +#-*- coding: iso-8859-1 -*- # pysqlite2/test/dbapi.py: tests for DB-API compliance # # Copyright (C) 2004-2010 Gerhard H�ring @@ -332,6 +332,9 @@ def __init__(self): self.value = 5 + def __iter__(self): + return self + def next(self): if self.value == 10: raise StopIteration @@ -826,7 +829,7 @@ con = sqlite.connect(":memory:") con.close() try: - con() + con("select 1") self.fail("Should have raised a ProgrammingError") except sqlite.ProgrammingError: pass diff --git a/lib-python/2.7.0/sqlite3/test/dump.py b/lib-python/modified-2.7.0/sqlite3/test/dump.py copy from lib-python/2.7.0/sqlite3/test/dump.py copy to lib-python/modified-2.7.0/sqlite3/test/dump.py diff --git a/lib-python/2.7.0/sqlite3/test/factory.py b/lib-python/modified-2.7.0/sqlite3/test/factory.py copy from lib-python/2.7.0/sqlite3/test/factory.py copy to lib-python/modified-2.7.0/sqlite3/test/factory.py diff --git a/lib-python/2.7.0/sqlite3/test/hooks.py b/lib-python/modified-2.7.0/sqlite3/test/hooks.py copy from lib-python/2.7.0/sqlite3/test/hooks.py copy to lib-python/modified-2.7.0/sqlite3/test/hooks.py diff --git a/lib-python/2.7.0/sqlite3/test/py25tests.py b/lib-python/modified-2.7.0/sqlite3/test/py25tests.py copy from lib-python/2.7.0/sqlite3/test/py25tests.py copy to 
lib-python/modified-2.7.0/sqlite3/test/py25tests.py diff --git a/lib-python/2.7.0/sqlite3/test/regression.py b/lib-python/modified-2.7.0/sqlite3/test/regression.py copy from lib-python/2.7.0/sqlite3/test/regression.py copy to lib-python/modified-2.7.0/sqlite3/test/regression.py diff --git a/lib-python/2.7.0/sqlite3/test/transactions.py b/lib-python/modified-2.7.0/sqlite3/test/transactions.py copy from lib-python/2.7.0/sqlite3/test/transactions.py copy to lib-python/modified-2.7.0/sqlite3/test/transactions.py diff --git a/lib-python/2.7.0/sqlite3/test/types.py b/lib-python/modified-2.7.0/sqlite3/test/types.py copy from lib-python/2.7.0/sqlite3/test/types.py copy to lib-python/modified-2.7.0/sqlite3/test/types.py diff --git a/lib-python/2.7.0/sqlite3/test/userfunctions.py b/lib-python/modified-2.7.0/sqlite3/test/userfunctions.py copy from lib-python/2.7.0/sqlite3/test/userfunctions.py copy to lib-python/modified-2.7.0/sqlite3/test/userfunctions.py --- a/lib-python/2.7.0/sqlite3/test/userfunctions.py +++ b/lib-python/modified-2.7.0/sqlite3/test/userfunctions.py @@ -275,12 +275,14 @@ pass def CheckAggrNoStep(self): + # XXX it's better to raise OperationalError in order to stop + # the query earlier. cur = self.con.cursor() try: cur.execute("select nostep(t) from test") - self.fail("should have raised an AttributeError") - except AttributeError, e: - self.assertEqual(e.args[0], "AggrNoStep instance has no attribute 'step'") + self.fail("should have raised an OperationalError") + except sqlite.OperationalError, e: + self.assertEqual(e.args[0], "user-defined aggregate's 'step' method raised error") def CheckAggrNoFinalize(self): cur = self.con.cursor() diff --git a/lib-python/modified-2.7.0/ssl.py b/lib-python/modified-2.7.0/ssl.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7.0/ssl.py @@ -0,0 +1,437 @@ +# Wrapper module for _ssl, providing some additional facilities +# implemented in Python. Written by Bill Janssen. 
+ +"""\ +This module provides some more Pythonic support for SSL. + +Object types: + + SSLSocket -- subtype of socket.socket which does SSL over the socket + +Exceptions: + + SSLError -- exception raised for I/O errors + +Functions: + + cert_time_to_seconds -- convert time string used for certificate + notBefore and notAfter functions to integer + seconds past the Epoch (the time values + returned from time.time()) + + fetch_server_certificate (HOST, PORT) -- fetch the certificate provided + by the server running on HOST at port PORT. No + validation of the certificate is performed. + +Integer constants: + +SSL_ERROR_ZERO_RETURN +SSL_ERROR_WANT_READ +SSL_ERROR_WANT_WRITE +SSL_ERROR_WANT_X509_LOOKUP +SSL_ERROR_SYSCALL +SSL_ERROR_SSL +SSL_ERROR_WANT_CONNECT + +SSL_ERROR_EOF +SSL_ERROR_INVALID_ERROR_CODE + +The following group define certificate requirements that one side is +allowing/requiring from the other side: + +CERT_NONE - no certificates from the other side are required (or will + be looked at if provided) +CERT_OPTIONAL - certificates are not required, but if provided will be + validated, and if validation fails, the connection will + also fail +CERT_REQUIRED - certificates are required, and will be validated, and + if validation fails, the connection will also fail + +The following constants identify various SSL protocol variants: + +PROTOCOL_SSLv2 +PROTOCOL_SSLv3 +PROTOCOL_SSLv23 +PROTOCOL_TLSv1 +""" + +import textwrap + +import _ssl # if we can't import it, let the error propagate + +from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION +from _ssl import SSLError +from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED +from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 +from _ssl import RAND_status, RAND_egd, RAND_add +from _ssl import \ + SSL_ERROR_ZERO_RETURN, \ + SSL_ERROR_WANT_READ, \ + SSL_ERROR_WANT_WRITE, \ + SSL_ERROR_WANT_X509_LOOKUP, \ + SSL_ERROR_SYSCALL, \ + SSL_ERROR_SSL, \ + 
SSL_ERROR_WANT_CONNECT, \ + SSL_ERROR_EOF, \ + SSL_ERROR_INVALID_ERROR_CODE + +from socket import socket, _fileobject, error as socket_error +from socket import getnameinfo as _getnameinfo +import base64 # for DER-to-PEM translation +import errno + +class SSLSocket(socket): + + """This class implements a subtype of socket.socket that wraps + the underlying OS socket in an SSL context when necessary, and + provides read and write methods over that channel.""" + + def __init__(self, sock, keyfile=None, certfile=None, + server_side=False, cert_reqs=CERT_NONE, + ssl_version=PROTOCOL_SSLv23, ca_certs=None, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, ciphers=None): + socket.__init__(self, _sock=sock._sock) + + if certfile and not keyfile: + keyfile = certfile + # see if it's connected + try: + socket.getpeername(self) + except socket_error, e: + if e.errno != errno.ENOTCONN: + raise + # no, no connection yet + self._sslobj = None + else: + # yes, create the SSL object + self._sslobj = _ssl.sslwrap(self._sock, server_side, + keyfile, certfile, + cert_reqs, ssl_version, ca_certs, + ciphers) + if do_handshake_on_connect: + self.do_handshake() + self.keyfile = keyfile + self.certfile = certfile + self.cert_reqs = cert_reqs + self.ssl_version = ssl_version + self.ca_certs = ca_certs + self.ciphers = ciphers + self.do_handshake_on_connect = do_handshake_on_connect + self.suppress_ragged_eofs = suppress_ragged_eofs + self._makefile_refs = 0 + + def read(self, len=1024): + + """Read up to LEN bytes and return them. + Return zero-length string on EOF.""" + + try: + return self._sslobj.read(len) + except SSLError, x: + if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: + return '' + else: + raise + + def write(self, data): + + """Write DATA to the underlying SSL channel. 
Returns + number of bytes of DATA actually transmitted.""" + + return self._sslobj.write(data) + + def getpeercert(self, binary_form=False): + + """Returns a formatted version of the data in the + certificate provided by the other end of the SSL channel. + Return None if no certificate was provided, {} if a + certificate was provided, but not validated.""" + + return self._sslobj.peer_certificate(binary_form) + + def cipher(self): + + if not self._sslobj: + return None + else: + return self._sslobj.cipher() + + def send(self, data, flags=0): + if self._sslobj: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to send() on %s" % + self.__class__) + while True: + try: + v = self._sslobj.write(data) + except SSLError, x: + if x.args[0] == SSL_ERROR_WANT_READ: + return 0 + elif x.args[0] == SSL_ERROR_WANT_WRITE: + return 0 + else: + raise + else: + return v + else: + return self._sock.send(data, flags) + + def sendto(self, data, flags_or_addr, addr=None): + if self._sslobj: + raise ValueError("sendto not allowed on instances of %s" % + self.__class__) + elif addr is None: + return self._sock.sendto(data, flags_or_addr) + else: + return self._sock.sendto(data, flags_or_addr, addr) + + def sendall(self, data, flags=0): + if self._sslobj: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to sendall() on %s" % + self.__class__) + amount = len(data) + count = 0 + while (count < amount): + v = self.send(data[count:]) + count += v + return amount + else: + return socket.sendall(self, data, flags) + + def recv(self, buflen=1024, flags=0): + if self._sslobj: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv() on %s" % + self.__class__) + return self.read(buflen) + else: + return self._sock.recv(buflen, flags) + + def recv_into(self, buffer, nbytes=None, flags=0): + if buffer and (nbytes is None): + nbytes = len(buffer) + elif nbytes is None: + nbytes = 1024 + if self._sslobj: + if flags != 0: 
+ raise ValueError( + "non-zero flags not allowed in calls to recv_into() on %s" % + self.__class__) + tmp_buffer = self.read(nbytes) + v = len(tmp_buffer) + buffer[:v] = tmp_buffer + return v + else: + return self._sock.recv_into(buffer, nbytes, flags) + + def recvfrom(self, buflen=1024, flags=0): + if self._sslobj: + raise ValueError("recvfrom not allowed on instances of %s" % + self.__class__) + else: + return self._sock.recvfrom(buflen, flags) + + def recvfrom_into(self, buffer, nbytes=None, flags=0): + if self._sslobj: + raise ValueError("recvfrom_into not allowed on instances of %s" % + self.__class__) + else: + return self._sock.recvfrom_into(buffer, nbytes, flags) + + def pending(self): + if self._sslobj: + return self._sslobj.pending() + else: + return 0 + + def unwrap(self): + if self._sslobj: + s = self._sslobj.shutdown() + self._sslobj = None + return s + else: + raise ValueError("No SSL wrapper around " + str(self)) + + def shutdown(self, how): + self._sslobj = None + socket.shutdown(self, how) + + def close(self): + if self._makefile_refs < 1: + self._sslobj = None + socket.close(self) + else: + self._makefile_refs -= 1 + + def do_handshake(self): + + """Perform a TLS/SSL handshake.""" + + self._sslobj.do_handshake() + + def connect(self, addr): + + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + + # Here we assume that the socket is client-side, and not + # connected at the time of the call. We connect it, then wrap it. 
+ if self._sslobj: + raise ValueError("attempt to connect already-connected SSLSocket!") + socket.connect(self, addr) + self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, + self.cert_reqs, self.ssl_version, + self.ca_certs, self.ciphers) + if self.do_handshake_on_connect: + self.do_handshake() + + def accept(self): + + """Accepts a new connection from a remote client, and returns + a tuple containing that new connection wrapped with a server-side + SSL channel, and the address of the remote client.""" + + newsock, addr = socket.accept(self) + return (SSLSocket(newsock, + keyfile=self.keyfile, + certfile=self.certfile, + server_side=True, + cert_reqs=self.cert_reqs, + ssl_version=self.ssl_version, + ca_certs=self.ca_certs, + ciphers=self.ciphers, + do_handshake_on_connect=self.do_handshake_on_connect, + suppress_ragged_eofs=self.suppress_ragged_eofs), + addr) + + def makefile(self, mode='r', bufsize=-1): + + """Make and return a file-like object that + works with the SSL connection. Just use the code + from the socket module.""" + + self._makefile_refs += 1 + # close=True so as to decrement the reference count when done with + # the file-like object. 
+ return _fileobject(self, mode, bufsize, close=True) + + + +def wrap_socket(sock, keyfile=None, certfile=None, + server_side=False, cert_reqs=CERT_NONE, + ssl_version=PROTOCOL_SSLv23, ca_certs=None, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, ciphers=None): + + return SSLSocket(sock, keyfile=keyfile, certfile=certfile, + server_side=server_side, cert_reqs=cert_reqs, + ssl_version=ssl_version, ca_certs=ca_certs, + do_handshake_on_connect=do_handshake_on_connect, + suppress_ragged_eofs=suppress_ragged_eofs, + ciphers=ciphers) + + +# some utility functions + +def cert_time_to_seconds(cert_time): + + """Takes a date-time string in standard ASN1_print form + ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return + a Python time value in seconds past the epoch.""" + + import time + return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT")) + +PEM_HEADER = "-----BEGIN CERTIFICATE-----" +PEM_FOOTER = "-----END CERTIFICATE-----" + +def DER_cert_to_PEM_cert(der_cert_bytes): + + """Takes a certificate in binary DER format and returns the + PEM version of it as a string.""" + + if hasattr(base64, 'standard_b64encode'): + # preferred because older API gets line-length wrong + f = base64.standard_b64encode(der_cert_bytes) + return (PEM_HEADER + '\n' + + textwrap.fill(f, 64) + '\n' + + PEM_FOOTER + '\n') + else: + return (PEM_HEADER + '\n' + + base64.encodestring(der_cert_bytes) + + PEM_FOOTER + '\n') + +def PEM_cert_to_DER_cert(pem_cert_string): + + """Takes a certificate in ASCII PEM format and returns the + DER-encoded version of it as a byte sequence""" + + if not pem_cert_string.startswith(PEM_HEADER): + raise ValueError("Invalid PEM encoding; must start with %s" + % PEM_HEADER) + if not pem_cert_string.strip().endswith(PEM_FOOTER): + raise ValueError("Invalid PEM encoding; must end with %s" + % PEM_FOOTER) + d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)] + return base64.decodestring(d) + +def get_server_certificate(addr, 
ssl_version=PROTOCOL_SSLv3, ca_certs=None): + + """Retrieve the certificate from the server at the specified address, + and return it as a PEM-encoded string. + If 'ca_certs' is specified, validate the server cert against it. + If 'ssl_version' is specified, use it in the connection attempt.""" + + host, port = addr + if (ca_certs is not None): + cert_reqs = CERT_REQUIRED + else: + cert_reqs = CERT_NONE + s = wrap_socket(socket(), ssl_version=ssl_version, + cert_reqs=cert_reqs, ca_certs=ca_certs) + s.connect(addr) + dercert = s.getpeercert(True) + s.close() + return DER_cert_to_PEM_cert(dercert) + +def get_protocol_name(protocol_code): + if protocol_code == PROTOCOL_TLSv1: + return "TLSv1" + elif protocol_code == PROTOCOL_SSLv23: + return "SSLv23" + elif protocol_code == PROTOCOL_SSLv2: + return "SSLv2" + elif protocol_code == PROTOCOL_SSLv3: + return "SSLv3" + else: + return "" + + +# a replacement for the old socket.ssl function + +def sslwrap_simple(sock, keyfile=None, certfile=None): + + """A replacement for the old socket.ssl function. Designed + for compability with Python 2.5 and earlier. 
Will disappear in + Python 3.0.""" + + if hasattr(sock, "_sock"): + sock = sock._sock + + ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE, + PROTOCOL_SSLv23, None) + try: + sock.getpeername() + except socket_error: + # no, no connection yet + pass + else: + # yes, do the handshake + ssl_sock.do_handshake() + + return ssl_sock diff --git a/lib-python/modified-2.7.0/sysconfig.py b/lib-python/modified-2.7.0/sysconfig.py --- a/lib-python/modified-2.7.0/sysconfig.py +++ b/lib-python/modified-2.7.0/sysconfig.py @@ -26,6 +26,16 @@ 'scripts': '{base}/bin', 'data' : '{base}', }, + 'pypy': { + 'stdlib': '{base}/lib-python', + 'platstdlib': '{base}/lib-python', + 'purelib': '{base}/lib-python', + 'platlib': '{base}/lib-python', + 'include': '{base}/include', + 'platinclude': '{base}/include', + 'scripts': '{base}/bin', + 'data' : '{base}', + }, 'nt': { 'stdlib': '{base}/Lib', 'platstdlib': '{base}/Lib', @@ -158,7 +168,9 @@ return res def _get_default_scheme(): - if os.name == 'posix': + if '__pypy__' in sys.builtin_module_names: + return 'pypy' + elif os.name == 'posix': # the default scheme for posix is posix_prefix return 'posix_prefix' return os.name diff --git a/lib-python/2.7.0/test/mapping_tests.py b/lib-python/modified-2.7.0/test/mapping_tests.py copy from lib-python/2.7.0/test/mapping_tests.py copy to lib-python/modified-2.7.0/test/mapping_tests.py --- a/lib-python/2.7.0/test/mapping_tests.py +++ b/lib-python/modified-2.7.0/test/mapping_tests.py @@ -531,7 +531,10 @@ self.assertEqual(va, int(ka)) kb, vb = tb = b.popitem() self.assertEqual(vb, int(kb)) - self.assertTrue(not(copymode < 0 and ta != tb)) + if copymode < 0 and test_support.check_impl_detail(): + # popitem() is not guaranteed to be deterministic on + # all implementations + self.assertEqual(ta, tb) self.assertTrue(not a) self.assertTrue(not b) diff --git a/lib-python/modified-2.7.0/test/pickletester.py b/lib-python/modified-2.7.0/test/pickletester.py --- 
a/lib-python/modified-2.7.0/test/pickletester.py +++ b/lib-python/modified-2.7.0/test/pickletester.py @@ -1092,6 +1092,7 @@ s = StringIO.StringIO("X''.") self.assertRaises(EOFError, self.module.load, s) + @impl_detail("no full restricted mode in pypy", pypy=False) def test_restricted(self): # issue7128: cPickle failed in restricted mode builtins = {self.module.__name__: self.module, diff --git a/lib-python/2.7.0/test/pyclbr_input.py b/lib-python/modified-2.7.0/test/pyclbr_input.py copy from lib-python/2.7.0/test/pyclbr_input.py copy to lib-python/modified-2.7.0/test/pyclbr_input.py diff --git a/lib-python/2.7.0/test/string_tests.py b/lib-python/modified-2.7.0/test/string_tests.py copy from lib-python/2.7.0/test/string_tests.py copy to lib-python/modified-2.7.0/test/string_tests.py --- a/lib-python/2.7.0/test/string_tests.py +++ b/lib-python/modified-2.7.0/test/string_tests.py @@ -1024,7 +1024,10 @@ self.checkequal('abc', 'abc', '__mul__', 1) self.checkequal('abcabcabc', 'abc', '__mul__', 3) self.checkraises(TypeError, 'abc', '__mul__') - self.checkraises(TypeError, 'abc', '__mul__', '') + class Mul(object): + def mul(self, a, b): + return a * b + self.checkraises(TypeError, Mul(), 'mul', 'abc', '') # XXX: on a 64-bit system, this doesn't raise an overflow error, # but either raises a MemoryError, or succeeds (if you have 54TiB) #self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000) diff --git a/lib-python/2.7.0/test/test_ascii_formatd.py b/lib-python/modified-2.7.0/test/test_ascii_formatd.py copy from lib-python/2.7.0/test/test_ascii_formatd.py copy to lib-python/modified-2.7.0/test/test_ascii_formatd.py --- a/lib-python/2.7.0/test/test_ascii_formatd.py +++ b/lib-python/modified-2.7.0/test/test_ascii_formatd.py @@ -4,6 +4,10 @@ import unittest from test.test_support import check_warnings, run_unittest, import_module +from test.test_support import check_impl_detail + +if not check_impl_detail(cpython=True): + raise unittest.SkipTest("this test is only 
for CPython") # Skip tests if _ctypes module does not exist import_module('_ctypes') diff --git a/lib-python/modified-2.7.0/test/test_ast.py b/lib-python/modified-2.7.0/test/test_ast.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7.0/test/test_ast.py @@ -0,0 +1,578 @@ +import sys, itertools, unittest +from test import test_support +import ast + +def to_tuple(t): + if t is None or isinstance(t, (basestring, int, long, complex)): + return t + elif isinstance(t, list): + return [to_tuple(e) for e in t] + result = [t.__class__.__name__] + if hasattr(t, 'lineno') and hasattr(t, 'col_offset'): + result.append((t.lineno, t.col_offset)) + if t._fields is None: + return tuple(result) + for f in t._fields: + result.append(to_tuple(getattr(t, f))) + return tuple(result) + + +# These tests are compiled through "exec" +# There should be atleast one test per statement +exec_tests = [ + # None + "None", + # FunctionDef + "def f(): pass", + # FunctionDef with arg + "def f(a): pass", + # FunctionDef with arg and default value + "def f(a=0): pass", + # FunctionDef with varargs + "def f(*args): pass", + # FunctionDef with kwargs + "def f(**kwargs): pass", + # FunctionDef with all kind of args + "def f(a, b=1, c=None, d=[], e={}, *args, **kwargs): pass", + # ClassDef + "class C:pass", + # ClassDef, new style class + "class C(object): pass", + # Return + "def f():return 1", + # Delete + "del v", + # Assign + "v = 1", + # AugAssign + "v += 1", + # Print + "print >>f, 1, ", + # For + "for v in v:pass", + # While + "while v:pass", + # If + "if v:pass", + # Raise + "raise Exception, 'string'", + # TryExcept + "try:\n pass\nexcept Exception:\n pass", + # TryFinally + "try:\n pass\nfinally:\n pass", + # Assert + "assert v", + # Import + "import sys", + # ImportFrom + "from sys import v", + # Exec + "exec 'v'", + # Global + "global v", + # Expr + "1", + # Pass, + "pass", + # Break + "break", + # Continue + "continue", + # for statements with naked tuples (see 
http://bugs.python.org/issue6704) + "for a,b in c: pass", + "[(a,b) for a,b in c]", + "((a,b) for a,b in c)", + "((a,b) for (a,b) in c)", + # Multiline generator expression + """( + ( + Aa + , + Bb + ) + for + Aa + , + Bb in Cc + )""", + # dictcomp + "{a : b for w in x for m in p if g}", + # dictcomp with naked tuple + "{a : b for v,w in x}", + # setcomp + "{r for l in x if g}", + # setcomp with naked tuple + "{r for l,m in x}", +] + +# These are compiled through "single" +# because of overlap with "eval", it just tests what +# can't be tested with "eval" +single_tests = [ + "1+2" +] + +# These are compiled through "eval" +# It should test all expressions +eval_tests = [ + # None + "None", + # BoolOp + "a and b", + # BinOp + "a + b", + # UnaryOp + "not v", + # Lambda + "lambda:None", + # Dict + "{ 1:2 }", + # Empty dict + "{}", + # Set + "{None,}", + # Multiline dict + """{ + 1 + : + 2 + }""", + # ListComp + "[a for b in c if d]", + # GeneratorExp + "(a for b in c if d)", + # Yield - yield expressions can't work outside a function + # + # Compare + "1 < 2 < 3", + # Call + "f(1,2,c=3,*d,**e)", + # Repr + "`v`", + # Num + "10L", + # Str + "'string'", + # Attribute + "a.b", + # Subscript + "a[b:c]", + # Name + "v", + # List + "[1,2,3]", + # Empty list + "[]", + # Tuple + "1,2,3", + # Tuple + "(1,2,3)", + # Empty tuple + "()", + # Combination + "a.b.c.d(a.b[1:2])", + +] + +# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension +# excepthandler, arguments, keywords, alias + +class AST_Tests(unittest.TestCase): + + def _assertTrueorder(self, ast_node, parent_pos): + if not isinstance(ast_node, ast.AST) or ast_node._fields is None: + return + if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)): + node_pos = (ast_node.lineno, ast_node.col_offset) + self.assertTrue(node_pos >= parent_pos) + parent_pos = (ast_node.lineno, ast_node.col_offset) + for name in ast_node._fields: + value = getattr(ast_node, name) + if isinstance(value, list): + 
for child in value: + self._assertTrueorder(child, parent_pos) + elif value is not None: + self._assertTrueorder(value, parent_pos) + + def test_AST_objects(self): + x = ast.AST() + try: + x.foobar = 21 + except AttributeError, e: + self.assertEquals(e.args[0], + "'_ast.AST' object has no attribute 'foobar'") + else: + self.assert_(False) + + try: + ast.AST(lineno=2) + except AttributeError, e: + self.assertEquals(e.args[0], + "'_ast.AST' object has no attribute 'lineno'") + else: + self.assert_(False) + try: + ast.AST(2) + except TypeError, e: + self.assertEquals(e.args[0], + "_ast.AST constructor takes 0 positional arguments") + else: + self.assert_(False) + + def test_snippets(self): + for input, output, kind in ((exec_tests, exec_results, "exec"), + (single_tests, single_results, "single"), + (eval_tests, eval_results, "eval")): + for i, o in itertools.izip(input, output): + ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST) + self.assertEquals(to_tuple(ast_tree), o) + self._assertTrueorder(ast_tree, (0, 0)) + + def test_slice(self): + slc = ast.parse("x[::]").body[0].value.slice + self.assertIsNone(slc.upper) + self.assertIsNone(slc.lower) + self.assertIsInstance(slc.step, ast.Name) + self.assertEqual(slc.step.id, "None") + + def test_from_import(self): + im = ast.parse("from . 
import y").body[0] + self.assertIsNone(im.module) + + def test_base_classes(self): + self.assertTrue(issubclass(ast.For, ast.stmt)) + self.assertTrue(issubclass(ast.Name, ast.expr)) + self.assertTrue(issubclass(ast.stmt, ast.AST)) + self.assertTrue(issubclass(ast.expr, ast.AST)) + self.assertTrue(issubclass(ast.comprehension, ast.AST)) + self.assertTrue(issubclass(ast.Gt, ast.AST)) + + def test_field_attr_existence(self): + for name, item in ast.__dict__.iteritems(): + if isinstance(item, type) and name != 'AST' and name[0].isupper(): # XXX: pypy does not allow abstract ast class instanciation + x = item() + if isinstance(x, ast.AST): + self.assertEquals(type(x._fields), tuple) + + def test_arguments(self): + x = ast.arguments() + self.assertEquals(x._fields, ('args', 'vararg', 'kwarg', 'defaults')) + try: + x.vararg + except AttributeError, e: + self.assertEquals(e.args[0], + "'arguments' object has no attribute 'vararg'") + else: + self.assert_(False) + x = ast.arguments(1, 2, 3, 4) + self.assertEquals(x.vararg, 2) + + def test_field_attr_writable(self): + x = ast.Num() + # We can assign to _fields + x._fields = 666 + self.assertEquals(x._fields, 666) + + def test_classattrs(self): + x = ast.Num() + self.assertEquals(x._fields, ('n',)) + try: + x.n + except AttributeError, e: + self.assertEquals(e.args[0], + "'Num' object has no attribute 'n'") + else: + self.assert_(False) + + x = ast.Num(42) + self.assertEquals(x.n, 42) + try: + x.lineno + except AttributeError, e: + self.assertEquals(e.args[0], + "'Num' object has no attribute 'lineno'") + else: + self.assert_(False) + + y = ast.Num() + x.lineno = y + self.assertEquals(x.lineno, y) + + try: + x.foobar + except AttributeError, e: + self.assertEquals(e.args[0], + "'Num' object has no attribute 'foobar'") + else: + self.assert_(False) + + x = ast.Num(lineno=2) + self.assertEquals(x.lineno, 2) + + x = ast.Num(42, lineno=0) + self.assertEquals(x.lineno, 0) + self.assertEquals(x._fields, ('n',)) + 
self.assertEquals(x.n, 42) + + self.assertRaises(TypeError, ast.Num, 1, 2) + self.assertRaises(TypeError, ast.Num, 1, 2, lineno=0) + + def test_module(self): + body = [ast.Num(42)] + x = ast.Module(body) + self.assertEquals(x.body, body) + + def test_nodeclass(self): + x = ast.BinOp() + self.assertEquals(x._fields, ('left', 'op', 'right')) + + # Zero arguments constructor explicitely allowed + x = ast.BinOp() + # Random attribute allowed too + x.foobarbaz = 5 + self.assertEquals(x.foobarbaz, 5) + + n1 = ast.Num(1) + n3 = ast.Num(3) + addop = ast.Add() + x = ast.BinOp(n1, addop, n3) + self.assertEquals(x.left, n1) + self.assertEquals(x.op, addop) + self.assertEquals(x.right, n3) + + x = ast.BinOp(1, 2, 3) + self.assertEquals(x.left, 1) + self.assertEquals(x.op, 2) + self.assertEquals(x.right, 3) + + x = ast.BinOp(1, 2, 3, lineno=0) + self.assertEquals(x.lineno, 0) + + # node raises exception when not given enough arguments + self.assertRaises(TypeError, ast.BinOp, 1, 2) + # node raises exception when given too many arguments + self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4) + # node raises exception when not given enough arguments + self.assertRaises(TypeError, ast.BinOp, 1, 2, lineno=0) + # node raises exception when given too many arguments + self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0) + + # can set attributes through kwargs too + x = ast.BinOp(left=1, op=2, right=3, lineno=0) + self.assertEquals(x.left, 1) + self.assertEquals(x.op, 2) + self.assertEquals(x.right, 3) + self.assertEquals(x.lineno, 0) + + # Random kwargs also allowed + x = ast.BinOp(1, 2, 3, foobarbaz=42) + self.assertEquals(x.foobarbaz, 42) + + def test_no_fields(self): + # this used to fail because Sub._fields was None + x = ast.Sub() + self.assertEquals(x._fields, ()) + + def test_pickling(self): + import pickle + mods = [pickle] + try: + import cPickle + mods.append(cPickle) + except ImportError: + pass + protocols = [0, 1, 2] + for mod in mods: + for protocol in protocols: 
+ for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests): + ast2 = mod.loads(mod.dumps(ast, protocol)) + self.assertEquals(to_tuple(ast2), to_tuple(ast)) + + +class ASTHelpers_Test(unittest.TestCase): + + def test_parse(self): + a = ast.parse('foo(1 + 1)') + b = compile('foo(1 + 1)', '', 'exec', ast.PyCF_ONLY_AST) + self.assertEqual(ast.dump(a), ast.dump(b)) + + def test_dump(self): + node = ast.parse('spam(eggs, "and cheese")') + self.assertEqual(ast.dump(node), + "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), " + "args=[Name(id='eggs', ctx=Load()), Str(s='and cheese')], " + "keywords=[], starargs=None, kwargs=None))])" + ) + self.assertEqual(ast.dump(node, annotate_fields=False), + "Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), " + "Str('and cheese')], [], None, None))])" + ) + self.assertEqual(ast.dump(node, include_attributes=True), + "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), " + "lineno=1, col_offset=0), args=[Name(id='eggs', ctx=Load(), " + "lineno=1, col_offset=5), Str(s='and cheese', lineno=1, " + "col_offset=11)], keywords=[], starargs=None, kwargs=None, " + "lineno=1, col_offset=0), lineno=1, col_offset=0)])" + ) + + def test_copy_location(self): + src = ast.parse('1 + 1', mode='eval') + src.body.right = ast.copy_location(ast.Num(2), src.body.right) + self.assertEqual(ast.dump(src, include_attributes=True), + 'Expression(body=BinOp(left=Num(n=1, lineno=1, col_offset=0), ' + 'op=Add(), right=Num(n=2, lineno=1, col_offset=4), lineno=1, ' + 'col_offset=0))' + ) + + def test_fix_missing_locations(self): + src = ast.parse('write("spam")') + src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()), + [ast.Str('eggs')], [], None, None))) + self.assertEqual(src, ast.fix_missing_locations(src)) + self.assertEqual(ast.dump(src, include_attributes=True), + "Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), " + "lineno=1, col_offset=0), args=[Str(s='spam', lineno=1, " + 
"col_offset=6)], keywords=[], starargs=None, kwargs=None, " + "lineno=1, col_offset=0), lineno=1, col_offset=0), " + "Expr(value=Call(func=Name(id='spam', ctx=Load(), lineno=1, " + "col_offset=0), args=[Str(s='eggs', lineno=1, col_offset=0)], " + "keywords=[], starargs=None, kwargs=None, lineno=1, " + "col_offset=0), lineno=1, col_offset=0)])" + ) + + def test_increment_lineno(self): + src = ast.parse('1 + 1', mode='eval') + self.assertEqual(ast.increment_lineno(src, n=3), src) + self.assertEqual(ast.dump(src, include_attributes=True), + 'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), ' + 'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, ' + 'col_offset=0))' + ) + + def test_iter_fields(self): + node = ast.parse('foo()', mode='eval') + d = dict(ast.iter_fields(node.body)) + self.assertEqual(d.pop('func').id, 'foo') + self.assertEqual(d, {'keywords': [], 'kwargs': None, + 'args': [], 'starargs': None}) + + def test_iter_child_nodes(self): + node = ast.parse("spam(23, 42, eggs='leek')", mode='eval') + self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4) + iterator = ast.iter_child_nodes(node.body) + self.assertEqual(next(iterator).id, 'spam') + self.assertEqual(next(iterator).n, 23) + self.assertEqual(next(iterator).n, 42) + self.assertEqual(ast.dump(next(iterator)), + "keyword(arg='eggs', value=Str(s='leek'))" + ) + + def test_get_docstring(self): + node = ast.parse('def foo():\n """line one\n line two"""') + self.assertEqual(ast.get_docstring(node.body[0]), + 'line one\nline two') + + def test_literal_eval(self): + self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3]) + self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42}) + self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None)) + self.assertRaises(ValueError, ast.literal_eval, 'foo()') + + def test_literal_eval_issue4907(self): + self.assertEqual(ast.literal_eval('2j'), 2j) + self.assertEqual(ast.literal_eval('10 + 2j'), 10 + 2j) + 
self.assertEqual(ast.literal_eval('1.5 - 2j'), 1.5 - 2j) + self.assertRaises(ValueError, ast.literal_eval, '2 + (3 + 4j)') + + +def test_main(): + with test_support.check_py3k_warnings(("backquote not supported", + SyntaxWarning)): + test_support.run_unittest(AST_Tests, ASTHelpers_Test) + +def main(): + if __name__ != '__main__': + return + if sys.argv[1:] == ['-g']: + for statements, kind in ((exec_tests, "exec"), (single_tests, "single"), + (eval_tests, "eval")): + print kind+"_results = [" + for s in statements: + print repr(to_tuple(compile(s, "?", kind, 0x400)))+"," + print "]" + print "main()" + raise SystemExit + test_main() + +#### EVERYTHING BELOW IS GENERATED ##### +exec_results = [ +('Module', [('Expr', (1, 0), ('Name', (1, 0), 'None', ('Load',)))]), +('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Pass', (1, 9))], [])]), +('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, []), [('Pass', (1, 10))], [])]), +('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, [('Num', (1, 8), 0)]), [('Pass', (1, 12))], [])]), +('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], 'args', None, []), [('Pass', (1, 14))], [])]), +('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, 'kwargs', []), [('Pass', (1, 17))], [])]), +('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',)), ('Name', (1, 9), 'b', ('Param',)), ('Name', (1, 14), 'c', ('Param',)), ('Name', (1, 22), 'd', ('Param',)), ('Name', (1, 28), 'e', ('Param',))], 'args', 'kwargs', [('Num', (1, 11), 1), ('Name', (1, 16), 'None', ('Load',)), ('List', (1, 24), [], ('Load',)), ('Dict', (1, 30), [], [])]), [('Pass', (1, 52))], [])]), +('Module', [('ClassDef', (1, 0), 'C', [], [('Pass', (1, 8))], [])]), +('Module', [('ClassDef', (1, 0), 'C', [('Name', (1, 8), 'object', ('Load',))], [('Pass', (1, 17))], [])]), +('Module', [('FunctionDef', 
(1, 0), 'f', ('arguments', [], None, None, []), [('Return', (1, 8), ('Num', (1, 15), 1))], [])]), +('Module', [('Delete', (1, 0), [('Name', (1, 4), 'v', ('Del',))])]), +('Module', [('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))]), +('Module', [('AugAssign', (1, 0), ('Name', (1, 0), 'v', ('Store',)), ('Add',), ('Num', (1, 5), 1))]), +('Module', [('Print', (1, 0), ('Name', (1, 8), 'f', ('Load',)), [('Num', (1, 11), 1)], False)]), +('Module', [('For', (1, 0), ('Name', (1, 4), 'v', ('Store',)), ('Name', (1, 9), 'v', ('Load',)), [('Pass', (1, 11))], [])]), +('Module', [('While', (1, 0), ('Name', (1, 6), 'v', ('Load',)), [('Pass', (1, 8))], [])]), +('Module', [('If', (1, 0), ('Name', (1, 3), 'v', ('Load',)), [('Pass', (1, 5))], [])]), +('Module', [('Raise', (1, 0), ('Name', (1, 6), 'Exception', ('Load',)), ('Str', (1, 17), 'string'), None)]), +('Module', [('TryExcept', (1, 0), [('Pass', (2, 2))], [('ExceptHandler', (3, 0), ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))])], [])]), +('Module', [('TryFinally', (1, 0), [('Pass', (2, 2))], [('Pass', (4, 2))])]), +('Module', [('Assert', (1, 0), ('Name', (1, 7), 'v', ('Load',)), None)]), +('Module', [('Import', (1, 0), [('alias', 'sys', None)])]), +('Module', [('ImportFrom', (1, 0), 'sys', [('alias', 'v', None)], 0)]), +('Module', [('Exec', (1, 0), ('Str', (1, 5), 'v'), None, None)]), +('Module', [('Global', (1, 0), ['v'])]), +('Module', [('Expr', (1, 0), ('Num', (1, 0), 1))]), +('Module', [('Pass', (1, 0))]), +('Module', [('Break', (1, 0))]), +('Module', [('Continue', (1, 0))]), +('Module', [('For', (1, 0), ('Tuple', (1, 4), [('Name', (1, 4), 'a', ('Store',)), ('Name', (1, 6), 'b', ('Store',))], ('Store',)), ('Name', (1, 11), 'c', ('Load',)), [('Pass', (1, 14))], [])]), +('Module', [('Expr', (1, 0), ('ListComp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', 
('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]), +('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]), +('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 12), [('Name', (1, 12), 'a', ('Store',)), ('Name', (1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 20), 'c', ('Load',)), [])]))]), +('Module', [('Expr', (1, 0), ('GeneratorExp', (2, 4), ('Tuple', (3, 4), [('Name', (3, 4), 'Aa', ('Load',)), ('Name', (5, 7), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4), [('Name', (8, 4), 'Aa', ('Store',)), ('Name', (10, 4), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10), 'Cc', ('Load',)), [])]))]), +('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Name', (1, 11), 'w', ('Store',)), ('Name', (1, 16), 'x', ('Load',)), []), ('comprehension', ('Name', (1, 22), 'm', ('Store',)), ('Name', (1, 27), 'p', ('Load',)), [('Name', (1, 32), 'g', ('Load',))])]))]), +('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', ('Load',)), [])]))]), +('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]), +('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', 
(1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]), +] +single_results = [ +('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]), +] +eval_results = [ +('Expression', ('Name', (1, 0), 'None', ('Load',))), +('Expression', ('BoolOp', (1, 0), ('And',), [('Name', (1, 0), 'a', ('Load',)), ('Name', (1, 6), 'b', ('Load',))])), +('Expression', ('BinOp', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Add',), ('Name', (1, 4), 'b', ('Load',)))), +('Expression', ('UnaryOp', (1, 0), ('Not',), ('Name', (1, 4), 'v', ('Load',)))), +('Expression', ('Lambda', (1, 0), ('arguments', [], None, None, []), ('Name', (1, 7), 'None', ('Load',)))), +('Expression', ('Dict', (1, 0), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])), +('Expression', ('Dict', (1, 0), [], [])), +('Expression', ('Set', (1, 0), [('Name', (1, 1), 'None', ('Load',))])), +('Expression', ('Dict', (1, 0), [('Num', (2, 6), 1)], [('Num', (4, 10), 2)])), +('Expression', ('ListComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])), +('Expression', ('GeneratorExp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])), +('Expression', ('Compare', (1, 0), ('Num', (1, 0), 1), [('Lt',), ('Lt',)], [('Num', (1, 4), 2), ('Num', (1, 8), 3)])), +('Expression', ('Call', (1, 0), ('Name', (1, 0), 'f', ('Load',)), [('Num', (1, 2), 1), ('Num', (1, 4), 2)], [('keyword', 'c', ('Num', (1, 8), 3))], ('Name', (1, 11), 'd', ('Load',)), ('Name', (1, 15), 'e', ('Load',)))), +('Expression', ('Repr', (1, 0), ('Name', (1, 1), 'v', ('Load',)))), +('Expression', ('Num', (1, 0), 10L)), +('Expression', ('Str', (1, 0), 'string')), 
+('Expression', ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',))), +('Expression', ('Subscript', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Slice', ('Name', (1, 2), 'b', ('Load',)), ('Name', (1, 4), 'c', ('Load',)), None), ('Load',))), +('Expression', ('Name', (1, 0), 'v', ('Load',))), +('Expression', ('List', (1, 0), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))), +('Expression', ('List', (1, 0), [], ('Load',))), +('Expression', ('Tuple', (1, 0), [('Num', (1, 0), 1), ('Num', (1, 2), 2), ('Num', (1, 4), 3)], ('Load',))), +('Expression', ('Tuple', (1, 1), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))), +('Expression', ('Tuple', (1, 0), [], ('Load',))), +('Expression', ('Call', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8), ('Attribute', (1, 8), ('Name', (1, 8), 'a', ('Load',)), 'b', ('Load',)), ('Slice', ('Num', (1, 12), 1), ('Num', (1, 14), 2), None), ('Load',))], [], None, None)), +] +main() diff --git a/lib-python/2.7.0/test/test_builtin.py b/lib-python/modified-2.7.0/test/test_builtin.py copy from lib-python/2.7.0/test/test_builtin.py copy to lib-python/modified-2.7.0/test/test_builtin.py --- a/lib-python/2.7.0/test/test_builtin.py +++ b/lib-python/modified-2.7.0/test/test_builtin.py @@ -3,7 +3,8 @@ import platform import unittest from test.test_support import fcmp, have_unicode, TESTFN, unlink, \ - run_unittest, check_py3k_warnings + run_unittest, check_py3k_warnings, \ + check_impl_detail import warnings from operator import neg @@ -247,12 +248,14 @@ self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + if check_impl_detail(cpython=True): + 
self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + if check_impl_detail(cpython=True): + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') @@ -395,12 +398,16 @@ self.assertEqual(eval('dir()', g, m), list('xyz')) self.assertEqual(eval('globals()', g, m), g) self.assertEqual(eval('locals()', g, m), m) - self.assertRaises(TypeError, eval, 'a', m) + # on top of CPython, the first dictionary (the globals) has to + # be a real dict. This is not the case on top of PyPy. + if check_impl_detail(pypy=False): + self.assertRaises(TypeError, eval, 'a', m) + class A: "Non-mapping" pass m = A() - self.assertRaises(TypeError, eval, 'a', g, m) + self.assertRaises((TypeError, AttributeError), eval, 'a', g, m) # Verify that dict subclasses work as well class D(dict): @@ -491,9 +498,10 @@ execfile(TESTFN, globals, locals) self.assertEqual(locals['z'], 2) + self.assertRaises(TypeError, execfile, TESTFN, {}, ()) unlink(TESTFN) self.assertRaises(TypeError, execfile) - self.assertRaises(TypeError, execfile, TESTFN, {}, ()) + self.assertRaises((TypeError, IOError), execfile, TESTFN, {}, ()) import os self.assertRaises(IOError, execfile, os.curdir) self.assertRaises(IOError, execfile, "I_dont_exist") @@ -1108,7 +1116,8 @@ def __cmp__(self, other): raise RuntimeError __hash__ = None # Invalid cmp makes this unhashable - self.assertRaises(RuntimeError, range, a, a + 1, badzero(1)) + if check_impl_detail(cpython=True): + self.assertRaises(RuntimeError, range, a, a + 1, badzero(1)) # Reject floats. self.assertRaises(TypeError, range, 1., 1., 1.) 
diff --git a/lib-python/2.7.0/test/test_bytes.py b/lib-python/modified-2.7.0/test/test_bytes.py copy from lib-python/2.7.0/test/test_bytes.py copy to lib-python/modified-2.7.0/test/test_bytes.py --- a/lib-python/2.7.0/test/test_bytes.py +++ b/lib-python/modified-2.7.0/test/test_bytes.py @@ -632,6 +632,7 @@ self.assertEqual(b, b1) self.assertTrue(b is b1) + @test.test_support.impl_detail("undocumented bytes.__alloc__()") def test_alloc(self): b = bytearray() alloc = b.__alloc__() @@ -759,6 +760,8 @@ self.assertEqual(b, b"") self.assertEqual(c, b"") + @test.test_support.impl_detail( + "resizing semantics of CPython rely on refcounting") def test_resize_forbidden(self): # #4509: can't resize a bytearray when there are buffer exports, even # if it wouldn't reallocate the underlying buffer. @@ -791,6 +794,26 @@ self.assertRaises(BufferError, delslice) self.assertEquals(b, orig) + @test.test_support.impl_detail("resizing semantics", cpython=False) + def test_resize_forbidden_non_cpython(self): + # on non-CPython implementations, we cannot prevent changes to + # bytearrays just because there are buffers around. Instead, + # we get (on PyPy) a buffer that follows the changes and resizes. 
+ b = bytearray(range(10)) + for v in [memoryview(b), buffer(b)]: + b[5] = 99 + self.assertIn(v[5], (99, chr(99))) + b[5] = 100 + b += b + b += b + b += b + self.assertEquals(len(v), 80) + self.assertIn(v[5], (100, chr(100))) + self.assertIn(v[79], (9, chr(9))) + del b[10:] + self.assertRaises(IndexError, lambda: v[10]) + self.assertEquals(len(v), 10) + def test_empty_bytearray(self): # Issue #7561: operations on empty bytearrays could crash in many # situations, due to a fragile implementation of the diff --git a/lib-python/2.7.0/test/test_codeop.py b/lib-python/modified-2.7.0/test/test_codeop.py copy from lib-python/2.7.0/test/test_codeop.py copy to lib-python/modified-2.7.0/test/test_codeop.py --- a/lib-python/2.7.0/test/test_codeop.py +++ b/lib-python/modified-2.7.0/test/test_codeop.py @@ -3,7 +3,7 @@ Nick Mathewson """ import unittest -from test.test_support import run_unittest, is_jython +from test.test_support import run_unittest, is_jython, check_impl_detail from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT @@ -270,7 +270,9 @@ ai("a = 'a\\\n") ai("a = 1","eval") - ai("a = (","eval") + if check_impl_detail(): # on PyPy it asks for more data, which is not + ai("a = (","eval") # completely correct but hard to fix and + # really a detail (in my opinion ) ai("]","eval") ai("())","eval") ai("[}","eval") diff --git a/lib-python/2.7.0/test/test_coercion.py b/lib-python/modified-2.7.0/test/test_coercion.py copy from lib-python/2.7.0/test/test_coercion.py copy to lib-python/modified-2.7.0/test/test_coercion.py --- a/lib-python/2.7.0/test/test_coercion.py +++ b/lib-python/modified-2.7.0/test/test_coercion.py @@ -1,6 +1,7 @@ import copy import unittest -from test.test_support import run_unittest, TestFailed, check_warnings +from test.test_support import ( + run_unittest, TestFailed, check_warnings, check_impl_detail) # Fake a number that implements numeric methods through __coerce__ @@ -306,12 +307,18 @@ self.assertNotEquals(cmp(u'fish', evil_coercer), 0) 
self.assertNotEquals(cmp(slice(1), evil_coercer), 0) # ...but that this still works - class WackyComparer(object): - def __cmp__(slf, other): - self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other) - return 0 - __hash__ = None # Invalid cmp makes this unhashable - self.assertEquals(cmp(WackyComparer(), evil_coercer), 0) + if check_impl_detail(): + # NB. I (arigo) would consider the following as implementation- + # specific. For example, in CPython, if we replace 42 with 42.0 + # both below and in CoerceTo() above, then the test fails. This + # hints that the behavior is really dependent on some obscure + # internal details. + class WackyComparer(object): + def __cmp__(slf, other): + self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other) + return 0 + __hash__ = None # Invalid cmp makes this unhashable + self.assertEquals(cmp(WackyComparer(), evil_coercer), 0) # ...and classic classes too, since that code path is a little different class ClassicWackyComparer: def __cmp__(slf, other): diff --git a/lib-python/2.7.0/test/test_cpickle.py b/lib-python/modified-2.7.0/test/test_cpickle.py copy from lib-python/2.7.0/test/test_cpickle.py copy to lib-python/modified-2.7.0/test/test_cpickle.py --- a/lib-python/2.7.0/test/test_cpickle.py +++ b/lib-python/modified-2.7.0/test/test_cpickle.py @@ -61,27 +61,27 @@ error = cPickle.BadPickleGet def test_recursive_list(self): - self.assertRaises(ValueError, + self.assertRaises((ValueError, RuntimeError), AbstractPickleTests.test_recursive_list, self) def test_recursive_tuple(self): - self.assertRaises(ValueError, + self.assertRaises((ValueError, RuntimeError), AbstractPickleTests.test_recursive_tuple, self) def test_recursive_inst(self): - self.assertRaises(ValueError, + self.assertRaises((ValueError, RuntimeError), AbstractPickleTests.test_recursive_inst, self) def test_recursive_dict(self): - self.assertRaises(ValueError, + self.assertRaises((ValueError, RuntimeError), 
AbstractPickleTests.test_recursive_dict, self) def test_recursive_multi(self): - self.assertRaises(ValueError, + self.assertRaises((ValueError, RuntimeError), AbstractPickleTests.test_recursive_multi, self) diff --git a/lib-python/modified-2.7.0/test/test_deque.py b/lib-python/modified-2.7.0/test/test_deque.py --- a/lib-python/modified-2.7.0/test/test_deque.py +++ b/lib-python/modified-2.7.0/test/test_deque.py @@ -109,7 +109,7 @@ self.assertEqual(deque('abc', maxlen=4).maxlen, 4) self.assertEqual(deque('abc', maxlen=2).maxlen, 2) self.assertEqual(deque('abc', maxlen=0).maxlen, 0) - with self.assertRaises(AttributeError): + with self.assertRaises((AttributeError, TypeError)): d = deque('abc') d.maxlen = 10 @@ -343,7 +343,10 @@ for match in (True, False): d = deque(['ab']) d.extend([MutateCmp(d, match), 'c']) - self.assertRaises(IndexError, d.remove, 'c') + # On CPython we get IndexError: deque mutated during remove(). + # Why is it an IndexError during remove() only??? + # On PyPy it is a RuntimeError, as in the other operations. 
+ self.assertRaises((IndexError, RuntimeError), d.remove, 'c') self.assertEqual(d, deque()) def test_repr(self): diff --git a/lib-python/2.7.0/test/test_descr.py b/lib-python/modified-2.7.0/test/test_descr.py copy from lib-python/2.7.0/test/test_descr.py copy to lib-python/modified-2.7.0/test/test_descr.py --- a/lib-python/2.7.0/test/test_descr.py +++ b/lib-python/modified-2.7.0/test/test_descr.py @@ -1128,7 +1128,7 @@ # Test lookup leaks [SF bug 572567] import gc - if hasattr(gc, 'get_objects'): + if test_support.check_impl_detail(): class G(object): def __cmp__(self, other): return 0 @@ -1740,6 +1740,10 @@ raise MyException for name, runner, meth_impl, ok, env in specials: + if name == '__length_hint__' or name == '__sizeof__': + if not test_support.check_impl_detail(): + continue + class X(Checker): pass for attr, obj in env.iteritems(): @@ -1979,7 +1983,9 @@ except TypeError, msg: self.assertTrue(str(msg).find("weak reference") >= 0) else: - self.fail("weakref.ref(no) should be illegal") + if test_support.check_impl_detail(pypy=False): + self.fail("weakref.ref(no) should be illegal") + #else: pypy supports taking weakrefs to some more objects class Weak(object): __slots__ = ['foo', '__weakref__'] yes = Weak() @@ -3091,7 +3097,16 @@ class R(J): __slots__ = ["__dict__", "__weakref__"] - for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)): + if test_support.check_impl_detail(pypy=False): + lst = ((G, H), (G, I), (I, H), (Q, R), (R, Q)) + else: + # Not supported in pypy: changing the __class__ of an object + # to another __class__ that just happens to have the same slots. + # If needed, we can add the feature, but what we'll likely do + # then is to allow mostly any __class__ assignment, even if the + # classes have different __slots__, because we it's easier. 
+ lst = ((Q, R), (R, Q)) + for cls, cls2 in lst: x = cls() x.a = 1 x.__class__ = cls2 diff --git a/lib-python/2.7.0/test/test_descrtut.py b/lib-python/modified-2.7.0/test/test_descrtut.py copy from lib-python/2.7.0/test/test_descrtut.py copy to lib-python/modified-2.7.0/test/test_descrtut.py --- a/lib-python/2.7.0/test/test_descrtut.py +++ b/lib-python/modified-2.7.0/test/test_descrtut.py @@ -172,46 +172,12 @@ AttributeError: 'list' object has no attribute '__methods__' >>> -Instead, you can get the same information from the list type: +Instead, you can get the same information from the list type +(the following example filters out the numerous method names +starting with '_'): - >>> pprint.pprint(dir(list)) # like list.__dict__.keys(), but sorted - ['__add__', - '__class__', - '__contains__', - '__delattr__', - '__delitem__', - '__delslice__', - '__doc__', - '__eq__', - '__format__', - '__ge__', - '__getattribute__', - '__getitem__', - '__getslice__', - '__gt__', - '__hash__', - '__iadd__', - '__imul__', - '__init__', - '__iter__', - '__le__', - '__len__', - '__lt__', - '__mul__', - '__ne__', - '__new__', - '__reduce__', - '__reduce_ex__', - '__repr__', - '__reversed__', - '__rmul__', - '__setattr__', - '__setitem__', - '__setslice__', - '__sizeof__', - '__str__', - '__subclasshook__', - 'append', + >>> pprint.pprint([name for name in dir(list) if not name.startswith('_')]) + ['append', 'count', 'extend', 'index', diff --git a/lib-python/2.7.0/test/test_doctest.py b/lib-python/modified-2.7.0/test/test_doctest.py copy from lib-python/2.7.0/test/test_doctest.py copy to lib-python/modified-2.7.0/test/test_doctest.py --- a/lib-python/2.7.0/test/test_doctest.py +++ b/lib-python/modified-2.7.0/test/test_doctest.py @@ -782,7 +782,7 @@ ... >>> x = 12 ... >>> print x//0 ... Traceback (most recent call last): - ... ZeroDivisionError: integer division or modulo by zero + ... ZeroDivisionError: integer division by zero ... 
''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) @@ -799,7 +799,7 @@ ... >>> print 'pre-exception output', x//0 ... pre-exception output ... Traceback (most recent call last): - ... ZeroDivisionError: integer division or modulo by zero + ... ZeroDivisionError: integer division by zero ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) @@ -810,7 +810,7 @@ print 'pre-exception output', x//0 Exception raised: ... - ZeroDivisionError: integer division or modulo by zero + ZeroDivisionError: integer division by zero TestResults(failed=1, attempted=2) Exception messages may contain newlines: @@ -978,7 +978,7 @@ Exception raised: Traceback (most recent call last): ... - ZeroDivisionError: integer division or modulo by zero + ZeroDivisionError: integer division by zero TestResults(failed=1, attempted=1) """ def displayhook(): r""" @@ -1924,7 +1924,7 @@ > (1)() -> calls_set_trace() (Pdb) print foo - *** NameError: name 'foo' is not defined + *** NameError: global name 'foo' is not defined (Pdb) continue TestResults(failed=0, attempted=2) """ @@ -2229,7 +2229,7 @@ favorite_color Exception raised: ... - NameError: name 'favorite_color' is not defined + NameError: global name 'favorite_color' is not defined @@ -2289,7 +2289,7 @@ favorite_color Exception raised: ... - NameError: name 'favorite_color' is not defined + NameError: global name 'favorite_color' is not defined ********************************************************************** 1 items had failures: 1 of 2 in test_doctest.txt @@ -2382,7 +2382,7 @@ favorite_color Exception raised: ... - NameError: name 'favorite_color' is not defined + NameError: global name 'favorite_color' is not defined TestResults(failed=1, attempted=2) >>> doctest.master = None # Reset master. 
diff --git a/lib-python/2.7.0/test/test_doctest.txt b/lib-python/modified-2.7.0/test/test_doctest.txt copy from lib-python/2.7.0/test/test_doctest.txt copy to lib-python/modified-2.7.0/test/test_doctest.txt diff --git a/lib-python/2.7.0/test/test_doctest2.txt b/lib-python/modified-2.7.0/test/test_doctest2.txt copy from lib-python/2.7.0/test/test_doctest2.txt copy to lib-python/modified-2.7.0/test/test_doctest2.txt diff --git a/lib-python/2.7.0/test/test_doctest3.txt b/lib-python/modified-2.7.0/test/test_doctest3.txt copy from lib-python/2.7.0/test/test_doctest3.txt copy to lib-python/modified-2.7.0/test/test_doctest3.txt diff --git a/lib-python/2.7.0/test/test_doctest4.txt b/lib-python/modified-2.7.0/test/test_doctest4.txt copy from lib-python/2.7.0/test/test_doctest4.txt copy to lib-python/modified-2.7.0/test/test_doctest4.txt diff --git a/lib-python/2.7.0/test/test_extcall.py b/lib-python/modified-2.7.0/test/test_extcall.py copy from lib-python/2.7.0/test/test_extcall.py copy to lib-python/modified-2.7.0/test/test_extcall.py --- a/lib-python/2.7.0/test/test_extcall.py +++ b/lib-python/modified-2.7.0/test/test_extcall.py @@ -90,19 +90,19 @@ >>> class Nothing: pass ... - >>> g(*Nothing()) + >>> g(*Nothing()) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: g() argument after * must be a sequence, not instance + TypeError: ...argument after * must be a sequence, not instance >>> class Nothing: ... def __len__(self): return 5 ... - >>> g(*Nothing()) + >>> g(*Nothing()) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: g() argument after * must be a sequence, not instance + TypeError: ...argument after * must be a sequence, not instance >>> class Nothing(): ... def __len__(self): return 5 @@ -154,52 +154,50 @@ ... TypeError: g() got multiple values for keyword argument 'x' - >>> f(**{1:2}) + >>> f(**{1:2}) #doctest: +ELLIPSIS Traceback (most recent call last): ... 
- TypeError: f() keywords must be strings + TypeError: ...keywords must be strings >>> h(**{'e': 2}) Traceback (most recent call last): ... TypeError: h() got an unexpected keyword argument 'e' - >>> h(*h) + >>> h(*h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: h() argument after * must be a sequence, not function + TypeError: ...argument after * must be a sequence, not function - >>> dir(*h) + >>> dir(*h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: dir() argument after * must be a sequence, not function + TypeError: ...argument after * must be a sequence, not function - >>> None(*h) + >>> None(*h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: NoneType object argument after * must be a sequence, \ -not function + TypeError: ...argument after * must be a sequence, not function - >>> h(**h) + >>> h(**h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: h() argument after ** must be a mapping, not function + TypeError: ...argument after ** must be a mapping, not function - >>> dir(**h) + >>> dir(**h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: dir() argument after ** must be a mapping, not function + TypeError: ...argument after ** must be a mapping, not function - >>> None(**h) + >>> None(**h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: NoneType object argument after ** must be a mapping, \ -not function + TypeError: ...argument after ** must be a mapping, not function - >>> dir(b=1, **{'b': 1}) + >>> dir(b=1, **{'b': 1}) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: dir() got multiple values for keyword argument 'b' + TypeError: ...got multiple values for keyword argument 'b' Another helper function @@ -247,10 +245,10 @@ ... False True - >>> id(1, **{'foo': 1}) + >>> id(1, **{'foo': 1}) #doctest: +ELLIPSIS Traceback (most recent call last): ... 
- TypeError: id() takes no keyword arguments + TypeError: id() ... keyword argument... A corner case of keyword dictionary items being deleted during the function call setup. See . @@ -301,7 +299,7 @@ def f(a): return a self.assertEqual(f(**{u'a': 4}), 4) - self.assertRaises(TypeError, f, **{u'stören': 4}) + self.assertRaises(TypeError, lambda: f(**{u'stören': 4})) self.assertRaises(TypeError, f, **{u'someLongString':2}) try: f(a=4, **{u'a': 4}) diff --git a/lib-python/modified-2.7.0/test/test_file.py b/lib-python/modified-2.7.0/test/test_file.py --- a/lib-python/modified-2.7.0/test/test_file.py +++ b/lib-python/modified-2.7.0/test/test_file.py @@ -158,7 +158,12 @@ def testStdin(self): # This causes the interpreter to exit on OSF1 v5.1. if sys.platform != 'osf1V5': - self.assertRaises((IOError, ValueError), sys.stdin.seek, -1) + if sys.stdin.isatty(): + self.assertRaises((IOError, ValueError), sys.stdin.seek, -1) + else: + print(( + ' Skipping sys.stdin.seek(-1): stdin is not a tty.' + ' Test manually.'), file=sys.__stdout__) else: print(( ' Skipping sys.stdin.seek(-1), it may crash the interpreter.' diff --git a/lib-python/modified-2.7.0/test/test_file2k.py b/lib-python/modified-2.7.0/test/test_file2k.py --- a/lib-python/modified-2.7.0/test/test_file2k.py +++ b/lib-python/modified-2.7.0/test/test_file2k.py @@ -117,8 +117,12 @@ for methodname in methods: method = getattr(self.f, methodname) + args = {'readinto': (bytearray(''),), + 'seek': (0,), + 'write': ('',), + }.get(methodname, ()) # should raise on closed file - self.assertRaises(ValueError, method) + self.assertRaises(ValueError, method, *args) with test_support.check_py3k_warnings(): for methodname in deprecated_methods: method = getattr(self.f, methodname) @@ -217,7 +221,12 @@ def testStdin(self): # This causes the interpreter to exit on OSF1 v5.1. 
if sys.platform != 'osf1V5': - self.assertRaises(IOError, sys.stdin.seek, -1) + if sys.stdin.isatty(): + self.assertRaises(IOError, sys.stdin.seek, -1) + else: + print >>sys.__stdout__, ( + ' Skipping sys.stdin.seek(-1): stdin is not a tty.' + ' Test manualy.') else: print >>sys.__stdout__, ( ' Skipping sys.stdin.seek(-1), it may crash the interpreter.' @@ -337,8 +346,9 @@ except ValueError: pass else: - self.fail("%s%r after next() didn't raise ValueError" % - (methodname, args)) + if test_support.check_impl_detail(): + self.fail("%s%r after next() didn't raise ValueError" % + (methodname, args)) f.close() # Test to see if harmless (by accident) mixing of read* and @@ -389,6 +399,7 @@ if lines != testlines: self.fail("readlines() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) + f.close() # Reading after iteration hit EOF shouldn't hurt either f = open(TESTFN) try: @@ -439,6 +450,9 @@ self.close_count = 0 self.close_success_count = 0 self.use_buffering = False + # to prevent running out of file descriptors on PyPy, + # we only keep the 50 most recent files open + self.all_files = [None] * 50 def tearDown(self): if self.f: @@ -454,9 +468,14 @@ def _create_file(self): if self.use_buffering: - self.f = open(self.filename, "w+", buffering=1024*16) + f = open(self.filename, "w+", buffering=1024*16) else: - self.f = open(self.filename, "w+") + f = open(self.filename, "w+") + self.f = f + self.all_files.append(f) + oldf = self.all_files.pop(0) + if oldf is not None: + oldf.close() def _close_file(self): with self._count_lock: @@ -497,7 +516,6 @@ def _test_close_open_io(self, io_func, nb_workers=5): def worker(): - self._create_file() funcs = itertools.cycle(( lambda: io_func(), lambda: self._close_and_reopen_file(), @@ -509,6 +527,7 @@ f() except (IOError, ValueError): pass + self._create_file() self._run_workers(worker, nb_workers) if test_support.verbose: # Useful verbose statistics when tuning this test to take diff --git 
a/lib-python/2.7.0/test/test_format.py b/lib-python/modified-2.7.0/test/test_format.py copy from lib-python/2.7.0/test/test_format.py copy to lib-python/modified-2.7.0/test/test_format.py --- a/lib-python/2.7.0/test/test_format.py +++ b/lib-python/modified-2.7.0/test/test_format.py @@ -242,7 +242,7 @@ try: testformat(formatstr, args) except exception, exc: - if str(exc) == excmsg: + if str(exc) == excmsg or not test_support.check_impl_detail(): if verbose: print "yes" else: @@ -272,13 +272,16 @@ test_exc(u'no format', u'1', TypeError, "not all arguments converted during string formatting") - class Foobar(long): - def __oct__(self): - # Returning a non-string should not blow up. - return self + 1 - - test_exc('%o', Foobar(), TypeError, - "expected string or Unicode object, long found") + if test_support.check_impl_detail(): + # __oct__() is called if Foobar inherits from 'long', but + # not, say, 'object' or 'int' or 'str'. This seems strange + # enough to consider it a complete implementation detail. + class Foobar(long): + def __oct__(self): + # Returning a non-string should not blow up. 
+ return self + 1 + test_exc('%o', Foobar(), TypeError, + "expected string or Unicode object, long found") if maxsize == 2**31-1: # crashes 2.2.1 and earlier: diff --git a/lib-python/modified-2.7.0/test/test_functools.py b/lib-python/modified-2.7.0/test/test_functools.py --- a/lib-python/modified-2.7.0/test/test_functools.py +++ b/lib-python/modified-2.7.0/test/test_functools.py @@ -45,6 +45,8 @@ # attributes should not be writable if not isinstance(self.thetype, type): return + if not test_support.check_impl_detail(): + return self.assertRaises(TypeError, setattr, p, 'func', map) self.assertRaises(TypeError, setattr, p, 'args', (1, 2)) self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2)) diff --git a/lib-python/modified-2.7.0/test/test_genexps.py b/lib-python/modified-2.7.0/test/test_genexps.py --- a/lib-python/modified-2.7.0/test/test_genexps.py +++ b/lib-python/modified-2.7.0/test/test_genexps.py @@ -128,9 +128,10 @@ Verify re-use of tuples (a side benefit of using genexps over listcomps) -## >>> tupleids = map(id, ((i,i) for i in xrange(10))) -## >>> int(max(tupleids) - min(tupleids)) -## 0 + >>> from test.test_support import check_impl_detail + >>> tupleids = map(id, ((i,i) for i in xrange(10))) + >>> int(max(tupleids) - min(tupleids)) if check_impl_detail() else 0 + 0 Verify that syntax error's are raised for genexps used as lvalues @@ -198,13 +199,13 @@ >>> g = (10 // i for i in (5, 0, 2)) >>> g.next() 2 - >>> g.next() + >>> g.next() # doctest: +ELLIPSIS Traceback (most recent call last): File "", line 1, in -toplevel- g.next() File "", line 1, in g = (10 // i for i in (5, 0, 2)) - ZeroDivisionError: integer division by zero + ZeroDivisionError: integer division...by zero >>> g.next() Traceback (most recent call last): File "", line 1, in -toplevel- @@ -224,7 +225,7 @@ True >>> print g.next.__doc__ - next() -> the next value, or raise StopIteration + x.next() -> the next value, or raise StopIteration >>> import types >>> isinstance(g, 
types.GeneratorType) True diff --git a/lib-python/2.7.0/test/test_import.py b/lib-python/modified-2.7.0/test/test_import.py copy from lib-python/2.7.0/test/test_import.py copy to lib-python/modified-2.7.0/test/test_import.py --- a/lib-python/2.7.0/test/test_import.py +++ b/lib-python/modified-2.7.0/test/test_import.py @@ -7,7 +7,8 @@ import sys import unittest from test.test_support import (unlink, TESTFN, unload, run_unittest, rmtree, - is_jython, check_warnings, EnvironmentVarGuard) + is_jython, check_warnings, EnvironmentVarGuard, + impl_detail, check_impl_detail) def remove_files(name): @@ -68,7 +69,8 @@ self.assertEqual(mod.b, b, "module loaded (%s) but contents invalid" % mod) finally: - unlink(source) + if check_impl_detail(pypy=False): + unlink(source) try: imp.reload(mod) @@ -148,13 +150,16 @@ # Compile & remove .py file, we only need .pyc (or .pyo). with open(filename, 'r') as f: py_compile.compile(filename) - unlink(filename) + if check_impl_detail(pypy=False): + # pypy refuses to import a .pyc if the .py does not exist + unlink(filename) # Need to be able to load from current dir. sys.path.append('') # This used to crash. exec 'import ' + module + reload(longlist) # Cleanup. 
del sys.path[-1] @@ -314,6 +319,7 @@ self.assertEqual(mod.code_filename, self.file_name) self.assertEqual(mod.func_filename, self.file_name) + @impl_detail("pypy refuses to import without a .py source", pypy=False) def test_module_without_source(self): target = "another_module.py" py_compile.compile(self.file_name, dfile=target) diff --git a/lib-python/2.7.0/test/test_inspect.py b/lib-python/modified-2.7.0/test/test_inspect.py copy from lib-python/2.7.0/test/test_inspect.py copy to lib-python/modified-2.7.0/test/test_inspect.py --- a/lib-python/2.7.0/test/test_inspect.py +++ b/lib-python/modified-2.7.0/test/test_inspect.py @@ -4,11 +4,11 @@ import unittest import inspect import linecache -import datetime from UserList import UserList from UserDict import UserDict from test.test_support import run_unittest, check_py3k_warnings +from test.test_support import check_impl_detail with check_py3k_warnings( ("tuple parameter unpacking has been removed", SyntaxWarning), @@ -74,7 +74,8 @@ def test_excluding_predicates(self): self.istest(inspect.isbuiltin, 'sys.exit') - self.istest(inspect.isbuiltin, '[].append') + if check_impl_detail(): + self.istest(inspect.isbuiltin, '[].append') self.istest(inspect.iscode, 'mod.spam.func_code') self.istest(inspect.isframe, 'tb.tb_frame') self.istest(inspect.isfunction, 'mod.spam') @@ -92,9 +93,9 @@ else: self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals)) if hasattr(types, 'MemberDescriptorType'): - self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days') + self.istest(inspect.ismemberdescriptor, 'type(lambda: None).func_globals') else: - self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days)) + self.assertFalse(inspect.ismemberdescriptor(type(lambda: None).func_globals)) def test_isroutine(self): self.assertTrue(inspect.isroutine(mod.spam)) @@ -567,7 +568,8 @@ else: self.fail('Exception not raised') self.assertIs(type(ex1), type(ex2)) - self.assertEqual(str(ex1), str(ex2)) + if 
check_impl_detail(): + self.assertEqual(str(ex1), str(ex2)) def makeCallable(self, signature): """Create a function that returns its locals(), excluding the diff --git a/lib-python/2.7.0/test/test_io.py b/lib-python/modified-2.7.0/test/test_io.py copy from lib-python/2.7.0/test/test_io.py copy to lib-python/modified-2.7.0/test/test_io.py --- a/lib-python/2.7.0/test/test_io.py +++ b/lib-python/modified-2.7.0/test/test_io.py @@ -2516,6 +2516,31 @@ def check_interrupted_write(self, item, bytes, **fdopen_kwargs): """Check that a partial write, when it gets interrupted, properly invokes the signal handler.""" + + # XXX This test has three flaws that appear when objects are + # XXX not reference counted. + + # - if wio.write() happens to trigger a garbage collection, + # the signal exception may be raised when some __del__ + # method is running; it will not reach the assertRaises() + # call. + + # - more subtle, if the wio object is not destroyed at once + # and survives this function, the next opened file is likely + # to have the same fileno (since the file descriptor was + # actively closed). When wio.__del__ is finally called, it + # will close the other's test file... To trigger this with + # CPython, try adding "global wio" in this function. + + # - This happens only for streams created by the _pyio module, + # because a wio.close() that fails still consider that the + # file needs to be closed again. You can try adding an + # "assert wio.closed" at the end of the function. + + # Fortunately, a little gc.gollect() seems to be enough to + # work around all these issues. + support.gc_collect() + read_results = [] def _read(): s = os.read(r, 1) diff --git a/lib-python/modified-2.7.0/test/test_isinstance.py b/lib-python/modified-2.7.0/test/test_isinstance.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7.0/test/test_isinstance.py @@ -0,0 +1,288 @@ +# Tests some corner cases with isinstance() and issubclass(). 
While these +# tests use new style classes and properties, they actually do whitebox +# testing of error conditions uncovered when using extension types. + +import unittest +from test import test_support +import sys + + + +class TestIsInstanceExceptions(unittest.TestCase): + # Test to make sure that an AttributeError when accessing the instance's + # class's bases is masked. This was actually a bug in Python 2.2 and + # 2.2.1 where the exception wasn't caught but it also wasn't being cleared + # (leading to an "undetected error" in the debug build). Set up is, + # isinstance(inst, cls) where: + # + # - inst isn't an InstanceType + # - cls isn't a ClassType, a TypeType, or a TupleType + # - cls has a __bases__ attribute + # - inst has a __class__ attribute + # - inst.__class__ as no __bases__ attribute + # + # Sounds complicated, I know, but this mimics a situation where an + # extension type raises an AttributeError when its __bases__ attribute is + # gotten. In that case, isinstance() should return False. + def test_class_has_no_bases(self): + class I(object): + def getclass(self): + # This must return an object that has no __bases__ attribute + return None + __class__ = property(getclass) + + class C(object): + def getbases(self): + return () + __bases__ = property(getbases) + + self.assertEqual(False, isinstance(I(), C())) + + # Like above except that inst.__class__.__bases__ raises an exception + # other than AttributeError + def test_bases_raises_other_than_attribute_error(self): + class E(object): + def getbases(self): + raise RuntimeError + __bases__ = property(getbases) + + class I(object): + def getclass(self): + return E() + __class__ = property(getclass) + + class C(object): + def getbases(self): + return () + __bases__ = property(getbases) + + self.assertRaises(RuntimeError, isinstance, I(), C()) + + # Here's a situation where getattr(cls, '__bases__') raises an exception. 
+ # If that exception is not AttributeError, it should not get masked + def test_dont_mask_non_attribute_error(self): + class I: pass + + class C(object): + def getbases(self): + raise RuntimeError + __bases__ = property(getbases) + + self.assertRaises(RuntimeError, isinstance, I(), C()) + + # Like above, except that getattr(cls, '__bases__') raises an + # AttributeError, which /should/ get masked as a TypeError + def test_mask_attribute_error(self): + class I: pass + + class C(object): + def getbases(self): + raise AttributeError + __bases__ = property(getbases) + + self.assertRaises(TypeError, isinstance, I(), C()) + + + +# These tests are similar to above, but tickle certain code paths in +# issubclass() instead of isinstance() -- really PyObject_IsSubclass() +# vs. PyObject_IsInstance(). +class TestIsSubclassExceptions(unittest.TestCase): + def test_dont_mask_non_attribute_error(self): + class C(object): + def getbases(self): + raise RuntimeError + __bases__ = property(getbases) + + class S(C): pass + + self.assertRaises(RuntimeError, issubclass, C(), S()) + + def test_mask_attribute_error(self): + class C(object): + def getbases(self): + raise AttributeError + __bases__ = property(getbases) + + class S(C): pass + + self.assertRaises(TypeError, issubclass, C(), S()) + + # Like above, but test the second branch, where the __bases__ of the + # second arg (the cls arg) is tested. This means the first arg must + # return a valid __bases__, and it's okay for it to be a normal -- + # unrelated by inheritance -- class. 
+ def test_dont_mask_non_attribute_error_in_cls_arg(self): + class B: pass + + class C(object): + def getbases(self): + raise RuntimeError + __bases__ = property(getbases) + + self.assertRaises(RuntimeError, issubclass, B, C()) + + def test_mask_attribute_error_in_cls_arg(self): + class B: pass + + class C(object): + def getbases(self): + raise AttributeError + __bases__ = property(getbases) + + self.assertRaises(TypeError, issubclass, B, C()) + + + +# meta classes for creating abstract classes and instances +class AbstractClass(object): + def __init__(self, bases): + self.bases = bases + + def getbases(self): + return self.bases + __bases__ = property(getbases) + + def __call__(self): + return AbstractInstance(self) + +class AbstractInstance(object): + def __init__(self, klass): + self.klass = klass + + def getclass(self): + return self.klass + __class__ = property(getclass) + +# abstract classes +AbstractSuper = AbstractClass(bases=()) + +AbstractChild = AbstractClass(bases=(AbstractSuper,)) + +# normal classes +class Super: + pass + +class Child(Super): + pass + +# new-style classes +class NewSuper(object): + pass + +class NewChild(NewSuper): + pass + + + +class TestIsInstanceIsSubclass(unittest.TestCase): + # Tests to ensure that isinstance and issubclass work on abstract + # classes and instances. Before the 2.2 release, TypeErrors were + # raised when boolean values should have been returned. The bug was + # triggered by mixing 'normal' classes and instances were with + # 'abstract' classes and instances. This case tries to test all + # combinations. 
+ + def test_isinstance_normal(self): + # normal instances + self.assertEqual(True, isinstance(Super(), Super)) + self.assertEqual(False, isinstance(Super(), Child)) + self.assertEqual(False, isinstance(Super(), AbstractSuper)) + self.assertEqual(False, isinstance(Super(), AbstractChild)) + + self.assertEqual(True, isinstance(Child(), Super)) + self.assertEqual(False, isinstance(Child(), AbstractSuper)) + + def test_isinstance_abstract(self): + # abstract instances + self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper)) + self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild)) + self.assertEqual(False, isinstance(AbstractSuper(), Super)) + self.assertEqual(False, isinstance(AbstractSuper(), Child)) + + self.assertEqual(True, isinstance(AbstractChild(), AbstractChild)) + self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper)) + self.assertEqual(False, isinstance(AbstractChild(), Super)) + self.assertEqual(False, isinstance(AbstractChild(), Child)) + + def test_subclass_normal(self): + # normal classes + self.assertEqual(True, issubclass(Super, Super)) + self.assertEqual(False, issubclass(Super, AbstractSuper)) + self.assertEqual(False, issubclass(Super, Child)) + + self.assertEqual(True, issubclass(Child, Child)) + self.assertEqual(True, issubclass(Child, Super)) + self.assertEqual(False, issubclass(Child, AbstractSuper)) + + def test_subclass_abstract(self): + # abstract classes + self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper)) + self.assertEqual(False, issubclass(AbstractSuper, AbstractChild)) + self.assertEqual(False, issubclass(AbstractSuper, Child)) + + self.assertEqual(True, issubclass(AbstractChild, AbstractChild)) + self.assertEqual(True, issubclass(AbstractChild, AbstractSuper)) + self.assertEqual(False, issubclass(AbstractChild, Super)) + self.assertEqual(False, issubclass(AbstractChild, Child)) + + def test_subclass_tuple(self): + # test with a tuple as the second argument classes + 
self.assertEqual(True, issubclass(Child, (Child,))) + self.assertEqual(True, issubclass(Child, (Super,))) + self.assertEqual(False, issubclass(Super, (Child,))) + self.assertEqual(True, issubclass(Super, (Child, Super))) + self.assertEqual(False, issubclass(Child, ())) + self.assertEqual(True, issubclass(Super, (Child, (Super,)))) + + self.assertEqual(True, issubclass(NewChild, (NewChild,))) + self.assertEqual(True, issubclass(NewChild, (NewSuper,))) + self.assertEqual(False, issubclass(NewSuper, (NewChild,))) + self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper))) + self.assertEqual(False, issubclass(NewChild, ())) + self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,)))) + + self.assertEqual(True, issubclass(int, (long, (float, int)))) + if test_support.have_unicode: + self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring)))) + + def test_subclass_recursion_limit(self): + # make sure that issubclass raises RuntimeError before the C stack is + # blown + self.assertRaises(RuntimeError, blowstack, issubclass, str, str) + + def test_isinstance_recursion_limit(self): + # make sure that issubclass raises RuntimeError before the C stack is + # blown + self.assertRaises(RuntimeError, blowstack, isinstance, '', str) + +def blowstack(fxn, arg, compare_to): + # Make sure that calling isinstance with a deeply nested tuple for its + # argument will raise RuntimeError eventually. 
+ tuple_arg = (compare_to,) + + + if test_support.check_impl_detail(cpython=True): + RECURSION_LIMIT = sys.getrecursionlimit() + else: + # on non-CPython implementations, the maximum + # actual recursion limit might be higher, but + # probably not higher than 99999 + # + RECURSION_LIMIT = 99999 + + for cnt in xrange(RECURSION_LIMIT+5): + tuple_arg = (tuple_arg,) + fxn(arg, tuple_arg) + + +def test_main(): + test_support.run_unittest( + TestIsInstanceExceptions, + TestIsSubclassExceptions, + TestIsInstanceIsSubclass + ) + + +if __name__ == '__main__': + test_main() diff --git a/lib-python/2.7.0/test/test_itertools.py b/lib-python/modified-2.7.0/test/test_itertools.py copy from lib-python/2.7.0/test/test_itertools.py copy to lib-python/modified-2.7.0/test/test_itertools.py --- a/lib-python/2.7.0/test/test_itertools.py +++ b/lib-python/modified-2.7.0/test/test_itertools.py @@ -137,6 +137,8 @@ self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version + @test_support.impl_detail("tuple reuse is specific to CPython") + def test_combinations_tuple_reuse(self): # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -207,7 +209,10 @@ self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version + @test_support.impl_detail("tuple reuse is specific to CPython") + def test_combinations_with_replacement_tuple_reuse(self): # Test implementation detail: tuple re-use + cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -271,6 +276,8 @@ self.assertEqual(result, list(permutations(values, None))) # test r as None 
self.assertEqual(result, list(permutations(values))) # test default r + @test_support.impl_detail("tuple reuse is specific to CPython") + def test_permutations_tuple_reuse(self): # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) @@ -526,6 +533,9 @@ self.assertEqual(list(izip()), zip()) self.assertRaises(TypeError, izip, 3) self.assertRaises(TypeError, izip, range(3), 3) + + @test_support.impl_detail("tuple reuse is specific to CPython") + def test_izip_tuple_reuse(self): # Check tuple re-use (implementation detail) self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')], zip('abc', 'def')) @@ -575,6 +585,8 @@ else: self.fail('Did not raise Type in: ' + stmt) + @test_support.impl_detail("tuple reuse is specific to CPython") + def test_iziplongest_tuple_reuse(self): # Check tuple re-use (implementation detail) self.assertEqual([tuple(list(pair)) for pair in izip_longest('abc', 'def')], zip('abc', 'def')) @@ -683,6 +695,8 @@ args = map(iter, args) self.assertEqual(len(list(product(*args))), expected_len) + @test_support.impl_detail("tuple reuse is specific to CPython") + def test_product_tuple_reuse(self): # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, product('abc', 'def')))), 1) self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1) @@ -771,11 +785,11 @@ self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1) self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1) self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0) - self.assertRaises(ValueError, islice, xrange(10), 'a') - self.assertRaises(ValueError, islice, xrange(10), 'a', 1) - self.assertRaises(ValueError, islice, xrange(10), 1, 'a') - self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1) - self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1) + self.assertRaises((ValueError,TypeError), islice, 
xrange(10), 'a') + self.assertRaises((ValueError,TypeError), islice, xrange(10), 'a', 1) + self.assertRaises((ValueError,TypeError), islice, xrange(10), 1, 'a') + self.assertRaises((ValueError,TypeError), islice, xrange(10), 'a', 1, 1) + self.assertRaises((ValueError,TypeError), islice, xrange(10), 1, 'a', 1) self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1) def test_takewhile(self): @@ -850,9 +864,17 @@ self.assertRaises(TypeError, tee, [1,2], 3, 'x') # tee object should be instantiable - a, b = tee('abc') - c = type(a)('def') - self.assertEqual(list(c), list('def')) + if test_support.check_impl_detail(): + # XXX I (arigo) would argue that 'type(a)(iterable)' has + # ill-defined semantics: it always return a fresh tee object, + # but depending on whether 'iterable' is itself a tee object + # or not, it is ok or not to continue using 'iterable' after + # the call. I cannot imagine why 'type(a)(non_tee_object)' + # would be useful, as 'iter(non_tee_obect)' is equivalent + # as far as I can see. 
+ a, b = tee('abc') + c = type(a)('def') + self.assertEqual(list(c), list('def')) # test long-lagged and multi-way split a, b, c = tee(xrange(2000), 3) @@ -890,6 +912,7 @@ p = proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) del a + test_support.gc_collect() self.assertRaises(ReferenceError, getattr, p, '__class__') def test_StopIteration(self): @@ -1312,6 +1335,7 @@ class LengthTransparency(unittest.TestCase): + @test_support.impl_detail("__length_hint__() API is undocumented") def test_repeat(self): from test.test_iterlen import len self.assertEqual(len(repeat(None, 50)), 50) diff --git a/lib-python/modified-2.7.0/test/test_linecache.py b/lib-python/modified-2.7.0/test/test_linecache.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7.0/test/test_linecache.py @@ -0,0 +1,131 @@ +""" Tests for the linecache module """ + +import linecache +import unittest +import os.path +from test import test_support as support + + +FILENAME = linecache.__file__ +INVALID_NAME = '!@$)(!@#_1' +EMPTY = '' +TESTS = 'cjkencodings_test inspect_fodder inspect_fodder2 mapping_tests' +TESTS = TESTS.split() +TEST_PATH = os.path.dirname(support.__file__) +MODULES = "linecache abc".split() +MODULE_PATH = os.path.dirname(FILENAME) + +SOURCE_1 = ''' +" Docstring " + +def function(): + return result + +''' + +SOURCE_2 = ''' +def f(): + return 1 + 1 + +a = f() + +''' + +SOURCE_3 = ''' +def f(): + return 3''' # No ending newline + + +class LineCacheTests(unittest.TestCase): + + def test_getline(self): + getline = linecache.getline + + # Bad values for line number should return an empty string + self.assertEquals(getline(FILENAME, 2**15), EMPTY) + self.assertEquals(getline(FILENAME, -1), EMPTY) + + # Float values currently raise TypeError, should it? 
+ self.assertRaises(TypeError, getline, FILENAME, 1.1) + + # Bad filenames should return an empty string + self.assertEquals(getline(EMPTY, 1), EMPTY) + self.assertEquals(getline(INVALID_NAME, 1), EMPTY) + + # Check whether lines correspond to those from file iteration + for entry in TESTS: + filename = support.findfile( entry + '.py') + for index, line in enumerate(open(filename)): + self.assertEquals(line, getline(filename, index + 1)) + + # Check module loading + for entry in MODULES: + filename = support.findfile( entry + '.py') + for index, line in enumerate(open(filename)): + self.assertEquals(line, getline(filename, index + 1)) + + # Check that bogus data isn't returned (issue #1309567) + empty = linecache.getlines('a/b/c/__init__.py') + self.assertEquals(empty, []) + + def test_no_ending_newline(self): + self.addCleanup(support.unlink, support.TESTFN) + with open(support.TESTFN, "w") as fp: + fp.write(SOURCE_3) + lines = linecache.getlines(support.TESTFN) + self.assertEqual(lines, ["\n", "def f():\n", " return 3\n"]) + + def test_clearcache(self): + cached = [] + for entry in TESTS: + filename = support.findfile( entry + '.py') + cached.append(filename) + linecache.getline(filename, 1) + + # Are all files cached? + cached_empty = [fn for fn in cached if fn not in linecache.cache] + self.assertEquals(cached_empty, []) + + # Can we clear the cache? 
+ linecache.clearcache() + cached_empty = [fn for fn in cached if fn in linecache.cache] + self.assertEquals(cached_empty, []) + + def test_checkcache(self): + getline = linecache.getline + # Create a source file and cache its contents + source_name = support.TESTFN + '.py' + self.addCleanup(support.unlink, source_name) + with open(source_name, 'w') as source: + source.write(SOURCE_1) + getline(source_name, 1) + + # Keep a copy of the old contents + source_list = [] + with open(source_name) as source: + for index, line in enumerate(source): + self.assertEquals(line, getline(source_name, index + 1)) + source_list.append(line) + + with open(source_name, 'w') as source: + source.write(SOURCE_2) + + # Try to update a bogus cache entry + linecache.checkcache('dummy') + + # Check that the cache matches the old contents + for index, line in enumerate(source_list): + self.assertEquals(line, getline(source_name, index + 1)) + + # Update the cache and check whether it matches the new source file + linecache.checkcache(source_name) + with open(source_name) as source: + for index, line in enumerate(source): + self.assertEquals(line, getline(source_name, index + 1)) + source_list.append(line) + +def test_main(): + support.run_unittest(LineCacheTests) + +if __name__ == "__main__": + test_main() diff --git a/lib-python/2.7.0/test/test_list.py b/lib-python/modified-2.7.0/test/test_list.py copy from lib-python/2.7.0/test/test_list.py copy to lib-python/modified-2.7.0/test/test_list.py --- a/lib-python/2.7.0/test/test_list.py +++ b/lib-python/modified-2.7.0/test/test_list.py @@ -15,6 +15,10 @@ self.assertEqual(list(''), []) self.assertEqual(list('spam'), ['s', 'p', 'a', 'm']) + # the following test also works with pypy, but eats all your address + # space's RAM before raising and takes too long. + @test_support.impl_detail("eats all your RAM before working", pypy=False) + def test_segfault_1(self): if sys.maxsize == 0x7fffffff: # This test can currently only work on 32-bit machines. 
# XXX If/when PySequence_Length() returns a ssize_t, it should be @@ -32,6 +36,7 @@ # http://sources.redhat.com/ml/newlib/2002/msg00369.html self.assertRaises(MemoryError, list, xrange(sys.maxint // 2)) + def test_segfault_2(self): # This code used to segfault in Py2.4a3 x = [] x.extend(-y for y in x) diff --git a/lib-python/2.7.0/test/test_memoryio.py b/lib-python/modified-2.7.0/test/test_memoryio.py copy from lib-python/2.7.0/test/test_memoryio.py copy to lib-python/modified-2.7.0/test/test_memoryio.py --- a/lib-python/2.7.0/test/test_memoryio.py +++ b/lib-python/modified-2.7.0/test/test_memoryio.py @@ -612,7 +612,7 @@ state = memio.__getstate__() self.assertEqual(len(state), 3) bytearray(state[0]) # Check if state[0] supports the buffer interface. - self.assertIsInstance(state[1], int) + self.assertIsInstance(state[1], (int, long)) self.assert_(isinstance(state[2], dict) or state[2] is None) memio.close() self.assertRaises(ValueError, memio.__getstate__) diff --git a/lib-python/2.7.0/test/test_memoryview.py b/lib-python/modified-2.7.0/test/test_memoryview.py copy from lib-python/2.7.0/test/test_memoryview.py copy to lib-python/modified-2.7.0/test/test_memoryview.py --- a/lib-python/2.7.0/test/test_memoryview.py +++ b/lib-python/modified-2.7.0/test/test_memoryview.py @@ -25,7 +25,8 @@ def check_getitem_with_type(self, tp): item = self.getitem_type b = tp(self._source) - oldrefcount = sys.getrefcount(b) + if hasattr(sys, 'getrefcount'): + oldrefcount = sys.getrefcount(b) m = self._view(b) self.assertEquals(m[0], item(b"a")) self.assertIsInstance(m[0], bytes) @@ -42,7 +43,8 @@ self.assertRaises(TypeError, lambda: m[0.0]) self.assertRaises(TypeError, lambda: m["a"]) m = None - self.assertEquals(sys.getrefcount(b), oldrefcount) + if hasattr(sys, 'getrefcount'): + self.assertEquals(sys.getrefcount(b), oldrefcount) def test_getitem(self): for tp in self._types: @@ -64,7 +66,8 @@ if not self.ro_type: return b = self.ro_type(self._source) - oldrefcount = 
sys.getrefcount(b) + if hasattr(sys, 'getrefcount'): + oldrefcount = sys.getrefcount(b) m = self._view(b) def setitem(value): m[0] = value @@ -72,14 +75,16 @@ self.assertRaises(TypeError, setitem, 65) self.assertRaises(TypeError, setitem, memoryview(b"a")) m = None - self.assertEquals(sys.getrefcount(b), oldrefcount) + if hasattr(sys, 'getrefcount'): + self.assertEquals(sys.getrefcount(b), oldrefcount) def test_setitem_writable(self): if not self.rw_type: return tp = self.rw_type b = self.rw_type(self._source) - oldrefcount = sys.getrefcount(b) + if hasattr(sys, 'getrefcount'): + oldrefcount = sys.getrefcount(b) m = self._view(b) m[0] = tp(b"0") self._check_contents(tp, b, b"0bcdef") @@ -109,13 +114,14 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises(ValueError, setitem, 0, b"") - self.assertRaises(ValueError, setitem, 0, b"ab") + self.assertRaises((ValueError, TypeError), setitem, 0, b"") + self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") m = None - self.assertEquals(sys.getrefcount(b), oldrefcount) + if hasattr(sys, 'getrefcount'): + self.assertEquals(sys.getrefcount(b), oldrefcount) def test_delitem(self): for tp in self._types: @@ -281,6 +287,7 @@ def _check_contents(self, tp, obj, contents): self.assertEquals(obj[1:7], tp(contents)) + @unittest.skipUnless(hasattr(sys, 'getrefcount'), "Reference counting") def test_refs(self): for tp in self._types: m = memoryview(tp(self._source)) diff --git a/lib-python/modified-2.7.0/test/test_mmap.py b/lib-python/modified-2.7.0/test/test_mmap.py --- a/lib-python/modified-2.7.0/test/test_mmap.py +++ b/lib-python/modified-2.7.0/test/test_mmap.py @@ -168,9 +168,11 @@ else: self.fail("Able to resize readonly memory map") f.close() + m.close() del m, f - self.assertEqual(open(TESTFN, 
"rb").read(), 'a'*mapsize, - "Readonly memory map data file was modified") + with open(TESTFN, "rb") as f: + self.assertEqual(f.read(), 'a'*mapsize, + "Readonly memory map data file was modified") # Opening mmap with size too big import sys @@ -220,11 +222,13 @@ self.assertEqual(m[:], 'd' * mapsize, "Copy-on-write memory map data not written correctly.") m.flush() - self.assertEqual(open(TESTFN, "rb").read(), 'c'*mapsize, - "Copy-on-write test data file should not be modified.") + f.close() + with open(TESTFN, "rb") as f: + self.assertEqual(f.read(), 'c'*mapsize, + "Copy-on-write test data file should not be modified.") # Ensuring copy-on-write maps cannot be resized self.assertRaises(TypeError, m.resize, 2*mapsize) - f.close() + m.close() del m, f # Ensuring invalid access parameter raises exception @@ -279,6 +283,7 @@ self.assertEqual(m.find('one', 1), 8) self.assertEqual(m.find('one', 1, -1), 8) self.assertEqual(m.find('one', 1, -2), -1) + m.close() def test_rfind(self): @@ -297,6 +302,7 @@ self.assertEqual(m.rfind('one', 0, -2), 0) self.assertEqual(m.rfind('one', 1, -1), 8) self.assertEqual(m.rfind('one', 1, -2), -1) + m.close() def test_double_close(self): @@ -538,6 +544,7 @@ self.assertEquals(m[:], "012bar6789") m.seek(8) self.assertRaises(ValueError, m.write, "bar") + m.close() if os.name == 'nt': def test_tagname(self): @@ -575,7 +582,8 @@ m.close() # Should not crash (Issue 5385) - open(TESTFN, "wb").write("x"*10) + with open(TESTFN, "wb") as f: + f.write("x"*10) f = open(TESTFN, "r+b") m = mmap.mmap(f.fileno(), 0) f.close() diff --git a/lib-python/2.7.0/test/test_module.py b/lib-python/modified-2.7.0/test/test_module.py copy from lib-python/2.7.0/test/test_module.py copy to lib-python/modified-2.7.0/test/test_module.py --- a/lib-python/2.7.0/test/test_module.py +++ b/lib-python/modified-2.7.0/test/test_module.py @@ -1,6 +1,6 @@ # Test the module type import unittest -from test.test_support import run_unittest, gc_collect +from test.test_support import 
run_unittest, gc_collect, check_impl_detail import sys ModuleType = type(sys) @@ -10,8 +10,10 @@ # An uninitialized module has no __dict__ or __name__, # and __doc__ is None foo = ModuleType.__new__(ModuleType) - self.assertTrue(foo.__dict__ is None) - self.assertRaises(SystemError, dir, foo) + self.assertFalse(foo.__dict__) + if check_impl_detail(): + self.assertTrue(foo.__dict__ is None) + self.assertRaises(SystemError, dir, foo) try: s = foo.__name__ self.fail("__name__ = %s" % repr(s)) diff --git a/lib-python/2.7.0/test/test_peepholer.py b/lib-python/modified-2.7.0/test/test_peepholer.py copy from lib-python/2.7.0/test/test_peepholer.py copy to lib-python/modified-2.7.0/test/test_peepholer.py --- a/lib-python/2.7.0/test/test_peepholer.py +++ b/lib-python/modified-2.7.0/test/test_peepholer.py @@ -41,7 +41,7 @@ def test_none_as_constant(self): # LOAD_GLOBAL None --> LOAD_CONST None def f(x): - None + y = None return x asm = disassemble(f) for elem in ('LOAD_GLOBAL',): @@ -67,10 +67,13 @@ self.assertIn(elem, asm) def test_pack_unpack(self): + # On PyPy, "a, b = ..." is even more optimized, by removing + # the ROT_TWO. But the ROT_TWO is not removed if assigning + # to more complex expressions, so check that. for line, elem in ( ('a, = a,', 'LOAD_CONST',), - ('a, b = a, b', 'ROT_TWO',), - ('a, b, c = a, b, c', 'ROT_THREE',), + ('a[1], b = a, b', 'ROT_TWO',), + ('a, b[2], c = a, b, c', 'ROT_THREE',), ): asm = dis_single(line) self.assertIn(elem, asm) @@ -78,6 +81,8 @@ self.assertNotIn('UNPACK_TUPLE', asm) def test_folding_of_tuples_of_constants(self): + # On CPython, "a,b,c=1,2,3" turns into "a,b,c=" + # but on PyPy, it turns into "a=1;b=2;c=3". 
for line, elem in ( ('a = 1,2,3', '((1, 2, 3))'), ('("a","b","c")', "(('a', 'b', 'c'))"), @@ -86,7 +91,8 @@ ('((1, 2), 3, 4)', '(((1, 2), 3, 4))'), ): asm = dis_single(line) - self.assertIn(elem, asm) + self.assert_(elem in asm or ( + line == 'a,b,c = 1,2,3' and 'UNPACK_TUPLE' not in asm)) self.assertNotIn('BUILD_TUPLE', asm) # Bug 1053819: Tuple of constants misidentified when presented with: diff --git a/lib-python/2.7.0/test/test_pprint.py b/lib-python/modified-2.7.0/test/test_pprint.py copy from lib-python/2.7.0/test/test_pprint.py copy to lib-python/modified-2.7.0/test/test_pprint.py --- a/lib-python/2.7.0/test/test_pprint.py +++ b/lib-python/modified-2.7.0/test/test_pprint.py @@ -233,7 +233,16 @@ frozenset([0, 2]), frozenset([0, 1])])}""" cube = test.test_set.cube(3) - self.assertEqual(pprint.pformat(cube), cube_repr_tgt) + # XXX issues of dictionary order, and for the case below, + # order of items in the frozenset([...]) representation. + # Whether we get precisely cube_repr_tgt or not is open + # to implementation-dependent choices (this test probably + # fails horribly in CPython if we tweak the dict order too). 
+ got = pprint.pformat(cube) + if test.test_support.check_impl_detail(cpython=True): + self.assertEqual(got, cube_repr_tgt) + else: + self.assertEqual(eval(got), cube) cubo_repr_tgt = """\ {frozenset([frozenset([0, 2]), frozenset([0])]): frozenset([frozenset([frozenset([0, 2]), @@ -393,7 +402,11 @@ 2])])])}""" cubo = test.test_set.linegraph(cube) - self.assertEqual(pprint.pformat(cubo), cubo_repr_tgt) + got = pprint.pformat(cubo) + if test.test_support.check_impl_detail(cpython=True): + self.assertEqual(got, cubo_repr_tgt) + else: + self.assertEqual(eval(got), cubo) def test_depth(self): nested_tuple = (1, (2, (3, (4, (5, 6))))) diff --git a/lib-python/2.7.0/test/test_pyclbr.py b/lib-python/modified-2.7.0/test/test_pyclbr.py copy from lib-python/2.7.0/test/test_pyclbr.py copy to lib-python/modified-2.7.0/test/test_pyclbr.py diff --git a/lib-python/2.7.0/test/test_pydoc.py b/lib-python/modified-2.7.0/test/test_pydoc.py copy from lib-python/2.7.0/test/test_pydoc.py copy to lib-python/modified-2.7.0/test/test_pydoc.py --- a/lib-python/2.7.0/test/test_pydoc.py +++ b/lib-python/modified-2.7.0/test/test_pydoc.py @@ -265,8 +265,8 @@ testpairs = ( ('i_am_not_here', 'i_am_not_here'), ('test.i_am_not_here_either', 'i_am_not_here_either'), - ('test.i_am_not_here.neither_am_i', 'i_am_not_here.neither_am_i'), - ('i_am_not_here.{}'.format(modname), 'i_am_not_here.{}'.format(modname)), + ('test.i_am_not_here.neither_am_i', 'i_am_not_here'), + ('i_am_not_here.{}'.format(modname), 'i_am_not_here'), ('test.{}'.format(modname), modname), ) @@ -290,8 +290,8 @@ result = run_pydoc(modname) finally: forget(modname) - expected = badimport_pattern % (modname, expectedinmsg) - self.assertEqual(expected, result) + expected = badimport_pattern % (modname, '(.+\\.)?' 
+ expectedinmsg + '(\\..+)?$') + self.assertTrue(re.match(expected, result)) def test_input_strip(self): missing_module = " test.i_am_not_here " diff --git a/lib-python/modified-2.7.0/test/test_rlcompleter.py b/lib-python/modified-2.7.0/test/test_rlcompleter.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7.0/test/test_rlcompleter.py @@ -0,0 +1,73 @@ +from test import test_support as support +import unittest +import __builtin__ as builtins +import rlcompleter + +class CompleteMe(object): + """ Trivial class used in testing rlcompleter.Completer. """ + spam = 1 + + +class TestRlcompleter(unittest.TestCase): + def setUp(self): + self.stdcompleter = rlcompleter.Completer() + self.completer = rlcompleter.Completer(dict(spam=int, + egg=str, + CompleteMe=CompleteMe)) + + # forces stdcompleter to bind builtins namespace + self.stdcompleter.complete('', 0) + + def test_namespace(self): + class A(dict): + pass + class B(list): + pass + + self.assertTrue(self.stdcompleter.use_main_ns) + self.assertFalse(self.completer.use_main_ns) + self.assertFalse(rlcompleter.Completer(A()).use_main_ns) + self.assertRaises(TypeError, rlcompleter.Completer, B((1,))) + + def test_global_matches(self): + # test with builtins namespace + self.assertEqual(sorted(self.stdcompleter.global_matches('di')), + [x+'(' for x in dir(builtins) if x.startswith('di')]) + self.assertEqual(sorted(self.stdcompleter.global_matches('st')), + [x+'(' for x in dir(builtins) if x.startswith('st')]) + self.assertEqual(self.stdcompleter.global_matches('akaksajadhak'), []) + + # test with a customized namespace + self.assertEqual(self.completer.global_matches('CompleteM'), + ['CompleteMe(']) + self.assertEqual(self.completer.global_matches('eg'), + ['egg(']) + # XXX: see issue5256 + self.assertEqual(self.completer.global_matches('CompleteM'), + ['CompleteMe(']) + + def test_attr_matches(self): + # test with builtins namespace + self.assertEqual(self.stdcompleter.attr_matches('str.s'), + 
['str.{}('.format(x) for x in dir(str) + if x.startswith('s')]) + self.assertEqual(self.stdcompleter.attr_matches('tuple.foospamegg'), []) + + # test with a customized namespace + self.assertEqual(self.completer.attr_matches('CompleteMe.sp'), + ['CompleteMe.spam']) + self.assertEqual(self.completer.attr_matches('Completeme.egg'), []) + + CompleteMe.me = CompleteMe + self.assertEqual(self.completer.attr_matches('CompleteMe.me.me.sp'), + ['CompleteMe.me.me.spam']) + self.assertEqual(self.completer.attr_matches('egg.s'), + ['egg.{}('.format(x) for x in dir(str) + if x.startswith('s')]) + +def test_main(): + support.run_unittest(TestRlcompleter) + + +if __name__ == '__main__': + test_main() diff --git a/lib-python/2.7.0/test/test_site.py b/lib-python/modified-2.7.0/test/test_site.py copy from lib-python/2.7.0/test/test_site.py copy to lib-python/modified-2.7.0/test/test_site.py --- a/lib-python/2.7.0/test/test_site.py +++ b/lib-python/modified-2.7.0/test/test_site.py @@ -168,6 +168,10 @@ self.assertEqual(len(dirs), 1) wanted = os.path.join('xoxo', 'Lib', 'site-packages') self.assertEquals(dirs[0], wanted) + elif '__pypy__' in sys.builtin_module_names: + self.assertEquals(len(dirs), 1) + wanted = os.path.join('xoxo', 'site-packages') + self.assertEquals(dirs[0], wanted) elif os.sep == '/': self.assertEqual(len(dirs), 2) wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3], diff --git a/lib-python/modified-2.7.0/test/test_ssl.py b/lib-python/modified-2.7.0/test/test_ssl.py new file mode 100644 --- /dev/null +++ b/lib-python/modified-2.7.0/test/test_ssl.py @@ -0,0 +1,1318 @@ +# Test the support for SSL and sockets + +import sys +import unittest +from test import test_support +import asyncore +import socket +import select +import time +import gc +import os +import errno +import pprint +import urllib, urlparse +import traceback +import weakref +import functools +import platform + +from BaseHTTPServer import HTTPServer +from SimpleHTTPServer import 
SimpleHTTPRequestHandler + +# Optionally test SSL support, if we have it in the tested platform +skip_expected = False +try: + import ssl +except ImportError: + skip_expected = True + +HOST = test_support.HOST +CERTFILE = None +SVN_PYTHON_ORG_ROOT_CERT = None + +def handle_error(prefix): + exc_format = ' '.join(traceback.format_exception(*sys.exc_info())) + if test_support.verbose: + sys.stdout.write(prefix + exc_format) + + +class BasicTests(unittest.TestCase): + + def test_sslwrap_simple(self): + # A crude test for the legacy API + try: + ssl.sslwrap_simple(socket.socket(socket.AF_INET)) + except IOError, e: + if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that + pass + else: + raise + try: + ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock) + except IOError, e: + if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that + pass + else: + raise + +# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2 +def skip_if_broken_ubuntu_ssl(func): + # We need to access the lower-level wrapper in order to create an + # implicit SSL context without trying to connect or listen. 
+ try: + import _ssl + except ImportError: + # The returned function won't get executed, just ignore the error + pass + @functools.wraps(func) + def f(*args, **kwargs): + try: + s = socket.socket(socket.AF_INET) + _ssl.sslwrap(s._sock, 0, None, None, + ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None) + except ssl.SSLError as e: + if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and + platform.linux_distribution() == ('debian', 'squeeze/sid', '') + and 'Invalid SSL protocol variant specified' in str(e)): + raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") + return func(*args, **kwargs) + return f + + +class BasicSocketTests(unittest.TestCase): + + def test_constants(self): + ssl.PROTOCOL_SSLv2 + ssl.PROTOCOL_SSLv23 + ssl.PROTOCOL_SSLv3 + ssl.PROTOCOL_TLSv1 + ssl.CERT_NONE + ssl.CERT_OPTIONAL + ssl.CERT_REQUIRED + + def test_random(self): + v = ssl.RAND_status() + if test_support.verbose: + sys.stdout.write("\n RAND_status is %d (%s)\n" + % (v, (v and "sufficient randomness") or + "insufficient randomness")) + try: + ssl.RAND_egd(1) + except TypeError: + pass + else: + print "didn't raise TypeError" + ssl.RAND_add("this is a random string", 75.0) + + @test_support.impl_detail("obscure test") + def test_parse_cert(self): + # note that this uses an 'unofficial' function in _ssl.c, + # provided solely for this test, to exercise the certificate + # parsing code + p = ssl._ssl._test_decode_cert(CERTFILE, False) + if test_support.verbose: + sys.stdout.write("\n" + pprint.pformat(p) + "\n") + + def test_DER_to_PEM(self): + with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + pem = f.read() + d1 = ssl.PEM_cert_to_DER_cert(pem) + p2 = ssl.DER_cert_to_PEM_cert(d1) + d2 = ssl.PEM_cert_to_DER_cert(p2) + self.assertEqual(d1, d2) + if not p2.startswith(ssl.PEM_HEADER + '\n'): + self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2) + if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'): + self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2) + + def 
test_openssl_version(self): + n = ssl.OPENSSL_VERSION_NUMBER + t = ssl.OPENSSL_VERSION_INFO + s = ssl.OPENSSL_VERSION + self.assertIsInstance(n, (int, long)) + self.assertIsInstance(t, tuple) + self.assertIsInstance(s, str) + # Some sanity checks follow + # >= 0.9 + self.assertGreaterEqual(n, 0x900000) + # < 2.0 + self.assertLess(n, 0x20000000) + major, minor, fix, patch, status = t + self.assertGreaterEqual(major, 0) + self.assertLess(major, 2) + self.assertGreaterEqual(minor, 0) + self.assertLess(minor, 256) + self.assertGreaterEqual(fix, 0) + self.assertLess(fix, 256) + self.assertGreaterEqual(patch, 0) + self.assertLessEqual(patch, 26) + self.assertGreaterEqual(status, 0) + self.assertLessEqual(status, 15) + # Version string as returned by OpenSSL, the format might change + self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)), + (s, t)) + + def test_ciphers(self): + if not test_support.is_resource_enabled('network'): + return + remote = ("svn.python.org", 443) + with test_support.transient_internet(remote[0]): + s = ssl.wrap_socket(socket.socket(socket.AF_INET), + cert_reqs=ssl.CERT_NONE, ciphers="ALL") + s.connect(remote) + s = ssl.wrap_socket(socket.socket(socket.AF_INET), + cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") + s.connect(remote) + # Error checking occurs when connecting, because the SSL context + # isn't created before. + s = ssl.wrap_socket(socket.socket(socket.AF_INET), + cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx") + with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"): + s.connect(remote) + + @test_support.cpython_only + def test_refcycle(self): + # Issue #7943: an SSL object doesn't create reference cycles with + # itself. 
+ s = socket.socket(socket.AF_INET) + ss = ssl.wrap_socket(s) + wr = weakref.ref(ss) + del ss + self.assertEqual(wr(), None) + + def test_wrapped_unconnected(self): + # The _delegate_methods in socket.py are correctly delegated to by an + # unconnected SSLSocket, so they will raise a socket.error rather than + # something unexpected like TypeError. + s = socket.socket(socket.AF_INET) + ss = ssl.wrap_socket(s) + self.assertRaises(socket.error, ss.recv, 1) + self.assertRaises(socket.error, ss.recv_into, bytearray(b'x')) + self.assertRaises(socket.error, ss.recvfrom, 1) + self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1) + self.assertRaises(socket.error, ss.send, b'x') + self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0)) + + +class NetworkedTests(unittest.TestCase): + + def test_connect(self): + with test_support.transient_internet("svn.python.org"): + s = ssl.wrap_socket(socket.socket(socket.AF_INET), + cert_reqs=ssl.CERT_NONE) + s.connect(("svn.python.org", 443)) + c = s.getpeercert() + if c: + self.fail("Peer cert %s shouldn't be here!") + s.close() + + # this should fail because we have no verification certs + s = ssl.wrap_socket(socket.socket(socket.AF_INET), + cert_reqs=ssl.CERT_REQUIRED) + try: + s.connect(("svn.python.org", 443)) + except ssl.SSLError: + pass + finally: + s.close() + + # this should succeed because we specify the root cert + s = ssl.wrap_socket(socket.socket(socket.AF_INET), + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + try: + s.connect(("svn.python.org", 443)) + finally: + s.close() + + @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows") + def test_makefile_close(self): + # Issue #5238: creating a file-like object with makefile() shouldn't + # delay closing the underlying "real socket" (here tested with its + # file descriptor, hence skipping the test under Windows). 
+ with test_support.transient_internet("svn.python.org"): + ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) + ss.connect(("svn.python.org", 443)) + fd = ss.fileno() + f = ss.makefile() + f.close() + # The fd is still open + os.read(fd, 0) + # Closing the SSL socket should close the fd too + ss.close() + gc.collect() + with self.assertRaises(OSError) as e: + os.read(fd, 0) + self.assertEqual(e.exception.errno, errno.EBADF) + + def test_non_blocking_handshake(self): + with test_support.transient_internet("svn.python.org"): + s = socket.socket(socket.AF_INET) + s.connect(("svn.python.org", 443)) + s.setblocking(False) + s = ssl.wrap_socket(s, + cert_reqs=ssl.CERT_NONE, + do_handshake_on_connect=False) + count = 0 + while True: + try: + count += 1 + s.do_handshake() + break + except ssl.SSLError, err: + if err.args[0] == ssl.SSL_ERROR_WANT_READ: + select.select([s], [], []) + elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: + select.select([], [s], []) + else: + raise + s.close() + if test_support.verbose: + sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count) + + def test_get_server_certificate(self): + with test_support.transient_internet("svn.python.org"): + pem = ssl.get_server_certificate(("svn.python.org", 443)) + if not pem: + self.fail("No server certificate on svn.python.org:443!") + + try: + pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE) + except ssl.SSLError: + #should fail + pass + else: + self.fail("Got server certificate %s for svn.python.org!" % pem) + + pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + if not pem: + self.fail("No server certificate on svn.python.org:443!") + if test_support.verbose: + sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem) + + def test_algorithms(self): + # Issue #8484: all algorithms should be available when verifying a + # certificate. 
+ # SHA256 was added in OpenSSL 0.9.8 + if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15): + self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION) + # NOTE: https://sha256.tbs-internet.com is another possible test host + remote = ("sha2.hboeck.de", 443) + sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem") + with test_support.transient_internet("sha2.hboeck.de"): + s = ssl.wrap_socket(socket.socket(socket.AF_INET), + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=sha256_cert,) + try: + s.connect(remote) + if test_support.verbose: + sys.stdout.write("\nCipher with %r is %r\n" % + (remote, s.cipher())) + sys.stdout.write("Certificate is:\n%s\n" % + pprint.pformat(s.getpeercert())) + finally: + s.close() + + +try: + import threading +except ImportError: + _have_threads = False +else: + _have_threads = True + + class ThreadedEchoServer(threading.Thread): + + class ConnectionHandler(threading.Thread): + + """A mildly complicated class, because we want it to work both + with and without the SSL wrapper around the socket connection, so + that we can test the STARTTLS functionality.""" + + def __init__(self, server, connsock): + self.server = server + self.running = False + self.sock = connsock + self.sock.setblocking(1) + self.sslconn = None + threading.Thread.__init__(self) + self.daemon = True + + def show_conn_details(self): + if self.server.certreqs == ssl.CERT_REQUIRED: + cert = self.sslconn.getpeercert() + if test_support.verbose and self.server.chatty: + sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n") + cert_binary = self.sslconn.getpeercert(True) + if test_support.verbose and self.server.chatty: + sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n") + cipher = self.sslconn.cipher() + if test_support.verbose and self.server.chatty: + sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n") + + def wrap_conn(self): + try: + self.sslconn = ssl.wrap_socket(self.sock, server_side=True, + 
certfile=self.server.certificate, + ssl_version=self.server.protocol, + ca_certs=self.server.cacerts, + cert_reqs=self.server.certreqs, + ciphers=self.server.ciphers) + except ssl.SSLError: + # XXX Various errors can have happened here, for example + # a mismatching protocol version, an invalid certificate, + # or a low-level bug. This should be made more discriminating. + if self.server.chatty: + handle_error("\n server: bad connection attempt from " + + str(self.sock.getpeername()) + ":\n") + self.close() + self.running = False + self.server.stop() + return False + else: + return True + + def read(self): + if self.sslconn: + return self.sslconn.read() + else: + return self.sock.recv(1024) + + def write(self, bytes): + if self.sslconn: + return self.sslconn.write(bytes) + else: + return self.sock.send(bytes) + + def close(self): + if self.sslconn: + self.sslconn.close() + else: + self.sock._sock.close() + + def run(self): + self.running = True + if not self.server.starttls_server: + if isinstance(self.sock, ssl.SSLSocket): + self.sslconn = self.sock + elif not self.wrap_conn(): + return + self.show_conn_details() + while self.running: + try: + msg = self.read() + if not msg: + # eof, so quit this handler + self.running = False + self.close() + elif msg.strip() == 'over': + if test_support.verbose and self.server.connectionchatty: + sys.stdout.write(" server: client closed connection\n") + self.close() + return + elif self.server.starttls_server and msg.strip() == 'STARTTLS': + if test_support.verbose and self.server.connectionchatty: + sys.stdout.write(" server: read STARTTLS from client, sending OK...\n") + self.write("OK\n") + if not self.wrap_conn(): + return + elif self.server.starttls_server and self.sslconn and msg.strip() == 'ENDTLS': + if test_support.verbose and self.server.connectionchatty: + sys.stdout.write(" server: read ENDTLS from client, sending OK...\n") + self.write("OK\n") + self.sslconn.unwrap() + self.sslconn = None + if test_support.verbose 
and self.server.connectionchatty: + sys.stdout.write(" server: connection is now unencrypted...\n") + else: + if (test_support.verbose and + self.server.connectionchatty): + ctype = (self.sslconn and "encrypted") or "unencrypted" + sys.stdout.write(" server: read %s (%s), sending back %s (%s)...\n" + % (repr(msg), ctype, repr(msg.lower()), ctype)) + self.write(msg.lower()) + except ssl.SSLError: + if self.server.chatty: + handle_error("Test server failure:\n") + self.close() + self.running = False + # normally, we'd just stop here, but for the test + # harness, we want to stop the server + self.server.stop() + + def __init__(self, certificate, ssl_version=None, + certreqs=None, cacerts=None, + chatty=True, connectionchatty=False, starttls_server=False, + wrap_accepting_socket=False, ciphers=None): + + if ssl_version is None: + ssl_version = ssl.PROTOCOL_TLSv1 + if certreqs is None: + certreqs = ssl.CERT_NONE + self.certificate = certificate + self.protocol = ssl_version + self.certreqs = certreqs + self.cacerts = cacerts + self.ciphers = ciphers + self.chatty = chatty + self.connectionchatty = connectionchatty + self.starttls_server = starttls_server + self.sock = socket.socket() + self.flag = None + if wrap_accepting_socket: + self.sock = ssl.wrap_socket(self.sock, server_side=True, + certfile=self.certificate, + cert_reqs = self.certreqs, + ca_certs = self.cacerts, + ssl_version = self.protocol, + ciphers = self.ciphers) + if test_support.verbose and self.chatty: + sys.stdout.write(' server: wrapped server socket as %s\n' % str(self.sock)) + self.port = test_support.bind_port(self.sock) + self.active = False + threading.Thread.__init__(self) + self.daemon = True + + def start(self, flag=None): + self.flag = flag + threading.Thread.start(self) + + def run(self): + self.sock.settimeout(0.05) + self.sock.listen(5) + self.active = True + if self.flag: + # signal an event + self.flag.set() + while self.active: + try: + newconn, connaddr = self.sock.accept() + if 
test_support.verbose and self.chatty: + sys.stdout.write(' server: new connection from ' + + str(connaddr) + '\n') + handler = self.ConnectionHandler(self, newconn) + handler.start() + except socket.timeout: + pass + except KeyboardInterrupt: + self.stop() + self.sock.close() + + def stop(self): + self.active = False + + class AsyncoreEchoServer(threading.Thread): + + class EchoServer(asyncore.dispatcher): + + class ConnectionHandler(asyncore.dispatcher_with_send): + + def __init__(self, conn, certfile): + asyncore.dispatcher_with_send.__init__(self, conn) + self.socket = ssl.wrap_socket(conn, server_side=True, + certfile=certfile, + do_handshake_on_connect=False) + self._ssl_accepting = True + + def readable(self): + if isinstance(self.socket, ssl.SSLSocket): + while self.socket.pending() > 0: + self.handle_read_event() + return True + + def _do_ssl_handshake(self): + try: + self.socket.do_handshake() + except ssl.SSLError, err: + if err.args[0] in (ssl.SSL_ERROR_WANT_READ, + ssl.SSL_ERROR_WANT_WRITE): + return + elif err.args[0] == ssl.SSL_ERROR_EOF: + return self.handle_close() + raise + except socket.error, err: + if err.args[0] == errno.ECONNABORTED: + return self.handle_close() + else: + self._ssl_accepting = False + + def handle_read(self): + if self._ssl_accepting: + self._do_ssl_handshake() + else: + data = self.recv(1024) + if data and data.strip() != 'over': + self.send(data.lower()) + + def handle_close(self): + self.close() + if test_support.verbose: + sys.stdout.write(" server: closed connection %s\n" % self.socket) + + def handle_error(self): + raise + + def __init__(self, certfile): + self.certfile = certfile + asyncore.dispatcher.__init__(self) + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + self.port = test_support.bind_port(self.socket) + self.listen(5) + + def handle_accept(self): + sock_obj, addr = self.accept() + if test_support.verbose: + sys.stdout.write(" server: new connection from %s:%s\n" %addr) + 
self.ConnectionHandler(sock_obj, self.certfile) + + def handle_error(self): + raise + + def __init__(self, certfile): + self.flag = None + self.active = False + self.server = self.EchoServer(certfile) + self.port = self.server.port + threading.Thread.__init__(self) + self.daemon = True + + def __str__(self): + return "<%s %s>" % (self.__class__.__name__, self.server) + + def start(self, flag=None): + self.flag = flag + threading.Thread.start(self) + + def run(self): + self.active = True + if self.flag: + self.flag.set() + while self.active: + asyncore.loop(0.05) + + def stop(self): + self.active = False + self.server.close() + + class SocketServerHTTPSServer(threading.Thread): + + class HTTPSServer(HTTPServer): + + def __init__(self, server_address, RequestHandlerClass, certfile): + HTTPServer.__init__(self, server_address, RequestHandlerClass) + # we assume the certfile contains both private key and certificate + self.certfile = certfile + self.allow_reuse_address = True + + def __str__(self): + return ('<%s %s:%s>' % + (self.__class__.__name__, + self.server_name, + self.server_port)) + + def get_request(self): + # override this to wrap socket with SSL + sock, addr = self.socket.accept() + sslconn = ssl.wrap_socket(sock, server_side=True, + certfile=self.certfile) + return sslconn, addr + + class RootedHTTPRequestHandler(SimpleHTTPRequestHandler): + # need to override translate_path to get a known root, + # instead of using os.curdir, since the test could be + # run from anywhere + + server_version = "TestHTTPS/1.0" + + root = None + + def translate_path(self, path): + """Translate a /-separated PATH to the local filename syntax. + + Components that mean special things to the local file system + (e.g. drive or directory names) are ignored. (XXX They should + probably be diagnosed.) 
+ + """ + # abandon query parameters + path = urlparse.urlparse(path)[2] + path = os.path.normpath(urllib.unquote(path)) + words = path.split('/') + words = filter(None, words) + path = self.root + for word in words: + drive, word = os.path.splitdrive(word) + head, word = os.path.split(word) + if word in self.root: continue + path = os.path.join(path, word) + return path + + def log_message(self, format, *args): + + # we override this to suppress logging unless "verbose" + + if test_support.verbose: + sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" % + (self.server.server_address, + self.server.server_port, + self.request.cipher(), + self.log_date_time_string(), + format%args)) + + + def __init__(self, certfile): + self.flag = None + self.RootedHTTPRequestHandler.root = os.path.split(CERTFILE)[0] + self.server = self.HTTPSServer( + (HOST, 0), self.RootedHTTPRequestHandler, certfile) + self.port = self.server.server_port + threading.Thread.__init__(self) + self.daemon = True + + def __str__(self): + return "<%s %s>" % (self.__class__.__name__, self.server) + + def start(self, flag=None): + self.flag = flag + threading.Thread.start(self) + + def run(self): + if self.flag: + self.flag.set() + self.server.serve_forever(0.05) + + def stop(self): + self.server.shutdown() + + + def bad_cert_test(certfile): + """ + Launch a server with CERT_REQUIRED, and check that trying to + connect to it with the given client certificate fails. 
+ """ + server = ThreadedEchoServer(CERTFILE, + certreqs=ssl.CERT_REQUIRED, + cacerts=CERTFILE, chatty=False) + flag = threading.Event() + server.start(flag) + # wait for it to start + flag.wait() + # try to connect + try: + try: + s = ssl.wrap_socket(socket.socket(), + certfile=certfile, + ssl_version=ssl.PROTOCOL_TLSv1) + s.connect((HOST, server.port)) + except ssl.SSLError, x: + if test_support.verbose: + sys.stdout.write("\nSSLError is %s\n" % x[1]) + except socket.error, x: + if test_support.verbose: + sys.stdout.write("\nsocket.error is %s\n" % x[1]) + else: + raise AssertionError("Use of invalid cert should have failed!") + finally: + server.stop() + server.join() + + def server_params_test(certfile, protocol, certreqs, cacertsfile, + client_certfile, client_protocol=None, indata="FOO\n", + ciphers=None, chatty=True, connectionchatty=False, + wrap_accepting_socket=False): + """ + Launch a server, connect a client to it and try various reads + and writes. + """ + server = ThreadedEchoServer(certfile, + certreqs=certreqs, + ssl_version=protocol, + cacerts=cacertsfile, + ciphers=ciphers, + chatty=chatty, + connectionchatty=connectionchatty, + wrap_accepting_socket=wrap_accepting_socket) + flag = threading.Event() + server.start(flag) + # wait for it to start + flag.wait() + # try to connect + if client_protocol is None: + client_protocol = protocol + try: + s = ssl.wrap_socket(socket.socket(), + certfile=client_certfile, + ca_certs=cacertsfile, + ciphers=ciphers, + cert_reqs=certreqs, + ssl_version=client_protocol) + s.connect((HOST, server.port)) + for arg in [indata, bytearray(indata), memoryview(indata)]: + if connectionchatty: + if test_support.verbose: + sys.stdout.write( + " client: sending %s...\n" % (repr(arg))) + s.write(arg) + outdata = s.read() + if connectionchatty: + if test_support.verbose: + sys.stdout.write(" client: read %s\n" % repr(outdata)) + if outdata != indata.lower(): + raise AssertionError( + "bad data <<%s>> (%d) received; expected 
<<%s>> (%d)\n" + % (outdata[:min(len(outdata),20)], len(outdata), + indata[:min(len(indata),20)].lower(), len(indata))) + s.write("over\n") + if connectionchatty: + if test_support.verbose: + sys.stdout.write(" client: closing connection.\n") + s.close() + finally: + server.stop() + server.join() + + def try_protocol_combo(server_protocol, + client_protocol, + expect_success, + certsreqs=None): + if certsreqs is None: + certsreqs = ssl.CERT_NONE + certtype = { + ssl.CERT_NONE: "CERT_NONE", + ssl.CERT_OPTIONAL: "CERT_OPTIONAL", + ssl.CERT_REQUIRED: "CERT_REQUIRED", + }[certsreqs] + if test_support.verbose: + formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n" + sys.stdout.write(formatstr % + (ssl.get_protocol_name(client_protocol), + ssl.get_protocol_name(server_protocol), + certtype)) + try: + # NOTE: we must enable "ALL" ciphers, otherwise an SSLv23 client + # will send an SSLv3 hello (rather than SSLv2) starting from + # OpenSSL 1.0.0 (see issue #8322). + server_params_test(CERTFILE, server_protocol, certsreqs, + CERTFILE, CERTFILE, client_protocol, + ciphers="ALL", chatty=False) + # Protocol mismatch can result in either an SSLError, or a + # "Connection reset by peer" error. + except ssl.SSLError: + if expect_success: + raise + except socket.error as e: + if expect_success or e.errno != errno.ECONNRESET: + raise + else: + if not expect_success: + raise AssertionError( + "Client protocol %s succeeded with server protocol %s!" + % (ssl.get_protocol_name(client_protocol), + ssl.get_protocol_name(server_protocol))) + + + class ThreadedTests(unittest.TestCase): + + def test_rude_shutdown(self): + """A brutal shutdown of an SSL server should raise an IOError + in the client when attempting handshake. + """ + listener_ready = threading.Event() + listener_gone = threading.Event() + + s = socket.socket() + port = test_support.bind_port(s, HOST) + + # `listener` runs in a thread. It sits in an accept() until + # the main thread connects. 
Then it rudely closes the socket, + # and sets Event `listener_gone` to let the main thread know + # the socket is gone. + def listener(): + s.listen(5) + listener_ready.set() + s.accept() + s.close() + listener_gone.set() + + def connector(): + listener_ready.wait() + c = socket.socket() + c.connect((HOST, port)) + listener_gone.wait() + try: + ssl_sock = ssl.wrap_socket(c) + except IOError: + pass + else: + self.fail('connecting to closed SSL socket should have failed') + + t = threading.Thread(target=listener) + t.start() + try: + connector() + finally: + t.join() + + @skip_if_broken_ubuntu_ssl + def test_echo(self): + """Basic test of an SSL client connecting to a server""" + if test_support.verbose: + sys.stdout.write("\n") + server_params_test(CERTFILE, ssl.PROTOCOL_TLSv1, ssl.CERT_NONE, + CERTFILE, CERTFILE, ssl.PROTOCOL_TLSv1, + chatty=True, connectionchatty=True) + + def test_getpeercert(self): + if test_support.verbose: + sys.stdout.write("\n") + s2 = socket.socket() + server = ThreadedEchoServer(CERTFILE, + certreqs=ssl.CERT_NONE, + ssl_version=ssl.PROTOCOL_SSLv23, + cacerts=CERTFILE, + chatty=False) + flag = threading.Event() + server.start(flag) + # wait for it to start + flag.wait() + # try to connect + try: + s = ssl.wrap_socket(socket.socket(), + certfile=CERTFILE, + ca_certs=CERTFILE, + cert_reqs=ssl.CERT_REQUIRED, + ssl_version=ssl.PROTOCOL_SSLv23) + s.connect((HOST, server.port)) + cert = s.getpeercert() + self.assertTrue(cert, "Can't get peer certificate.") + cipher = s.cipher() + if test_support.verbose: + sys.stdout.write(pprint.pformat(cert) + '\n') + sys.stdout.write("Connection cipher is " + str(cipher) + '.\n') + if 'subject' not in cert: + self.fail("No subject field in certificate: %s." 
% + pprint.pformat(cert)) + if ((('organizationName', 'Python Software Foundation'),) + not in cert['subject']): + self.fail( + "Missing or invalid 'organizationName' field in certificate subject; " + "should be 'Python Software Foundation'.") + s.close() + finally: + server.stop() + server.join() + + def test_empty_cert(self): + """Connecting with an empty cert file""" + bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, + "nullcert.pem")) + def test_malformed_cert(self): + """Connecting with a badly formatted certificate (syntax error)""" + bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, + "badcert.pem")) + def test_nonexisting_cert(self): + """Connecting with a non-existing cert file""" + bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, + "wrongcert.pem")) + def test_malformed_key(self): + """Connecting with a badly formatted key (syntax error)""" + bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, + "badkey.pem")) + + @skip_if_broken_ubuntu_ssl + def test_protocol_sslv2(self): + """Connecting to an SSLv2 server with various client options""" + if test_support.verbose: + sys.stdout.write("\n") + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) + + @skip_if_broken_ubuntu_ssl + def test_protocol_sslv23(self): + """Connecting to an SSLv23 server with various client options""" + if test_support.verbose: + sys.stdout.write("\n") + try: + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True) + except (ssl.SSLError, socket.error), x: + # this fails on some older versions of OpenSSL (0.9.7l, for instance) 
+ if test_support.verbose: + sys.stdout.write( + " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" + % str(x)) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True) + + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) + + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) + + @skip_if_broken_ubuntu_ssl + def test_protocol_sslv3(self): + """Connecting to an SSLv3 server with various client options""" + if test_support.verbose: + sys.stdout.write("\n") + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) + + @skip_if_broken_ubuntu_ssl + def test_protocol_tlsv1(self): + """Connecting to a TLSv1 server with various client options""" + if test_support.verbose: + sys.stdout.write("\n") + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) + 
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False) + + def test_starttls(self): + """Switching from clear text to encrypted and back again.""" + msgs = ("msg 1", "MSG 2", "STARTTLS", "MSG 3", "msg 4", "ENDTLS", "msg 5", "msg 6") + + server = ThreadedEchoServer(CERTFILE, + ssl_version=ssl.PROTOCOL_TLSv1, + starttls_server=True, + chatty=True, + connectionchatty=True) + flag = threading.Event() + server.start(flag) + # wait for it to start + flag.wait() + # try to connect + wrapped = False + try: + s = socket.socket() + s.setblocking(1) + s.connect((HOST, server.port)) + if test_support.verbose: + sys.stdout.write("\n") + for indata in msgs: + if test_support.verbose: + sys.stdout.write( + " client: sending %s...\n" % repr(indata)) + if wrapped: + conn.write(indata) + outdata = conn.read() + else: + s.send(indata) + outdata = s.recv(1024) + if (indata == "STARTTLS" and + outdata.strip().lower().startswith("ok")): + # STARTTLS ok, switch to secure mode + if test_support.verbose: + sys.stdout.write( + " client: read %s from server, starting TLS...\n" + % repr(outdata)) + conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1) + wrapped = True + elif (indata == "ENDTLS" and + outdata.strip().lower().startswith("ok")): + # ENDTLS ok, switch back to clear text + if test_support.verbose: + sys.stdout.write( + " client: read %s from server, ending TLS...\n" + % repr(outdata)) + s = conn.unwrap() + wrapped = False + else: + if test_support.verbose: + sys.stdout.write( + " client: read %s from server\n" % repr(outdata)) + if test_support.verbose: + sys.stdout.write(" client: closing connection.\n") + if wrapped: + conn.write("over\n") + else: + s.send("over\n") + s.close() + finally: + server.stop() + server.join() + + def test_socketserver(self): + """Using a SocketServer to create and manage SSL connections.""" + server = SocketServerHTTPSServer(CERTFILE) + flag = threading.Event() + 
server.start(flag) + # wait for it to start + flag.wait() + # try to connect + try: + if test_support.verbose: + sys.stdout.write('\n') + with open(CERTFILE, 'rb') as f: + d1 = f.read() + d2 = '' + # now fetch the same data from the HTTPS server + url = 'https://127.0.0.1:%d/%s' % ( + server.port, os.path.split(CERTFILE)[1]) + with test_support.check_py3k_warnings(): + f = urllib.urlopen(url) + dlen = f.info().getheader("content-length") + if dlen and (int(dlen) > 0): + d2 = f.read(int(dlen)) + if test_support.verbose: + sys.stdout.write( + " client: read %d bytes from remote server '%s'\n" + % (len(d2), server)) + f.close() + self.assertEqual(d1, d2) + finally: + server.stop() + server.join() + + def test_wrapped_accept(self): + """Check the accept() method on SSL sockets.""" + if test_support.verbose: + sys.stdout.write("\n") + server_params_test(CERTFILE, ssl.PROTOCOL_SSLv23, ssl.CERT_REQUIRED, + CERTFILE, CERTFILE, ssl.PROTOCOL_SSLv23, + chatty=True, connectionchatty=True, + wrap_accepting_socket=True) + + def test_asyncore_server(self): + """Check the example asyncore integration.""" + indata = "TEST MESSAGE of mixed case\n" + + if test_support.verbose: + sys.stdout.write("\n") + server = AsyncoreEchoServer(CERTFILE) + flag = threading.Event() + server.start(flag) + # wait for it to start + flag.wait() + # try to connect + try: + s = ssl.wrap_socket(socket.socket()) + s.connect(('127.0.0.1', server.port)) + if test_support.verbose: + sys.stdout.write( + " client: sending %s...\n" % (repr(indata))) + s.write(indata) + outdata = s.read() + if test_support.verbose: + sys.stdout.write(" client: read %s\n" % repr(outdata)) + if outdata != indata.lower(): + self.fail( + "bad data <<%s>> (%d) received; expected <<%s>> (%d)\n" + % (outdata[:min(len(outdata),20)], len(outdata), + indata[:min(len(indata),20)].lower(), len(indata))) + s.write("over\n") + if test_support.verbose: + sys.stdout.write(" client: closing connection.\n") + s.close() + finally: + server.stop() + 
# wait for server thread to end + server.join() + + def test_recv_send(self): + """Test recv(), send() and friends.""" + if test_support.verbose: + sys.stdout.write("\n") + + server = ThreadedEchoServer(CERTFILE, + certreqs=ssl.CERT_NONE, + ssl_version=ssl.PROTOCOL_TLSv1, + cacerts=CERTFILE, + chatty=True, + connectionchatty=False) + flag = threading.Event() + server.start(flag) + # wait for it to start + flag.wait() + # try to connect + s = ssl.wrap_socket(socket.socket(), + server_side=False, + certfile=CERTFILE, + ca_certs=CERTFILE, + cert_reqs=ssl.CERT_NONE, + ssl_version=ssl.PROTOCOL_TLSv1) + s.connect((HOST, server.port)) + try: + # helper methods for standardising recv* method signatures + def _recv_into(): + b = bytearray("\0"*100) + count = s.recv_into(b) + return b[:count] + + def _recvfrom_into(): + b = bytearray("\0"*100) + count, addr = s.recvfrom_into(b) + return b[:count] + + # (name, method, whether to expect success, *args) + send_methods = [ + ('send', s.send, True, []), + ('sendto', s.sendto, False, ["some.address"]), + ('sendall', s.sendall, True, []), + ] + recv_methods = [ + ('recv', s.recv, True, []), + ('recvfrom', s.recvfrom, False, ["some.address"]), + ('recv_into', _recv_into, True, []), + ('recvfrom_into', _recvfrom_into, False, []), + ] + data_prefix = u"PREFIX_" + + for meth_name, send_meth, expect_success, args in send_methods: + indata = data_prefix + meth_name + try: + send_meth(indata.encode('ASCII', 'strict'), *args) + outdata = s.read() + outdata = outdata.decode('ASCII', 'strict') + if outdata != indata.lower(): + self.fail( + "While sending with <<%s>> bad data " + "<<%r>> (%d) received; " + "expected <<%r>> (%d)\n" % ( + meth_name, outdata[:20], len(outdata), + indata[:20], len(indata) + ) + ) + except ValueError as e: + if expect_success: + self.fail( + "Failed to send with method <<%s>>; " + "expected to succeed.\n" % (meth_name,) + ) + if not str(e).startswith(meth_name): + self.fail( + "Method <<%s>> failed with unexpected 
" + "exception message: %s\n" % ( + meth_name, e + ) + ) + + for meth_name, recv_meth, expect_success, args in recv_methods: + indata = data_prefix + meth_name + try: + s.send(indata.encode('ASCII', 'strict')) + outdata = recv_meth(*args) + outdata = outdata.decode('ASCII', 'strict') + if outdata != indata.lower(): + self.fail( + "While receiving with <<%s>> bad data " + "<<%r>> (%d) received; " + "expected <<%r>> (%d)\n" % ( + meth_name, outdata[:20], len(outdata), + indata[:20], len(indata) + ) + ) + except ValueError as e: + if expect_success: + self.fail( + "Failed to receive with method <<%s>>; " + "expected to succeed.\n" % (meth_name,) + ) + if not str(e).startswith(meth_name): + self.fail( + "Method <<%s>> failed with unexpected " + "exception message: %s\n" % ( + meth_name, e + ) + ) + # consume data + s.read() + + s.write("over\n".encode("ASCII", "strict")) + s.close() + finally: + server.stop() + server.join() + + def test_handshake_timeout(self): + # Issue #5103: SSL handshake must respect the socket timeout + server = socket.socket(socket.AF_INET) + host = "127.0.0.1" + port = test_support.bind_port(server) + started = threading.Event() + finish = False + + def serve(): + server.listen(5) + started.set() + conns = [] + while not finish: + r, w, e = select.select([server], [], [], 0.1) + if server in r: + # Let the socket hang around rather than having + # it closed by garbage collection. 
+ conns.append(server.accept()[0]) + + t = threading.Thread(target=serve) + t.start() + started.wait() + + try: + try: + c = socket.socket(socket.AF_INET) + c.settimeout(0.2) + c.connect((host, port)) + # Will attempt handshake and time out + self.assertRaisesRegexp(ssl.SSLError, "timed out", + ssl.wrap_socket, c) + finally: + c.close() + try: + c = socket.socket(socket.AF_INET) + c.settimeout(0.2) + c = ssl.wrap_socket(c) + # Will attempt handshake and time out + self.assertRaisesRegexp(ssl.SSLError, "timed out", + c.connect, (host, port)) + finally: + c.close() + finally: + finish = True + t.join() + server.close() + + +def test_main(verbose=False): + if skip_expected: + raise unittest.SkipTest("No SSL support") + + global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT + CERTFILE = test_support.findfile("keycert.pem") + SVN_PYTHON_ORG_ROOT_CERT = test_support.findfile( + "https_svn_python_org_root.pem") + + if (not os.path.exists(CERTFILE) or + not os.path.exists(SVN_PYTHON_ORG_ROOT_CERT)): + raise test_support.TestFailed("Can't read certificate files!") + + tests = [BasicTests, BasicSocketTests] + + if test_support.is_resource_enabled('network'): + tests.append(NetworkedTests) + + if _have_threads: + thread_info = test_support.threading_setup() + if thread_info and test_support.is_resource_enabled('network'): + tests.append(ThreadedTests) + + try: + test_support.run_unittest(*tests) + finally: + if _have_threads: + test_support.threading_cleanup(*thread_info) + +if __name__ == "__main__": + test_main() diff --git a/lib-python/modified-2.7.0/test/test_support.py b/lib-python/modified-2.7.0/test/test_support.py --- a/lib-python/modified-2.7.0/test/test_support.py +++ b/lib-python/modified-2.7.0/test/test_support.py @@ -1052,15 +1052,33 @@ guards, default = _parse_guards(guards) return guards.get(platform.python_implementation().lower(), default) +# ---------------------------------- +# PyPy extension: you can run:: +# python ..../test_foo.py --pdb +# to get a pdb prompt in 
case of exceptions +ResultClass = unittest.TextTestRunner.resultclass + +class TestResultWithPdb(ResultClass): + + def addError(self, testcase, exc_info): + ResultClass.addError(self, testcase, exc_info) + if '--pdb' in sys.argv: + import pdb, traceback + traceback.print_tb(exc_info[2]) + pdb.post_mortem(exc_info[2], pdb.Pdb) + +# ---------------------------------- def _run_suite(suite): """Run tests from a unittest.TestSuite-derived class.""" if verbose: - runner = unittest.TextTestRunner(sys.stdout, verbosity=2) + runner = unittest.TextTestRunner(sys.stdout, verbosity=2, + resultclass=TestResultWithPdb) else: runner = BasicTestRunner() + result = runner.run(suite) if not result.wasSuccessful(): if len(result.errors) == 1 and not result.failures: @@ -1073,6 +1091,34 @@ err += "; run in verbose mode for details" raise TestFailed(err) +# ---------------------------------- +# PyPy extension: you can run:: +# python ..../test_foo.py --filter bar +# to run only the test cases whose name contains bar + +def filter_maybe(suite): + try: + i = sys.argv.index('--filter') + filter = sys.argv[i+1] + except (ValueError, IndexError): + return suite + tests = [] + for test in linearize_suite(suite): + if filter in test._testMethodName: + tests.append(test) + return unittest.TestSuite(tests) + +def linearize_suite(suite_or_test): + try: + it = iter(suite_or_test) + except TypeError: + yield suite_or_test + return + for subsuite in it: + for item in linearize_suite(subsuite): + yield item + +# ---------------------------------- def run_unittest(*classes): """Run tests from unittest.TestCase-derived classes.""" @@ -1088,6 +1134,7 @@ suite.addTest(cls) else: suite.addTest(unittest.makeSuite(cls)) + suite = filter_maybe(suite) _run_suite(suite) diff --git a/lib-python/modified-2.7.0/test/test_sys.py b/lib-python/modified-2.7.0/test/test_sys.py --- a/lib-python/modified-2.7.0/test/test_sys.py +++ b/lib-python/modified-2.7.0/test/test_sys.py @@ -384,7 +384,10 @@ 
self.assertEqual(len(sys.float_info), 11) self.assertEqual(sys.float_info.radix, 2) self.assertEqual(len(sys.long_info), 2) - self.assertTrue(sys.long_info.bits_per_digit % 5 == 0) + if test.test_support.check_impl_detail(cpython=True): + self.assertTrue(sys.long_info.bits_per_digit % 5 == 0) + else: + self.assertTrue(sys.long_info.bits_per_digit >= 1) self.assertTrue(sys.long_info.sizeof_digit >= 1) self.assertEqual(type(sys.long_info.bits_per_digit), int) self.assertEqual(type(sys.long_info.sizeof_digit), int) @@ -433,6 +436,7 @@ self.assertEqual(type(getattr(sys.flags, attr)), int, attr) self.assertTrue(repr(sys.flags)) + @test.test_support.impl_detail("sys._clear_type_cache") def test_clear_type_cache(self): sys._clear_type_cache() @@ -474,6 +478,7 @@ p.wait() self.assertIn(executable, ["''", repr(sys.executable)]) + at unittest.skipUnless(test.test_support.check_impl_detail(), "sys.getsizeof()") class SizeofTest(unittest.TestCase): TPFLAGS_HAVE_GC = 1<<14 diff --git a/lib-python/2.7.0/test/test_sys_settrace.py b/lib-python/modified-2.7.0/test/test_sys_settrace.py copy from lib-python/2.7.0/test/test_sys_settrace.py copy to lib-python/modified-2.7.0/test/test_sys_settrace.py --- a/lib-python/2.7.0/test/test_sys_settrace.py +++ b/lib-python/modified-2.7.0/test/test_sys_settrace.py @@ -213,12 +213,16 @@ "finally" def generator_example(): # any() will leave the generator before its end - x = any(generator_function()) + x = any(generator_function()); gc.collect() # the following lines were not traced for x in range(10): y = x +# On CPython, when the generator is decref'ed to zero, we see the trace +# for the "finally:" portion. On PyPy, we don't see it before the next +# garbage collection. That's why we put gc.collect() on the same line above. 
+ generator_example.events = ([(0, 'call'), (2, 'line'), (-6, 'call'), @@ -323,17 +327,24 @@ self.run_test(tighterloop_example) def test_13_genexp(self): - self.run_test(generator_example) - # issue1265: if the trace function contains a generator, - # and if the traced function contains another generator - # that is not completely exhausted, the trace stopped. - # Worse: the 'finally' clause was not invoked. - tracer = Tracer() - sys.settrace(tracer.traceWithGenexp) - generator_example() - sys.settrace(None) - self.compare_events(generator_example.__code__.co_firstlineno, - tracer.events, generator_example.events) + if self.using_gc: + test_support.gc_collect() + gc.enable() + try: + self.run_test(generator_example) + # issue1265: if the trace function contains a generator, + # and if the traced function contains another generator + # that is not completely exhausted, the trace stopped. + # Worse: the 'finally' clause was not invoked. + tracer = Tracer() + sys.settrace(tracer.traceWithGenexp) + generator_example() + sys.settrace(None) + self.compare_events(generator_example.__code__.co_firstlineno, + tracer.events, generator_example.events) + finally: + if self.using_gc: + gc.disable() def test_14_onliner_if(self): def onliners(): diff --git a/lib-python/2.7.0/test/test_sysconfig.py b/lib-python/modified-2.7.0/test/test_sysconfig.py copy from lib-python/2.7.0/test/test_sysconfig.py copy to lib-python/modified-2.7.0/test/test_sysconfig.py --- a/lib-python/2.7.0/test/test_sysconfig.py +++ b/lib-python/modified-2.7.0/test/test_sysconfig.py @@ -236,7 +236,7 @@ def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', - 'posix_home', 'posix_prefix', 'posix_user') + 'posix_home', 'posix_prefix', 'posix_user', 'pypy') self.assertEquals(get_scheme_names(), wanted) def test_symlink(self): diff --git a/lib-python/2.7.0/test/test_thread.py b/lib-python/modified-2.7.0/test/test_thread.py copy from lib-python/2.7.0/test/test_thread.py 
copy to lib-python/modified-2.7.0/test/test_thread.py --- a/lib-python/2.7.0/test/test_thread.py +++ b/lib-python/modified-2.7.0/test/test_thread.py @@ -128,6 +128,7 @@ del task while not done: time.sleep(0.01) + test_support.gc_collect() self.assertEqual(thread._count(), orig) diff --git a/lib-python/modified-2.7.0/test/test_traceback.py b/lib-python/modified-2.7.0/test/test_traceback.py --- a/lib-python/modified-2.7.0/test/test_traceback.py +++ b/lib-python/modified-2.7.0/test/test_traceback.py @@ -5,7 +5,8 @@ import sys import unittest from imp import reload -from test.test_support import run_unittest, is_jython, Error +from test.test_support import run_unittest, Error +from test.test_support import impl_detail, check_impl_detail import traceback @@ -49,10 +50,8 @@ self.assertTrue(err[2].count('\n') == 1) # and no additional newline self.assertTrue(err[1].find("+") == err[2].find("^")) # in the right place + @impl_detail("other implementations may add a caret (why shouldn't they?)") def test_nocaret(self): - if is_jython: - # jython adds a caret in this case (why shouldn't it?) 
- return err = self.get_exception_format(self.syntax_error_without_caret, SyntaxError) self.assertTrue(len(err) == 3) @@ -63,8 +62,11 @@ IndentationError) self.assertTrue(len(err) == 4) self.assertTrue(err[1].strip() == "print 2") - self.assertIn("^", err[2]) - self.assertTrue(err[1].find("2") == err[2].find("^")) + if check_impl_detail(): + # on CPython, there is a "^" at the end of the line + # on PyPy, there is a "^" too, but at the start, more logically + self.assertIn("^", err[2]) + self.assertTrue(err[1].find("2") == err[2].find("^")) def test_bug737473(self): import os, tempfile, time diff --git a/lib-python/2.7.0/test/test_types.py b/lib-python/modified-2.7.0/test/test_types.py copy from lib-python/2.7.0/test/test_types.py copy to lib-python/modified-2.7.0/test/test_types.py --- a/lib-python/2.7.0/test/test_types.py +++ b/lib-python/modified-2.7.0/test/test_types.py @@ -1,7 +1,8 @@ # Python test set -- part 6, built-in types from test.test_support import run_unittest, have_unicode, run_with_locale, \ - check_py3k_warnings + check_py3k_warnings, \ + impl_detail, check_impl_detail import unittest import sys import locale @@ -289,9 +290,14 @@ # array.array() returns an object that does not implement a char buffer, # something which int() uses for conversion. import array - try: int(buffer(array.array('c'))) + try: int(buffer(array.array('c', '5'))) except TypeError: pass - else: self.fail("char buffer (at C level) not working") + else: + if check_impl_detail(): + self.fail("char buffer (at C level) not working") + #else: + # it works on PyPy, which does not have the distinction + # between char buffer and binary buffer. XXX fine enough? 
def test_int__format__(self): def test(i, format_spec, result): @@ -741,6 +747,7 @@ for code in 'xXobns': self.assertRaises(ValueError, format, 0, ',' + code) + @impl_detail("the types' internal size attributes are CPython-only") def test_internal_sizes(self): self.assertGreater(object.__basicsize__, 0) self.assertGreater(tuple.__itemsize__, 0) diff --git a/lib-python/2.7.0/test/test_unicodedata.py b/lib-python/modified-2.7.0/test/test_unicodedata.py copy from lib-python/2.7.0/test/test_unicodedata.py copy to lib-python/modified-2.7.0/test/test_unicodedata.py --- a/lib-python/2.7.0/test/test_unicodedata.py +++ b/lib-python/modified-2.7.0/test/test_unicodedata.py @@ -220,10 +220,12 @@ # been loaded in this process. popen = subprocess.Popen(args, stderr=subprocess.PIPE) popen.wait() - self.assertEqual(popen.returncode, 1) - error = "SyntaxError: (unicode error) \N escapes not supported " \ - "(can't load unicodedata module)" - self.assertIn(error, popen.stderr.read()) + self.assertIn(popen.returncode, [0, 1]) # at least it did not segfault + if test.test_support.check_impl_detail(): + self.assertEqual(popen.returncode, 1) + error = "SyntaxError: (unicode error) \N escapes not supported " \ + "(can't load unicodedata module)" + self.assertIn(error, popen.stderr.read()) def test_decimal_numeric_consistent(self): # Test that decimal and numeric are consistent, diff --git a/lib-python/2.7.0/test/test_xml_etree.py b/lib-python/modified-2.7.0/test/test_xml_etree.py copy from lib-python/2.7.0/test/test_xml_etree.py copy to lib-python/modified-2.7.0/test/test_xml_etree.py --- a/lib-python/2.7.0/test/test_xml_etree.py +++ b/lib-python/modified-2.7.0/test/test_xml_etree.py @@ -1628,10 +1628,10 @@ Check reference leak. >>> xmltoolkit63() - >>> count = sys.getrefcount(None) + >>> count = sys.getrefcount(None) #doctest: +SKIP >>> for i in range(1000): ... 
xmltoolkit63() - >>> sys.getrefcount(None) - count + >>> sys.getrefcount(None) - count #doctest: +SKIP 0 """ diff --git a/lib-python/2.7.0/test/test_xpickle.py b/lib-python/modified-2.7.0/test/test_xpickle.py copy from lib-python/2.7.0/test/test_xpickle.py copy to lib-python/modified-2.7.0/test/test_xpickle.py diff --git a/lib-python/2.7.0/trace.py b/lib-python/modified-2.7.0/trace.py copy from lib-python/2.7.0/trace.py copy to lib-python/modified-2.7.0/trace.py --- a/lib-python/2.7.0/trace.py +++ b/lib-python/modified-2.7.0/trace.py @@ -546,6 +546,10 @@ if len(funcs) == 1: dicts = [d for d in gc.get_referrers(funcs[0]) if isinstance(d, dict)] + if len(dicts) == 0: + # PyPy may store functions directly on the class + # (more exactly: the container is not a Python object) + dicts = funcs if len(dicts) == 1: classes = [c for c in gc.get_referrers(dicts[0]) if hasattr(c, "__bases__")] diff --git a/lib_pypy/_csv.py b/lib_pypy/_csv.py --- a/lib_pypy/_csv.py +++ b/lib_pypy/_csv.py @@ -194,8 +194,7 @@ names = csv.list_dialects()""" return list(_dialects) -class Reader: - +class Reader(object): """CSV reader Reader objects are responsible for reading and parsing tabular data @@ -214,7 +213,7 @@ self._parse_reset() def _parse_reset(self): - self.field = [] + self.field = '' self.fields = [] self.state = self.START_RECORD self.numeric_field = False @@ -235,11 +234,12 @@ self.line_num += 1 - for c in line: - if c == '\0': - raise Error("line contains NULL byte") - self._parse_process_char(c) - self._parse_process_char('\0') + if '\0' in line: + raise Error("line contains NULL byte") + pos = 0 + while pos < len(line): + pos = self._parse_process_char(line, pos) + self._parse_eol() if self.state == self.START_RECORD: break @@ -248,46 +248,46 @@ self.fields = [] return fields - def _parse_process_char(self, c): + def _parse_process_char(self, line, pos): + c = line[pos] if self.state == self.IN_FIELD: # in unquoted field - if c in ('\n', '\r', '\0'): - # end of line - return 
[fields] - self._parse_save_field() - if c == '\0': - self.state = self.START_RECORD + pos2 = pos + while True: + if c in '\n\r': + # end of line - return [fields] + self._parse_save_field() + self.state = self.EAT_CRNL + elif c == self.dialect.escapechar: + # possible escaped character + self.state = self.ESCAPED_CHAR + elif c == self.dialect.delimiter: + # save field - wait for new field + self._parse_save_field() + self.state = self.START_FIELD else: - self.state = self.EAT_CRNL - elif c == self.dialect.escapechar: - # possible escaped character - self.state = self.ESCAPED_CHAR - elif c == self.dialect.delimiter: - # save field - wait for new field - self._parse_save_field() - self.state = self.START_FIELD - else: - # normal character - save in field - self._parse_add_char(c) + # normal character - save in field + pos2 += 1 + c = line[pos2] + continue + break + if pos2 > pos: + self._parse_add_char(line[pos:pos2]) + pos = pos2 elif self.state == self.START_RECORD: - if c == '\0': - # empty line - return [] - pass - elif c in ('\n', '\r'): + if c in '\n\r': self.state = self.EAT_CRNL else: self.state = self.START_FIELD # restart process - self._parse_process_char(c) + self._parse_process_char(line, pos) elif self.state == self.START_FIELD: - if c in ('\n', '\r', '\0'): + if c in '\n\r': # save empty field - return [fields] self._parse_save_field() - if c == '\0': - self.state = self.START_RECORD - else: - self.state = self.EAT_CRNL + self.state = self.EAT_CRNL elif (c == self.dialect.quotechar and self.dialect.quoting != QUOTE_NONE): # start quoted field @@ -309,15 +309,11 @@ self.state = self.IN_FIELD elif self.state == self.ESCAPED_CHAR: - if c == '\0': - c = '\n' self._parse_add_char(c) self.state = self.IN_FIELD elif self.state == self.IN_QUOTED_FIELD: - if c == '\0': - pass - elif c == self.dialect.escapechar: + if c == self.dialect.escapechar: # possible escape character self.state = self.ESCAPE_IN_QUOTED_FIELD elif (c == self.dialect.quotechar @@ -333,8 
+329,6 @@ self._parse_add_char(c) elif self.state == self.ESCAPE_IN_QUOTED_FIELD: - if c == '\0': - c = '\n' self._parse_add_char(c) self.state = self.IN_QUOTED_FIELD @@ -349,13 +343,10 @@ # save field - wait for new field self._parse_save_field() self.state = self.START_FIELD - elif c in ('\r', '\n', '\0'): + elif c in '\r\n': # end of line - return [fields] self._parse_save_field() - if c == '\0': - self.state = self.START_RECORD - else: - self.state = self.EAT_CRNL + self.state = self.EAT_CRNL elif not self.dialect.strict: self._parse_add_char(c) self.state = self.IN_FIELD @@ -364,10 +355,8 @@ (self.dialect.delimiter, self.dialect.quotechar)) elif self.state == self.EAT_CRNL: - if c in ('\r', '\n'): + if c in '\r\n': pass - elif c == '\0': - self.state = self.START_RECORD else: raise Error("new-line character seen in unquoted field - " "do you need to open the file " @@ -376,21 +365,52 @@ else: raise RuntimeError("unknown state: %r" % (self.state,)) + return pos + 1 + + def _parse_eol(self): + if self.state == self.EAT_CRNL: + self.state = self.START_RECORD + elif self.state == self.START_RECORD: + # empty line - return [] + pass + elif self.state == self.IN_FIELD: + # in unquoted field + # end of line - return [fields] + self._parse_save_field() + self.state = self.START_RECORD + elif self.state == self.START_FIELD: + # save empty field - return [fields] + self._parse_save_field() + self.state = self.START_RECORD + elif self.state == self.ESCAPED_CHAR: + self._parse_add_char('\n') + self.state = self.IN_FIELD + elif self.state == self.IN_QUOTED_FIELD: + pass + elif self.state == self.ESCAPE_IN_QUOTED_FIELD: + self._parse_add_char('\n') + self.state = self.IN_QUOTED_FIELD + elif self.state == self.QUOTE_IN_QUOTED_FIELD: + # end of line - return [fields] + self._parse_save_field() + self.state = self.START_RECORD + else: + raise RuntimeError("unknown state: %r" % (self.state,)) + def _parse_save_field(self): - field, self.field = self.field, [] - field = 
''.join(field) + field, self.field = self.field, '' if self.numeric_field: self.numeric_field = False field = float(field) self.fields.append(field) def _parse_add_char(self, c): - if len(self.field) >= _field_limit: + if len(self.field) + len(c) > _field_limit: raise Error("field larget than field limit (%d)" % (_field_limit)) - self.field.append(c) + self.field += c -class Writer: +class Writer(object): """CSV writer Writer objects are responsible for generating tabular data diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -31,6 +31,8 @@ self._length_) def setraw(self, buffer): + if len(buffer) > self._length_: + raise ValueError("%r too long" % (buffer,)) for i in range(len(buffer)): self[i] = buffer[i] res.raw = property(getraw, setraw) diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -124,6 +124,8 @@ # A callback into python self.callable = argument ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_) + if self._restype_ is None: + ffires = None self._ptr = _rawffi.CallbackPtr(self._wrap_callable(argument, self.argtypes), ffiargs, ffires, self._flags_) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -105,6 +105,7 @@ res = type.__new__(self, name, cls, typedict) if "_abstract_" in typedict: return res + cls = cls or (object,) if isinstance(cls[0], StructOrUnionMeta): cls[0]._make_final() if '_fields_' in typedict: diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -5,9 +5,6 @@ # Monkeypatch & hacks to let ctypes.tests import. # This should be removed at some point. 
sys.getrefcount = lambda x: len(gc.get_referrers(x)) - 1 -import _ctypes -_ctypes.PyObj_FromPtr = None -del _ctypes def compile_shared(): """Compile '_ctypes_test.c' into an extension module, and import it @@ -55,4 +52,12 @@ fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) imp.load_module('_ctypes_test', fp, filename, description) -compile_shared() + +try: + import _ctypes + _ctypes.PyObj_FromPtr = None + del _ctypes +except ImportError: + pass # obscure condition of _ctypes_test.py being imported by py.test +else: + compile_shared() diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -3,12 +3,11 @@ # reduce() has moved to _functools in Python 2.6+. reduce = reduce -class partial: +class partial(object): """ partial(func, *args, **keywords) - new function with partial application of the given arguments and keywords. """ - __slots__ = ['func', 'args', 'keywords'] def __init__(self, func, *args, **keywords): if not callable(func): diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -1,6 +1,7 @@ """Imported by app_main.py when PyPy needs to fire up the interactive console. 
""" import sys +import os def interactive_console(mainmodule=None): @@ -24,6 +25,11 @@ pass # try: + if not os.isatty(sys.stdin.fileno()): + # Bail out if stdin is not tty-like, as pyrepl wouldn't be happy + # For example, with: + # subprocess.Popen(['pypy', '-i'], stdin=subprocess.PIPE) + raise ImportError from pyrepl.simple_interact import check if not check(): raise ImportError diff --git a/lib_pypy/_sha256.py b/lib_pypy/_sha256.py --- a/lib_pypy/_sha256.py +++ b/lib_pypy/_sha256.py @@ -131,6 +131,14 @@ sha_info['digestsize'] = 28 return sha_info +def getbuf(s): + if isinstance(s, str): + return s + elif isinstance(s, unicode): + return str(s) + else: + return buffer(s) + def sha_update(sha_info, buffer): count = len(buffer) buffer_idx = 0 @@ -211,10 +219,10 @@ def __init__(self, s=None): self._sha = sha_init() if s: - sha_update(self._sha, s) + sha_update(self._sha, getbuf(s)) def update(self, s): - sha_update(self._sha, s) + sha_update(self._sha, getbuf(s)) def digest(self): return sha_final(self._sha.copy())[:self._sha['digestsize']] @@ -233,7 +241,7 @@ def __init__(self, s=None): self._sha = sha224_init() if s: - sha_update(self._sha, s) + sha_update(self._sha, getbuf(s)) def copy(self): new = sha224.__new__(sha224) diff --git a/lib_pypy/_sha512.py b/lib_pypy/_sha512.py --- a/lib_pypy/_sha512.py +++ b/lib_pypy/_sha512.py @@ -152,6 +152,14 @@ sha_info['digestsize'] = 48 return sha_info +def getbuf(s): + if isinstance(s, str): + return s + elif isinstance(s, unicode): + return str(s) + else: + return buffer(s) + def sha_update(sha_info, buffer): count = len(buffer) buffer_idx = 0 @@ -241,10 +249,10 @@ def __init__(self, s=None): self._sha = sha_init() if s: - sha_update(self._sha, s) + sha_update(self._sha, getbuf(s)) def update(self, s): - sha_update(self._sha, s) + sha_update(self._sha, getbuf(s)) def digest(self): return sha_final(self._sha.copy())[:self._sha['digestsize']] @@ -263,7 +271,7 @@ def __init__(self, s=None): self._sha = sha384_init() if s: - 
sha_update(self._sha, s) + sha_update(self._sha, getbuf(s)) def copy(self): new = sha384.__new__(sha384) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_sqlite3.py @@ -0,0 +1,1194 @@ +#-*- coding: utf-8 -*- +# pysqlite2/dbapi.py: pysqlite DB-API module +# +# Copyright (C) 2007-2008 Gerhard Häring +# +# This file is part of pysqlite. +# +# This software is provided 'as-is', without any express or implied +# warranty. In no event will the authors be held liable for any damages +# arising from the use of this software. +# +# Permission is granted to anyone to use this software for any purpose, +# including commercial applications, and to alter it and redistribute it +# freely, subject to the following restrictions: +# +# 1. The origin of this software must not be misrepresented; you must not +# claim that you wrote the original software. If you use this software +# in a product, an acknowledgment in the product documentation would be +# appreciated but is not required. +# 2. Altered source versions must be plainly marked as such, and must not be +# misrepresented as being the original software. +# 3. This notice may not be removed or altered from any source distribution. 
+ +from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll +from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast +from ctypes import sizeof, c_ssize_t +import datetime +import sys +import time +import weakref +from threading import _get_ident as thread_get_ident + +names = "sqlite3.dll libsqlite3.so.0 libsqlite3.so libsqlite3.dylib".split() +for name in names: + try: + sqlite = cdll.LoadLibrary(name) + break + except OSError: + continue +else: + raise ImportError("Could not load C-library, tried: %s" %(names,)) + +# pysqlite version information +version = "2.6.0" + +# pysqlite constants +PARSE_COLNAMES = 1 +PARSE_DECLTYPES = 2 + + +########################################## +# BEGIN Wrapped SQLite C API and constants +########################################## + +SQLITE_OK = 0 +SQLITE_ERROR = 1 +SQLITE_INTERNAL = 2 +SQLITE_PERM = 3 +SQLITE_ABORT = 4 +SQLITE_BUSY = 5 +SQLITE_LOCKED = 6 +SQLITE_NOMEM = 7 +SQLITE_READONLY = 8 +SQLITE_INTERRUPT = 9 +SQLITE_IOERR = 10 +SQLITE_CORRUPT = 11 +SQLITE_NOTFOUND = 12 +SQLITE_FULL = 13 +SQLITE_CANTOPEN = 14 +SQLITE_PROTOCOL = 15 +SQLITE_EMPTY = 16 +SQLITE_SCHEMA = 17 +SQLITE_TOOBIG = 18 +SQLITE_CONSTRAINT = 19 +SQLITE_MISMATCH = 20 +SQLITE_MISUSE = 21 +SQLITE_NOLFS = 22 +SQLITE_AUTH = 23 +SQLITE_FORMAT = 24 +SQLITE_RANGE = 25 +SQLITE_NOTADB = 26 +SQLITE_ROW = 100 +SQLITE_DONE = 101 +SQLITE_INTEGER = 1 +SQLITE_FLOAT = 2 +SQLITE_BLOB = 4 +SQLITE_NULL = 5 +SQLITE_TEXT = 3 +SQLITE3_TEXT = 3 + +SQLITE_TRANSIENT = cast(-1, c_void_p) +SQLITE_UTF8 = 1 + +SQLITE_DENY = 1 +SQLITE_IGNORE = 2 + +SQLITE_CREATE_INDEX = 1 +SQLITE_CREATE_TABLE = 2 +SQLITE_CREATE_TEMP_INDEX = 3 +SQLITE_CREATE_TEMP_TABLE = 4 +SQLITE_CREATE_TEMP_TRIGGER = 5 +SQLITE_CREATE_TEMP_VIEW = 6 +SQLITE_CREATE_TRIGGER = 7 +SQLITE_CREATE_VIEW = 8 +SQLITE_DELETE = 9 +SQLITE_DROP_INDEX = 10 +SQLITE_DROP_TABLE = 11 +SQLITE_DROP_TEMP_INDEX = 12 +SQLITE_DROP_TEMP_TABLE = 13 +SQLITE_DROP_TEMP_TRIGGER = 14 +SQLITE_DROP_TEMP_VIEW = 15 +SQLITE_DROP_TRIGGER = 
16 +SQLITE_DROP_VIEW = 17 +SQLITE_INSERT = 18 +SQLITE_PRAGMA = 19 +SQLITE_READ = 20 +SQLITE_SELECT = 21 +SQLITE_TRANSACTION = 22 +SQLITE_UPDATE = 23 +SQLITE_ATTACH = 24 +SQLITE_DETACH = 25 +SQLITE_ALTER_TABLE = 26 +SQLITE_REINDEX = 27 +SQLITE_ANALYZE = 28 +SQLITE_CREATE_VTABLE = 29 +SQLITE_DROP_VTABLE = 30 +SQLITE_FUNCTION = 31 + +# SQLite C API +sqlite.sqlite3_bind_double.argtypes = [c_void_p, c_int, c_double] +sqlite.sqlite3_bind_int64.argtypes = [c_void_p, c_int, c_int64] + +sqlite.sqlite3_value_int.argtypes = [c_void_p] +sqlite.sqlite3_value_int.restype = c_int + +sqlite.sqlite3_value_int64.argtypes = [c_void_p] +sqlite.sqlite3_value_int64.restype = c_int64 + +sqlite.sqlite3_value_blob.argtypes = [c_void_p] +sqlite.sqlite3_value_blob.restype = c_void_p + +sqlite.sqlite3_value_bytes.argtypes = [c_void_p] +sqlite.sqlite3_value_bytes.restype = c_int + +sqlite.sqlite3_value_double.argtypes = [c_void_p] +sqlite.sqlite3_value_double.restype = c_double + +sqlite.sqlite3_value_text.argtypes = [c_void_p] +sqlite.sqlite3_value_text.restype = c_char_p + +sqlite.sqlite3_bind_int.argtypes = [c_void_p, c_int, c_int] +sqlite.sqlite3_bind_parameter_count.argtypes = [c_void_p] +sqlite.sqlite3_bind_parameter_count.restype = c_int +sqlite.sqlite3_bind_parameter_index.argtypes = [c_void_p, c_char_p] +sqlite.sqlite3_bind_parameter_index.restype = c_int +sqlite.sqlite3_bind_parameter_name.argtypes = [c_void_p, c_int] +sqlite.sqlite3_bind_parameter_name.restype = c_char_p +sqlite.sqlite3_bind_text.argtypes = [c_void_p, c_int, c_char_p, c_int,c_void_p] +sqlite.sqlite3_bind_blob.argtypes = [c_void_p, c_int, c_void_p, c_int,c_void_p] +sqlite.sqlite3_bind_blob.restype = c_int +sqlite.sqlite3_changes.argtypes = [c_void_p] +sqlite.sqlite3_changes.restype = c_int +sqlite.sqlite3_close.argtypes = [c_void_p] +sqlite.sqlite3_close.restype = c_int +sqlite.sqlite3_column_blob.restype = c_void_p +sqlite.sqlite3_column_bytes.restype = c_int +sqlite.sqlite3_column_double.restype = c_double 
+sqlite.sqlite3_column_int64.restype = c_int64 +sqlite.sqlite3_column_name.restype = c_char_p +sqlite.sqlite3_column_text.restype = c_char_p +sqlite.sqlite3_complete.argtypes = [c_char_p] +sqlite.sqlite3_complete.restype = c_int +sqlite.sqlite3_errcode.restype = c_int +sqlite.sqlite3_errmsg.restype = c_char_p +sqlite.sqlite3_get_autocommit.argtypes = [c_void_p] +sqlite.sqlite3_get_autocommit.restype = c_int +sqlite.sqlite3_libversion.restype = c_char_p +sqlite.sqlite3_open.argtypes = [c_char_p, c_void_p] +sqlite.sqlite3_prepare_v2.argtypes = [c_void_p, c_char_p, c_int, c_void_p, POINTER(c_char_p)] +sqlite.sqlite3_column_decltype.argtypes = [c_void_p, c_int] +sqlite.sqlite3_column_decltype.restype = c_char_p + +sqlite.sqlite3_result_blob.argtypes = [c_void_p, c_char_p, c_int, c_void_p] +sqlite.sqlite3_result_int64.argtypes = [c_void_p, c_int64] +sqlite.sqlite3_result_null.argtypes = [c_void_p] +sqlite.sqlite3_result_double.argtypes = [c_void_p, c_double] +sqlite.sqlite3_result_error.argtypes = [c_void_p, c_char_p, c_int] +sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p] + +########################################## +# END Wrapped SQLite C API and constants +########################################## + +# SQLite version information +sqlite_version = sqlite.sqlite3_libversion() + +class Error(StandardError): + pass + +class Warning(StandardError): + pass + +class InterfaceError(Error): + pass + +class DatabaseError(Error): + pass + +class InternalError(DatabaseError): + pass + +class OperationalError(DatabaseError): + pass + +class ProgrammingError(DatabaseError): + pass + +class IntegrityError(DatabaseError): + pass + +class DataError(DatabaseError): + pass + +class NotSupportedError(DatabaseError): + pass + +def connect(database, **kwargs): + factory = kwargs.get("factory", Connection) + return factory(database, **kwargs) + +class Connection(object): + def __init__(self, database, isolation_level="", detect_types=0, timeout=None, *args, 
**kwargs): + self.db = c_void_p() + if sqlite.sqlite3_open(database, byref(self.db)) != SQLITE_OK: + raise OperationalError("Could not open database") + if timeout is not None: + timeout = int(timeout * 1000) # pysqlite2 uses timeout in seconds + sqlite.sqlite3_busy_timeout(self.db, timeout) + + self.text_factory = lambda x: unicode(x, "utf-8") + self.closed = False + self.statements = [] + self.statement_counter = 0 + self.row_factory = None + self._isolation_level = isolation_level + self.detect_types = detect_types + + self.Error = Error + self.Warning = Warning + self.InterfaceError = InterfaceError + self.DatabaseError = DatabaseError + self.InternalError = InternalError + self.OperationalError = OperationalError + self.ProgrammingError = ProgrammingError + self.IntegrityError = IntegrityError + self.DataError = DataError + self.NotSupportedError = NotSupportedError + + self.func_cache = {} + self._aggregates = {} + self.aggregate_instances = {} + self._collations = {} + self.thread_ident = thread_get_ident() + + def _get_exception(self, error_code = None): + if error_code is None: + error_code = sqlite.sqlite3_errcode(self.db) + error_message = sqlite.sqlite3_errmsg(self.db) + + if error_code == SQLITE_OK: + raise ValueError("error signalled but got SQLITE_OK") + elif error_code in (SQLITE_INTERNAL, SQLITE_NOTFOUND): + exc = InternalError + elif error_code == SQLITE_NOMEM: + exc = MemoryError + elif error_code in (SQLITE_ERROR, SQLITE_PERM, SQLITE_ABORT, SQLITE_BUSY, SQLITE_LOCKED, + SQLITE_READONLY, SQLITE_INTERRUPT, SQLITE_IOERR, SQLITE_FULL, SQLITE_CANTOPEN, + SQLITE_PROTOCOL, SQLITE_EMPTY, SQLITE_SCHEMA): + exc = OperationalError + elif error_code == SQLITE_CORRUPT: + exc = DatabaseError + elif error_code == SQLITE_TOOBIG: + exc = DataError + elif error_code in (SQLITE_CONSTRAINT, SQLITE_MISMATCH): + exc = IntegrityError + elif error_code == SQLITE_MISUSE: + exc = ProgrammingError + else: + exc = DatabaseError + exc = exc(error_message) + exc.error_code = 
error_code + return exc + + def _remember_statement(self, statement): + self.statements.append(weakref.ref(statement)) + self.statement_counter += 1 + + if self.statement_counter % 100 == 0: + self.statements = [ref for ref in self.statements if ref() is not None] + + def _check_thread(self): + if not hasattr(self, 'thread_ident'): + return + if self.thread_ident != thread_get_ident(): + raise ProgrammingError( + "SQLite objects created in a thread can only be used in that same thread." + "The object was created in thread id %d and this is thread id %d", + self.thread_ident, thread_get_ident()) + + def cursor(self, factory=None): + self._check_thread() + self._check_closed() + if factory is None: + factory = Cursor + cur = factory(self) + if self.row_factory is not None: + cur.row_factory = self.row_factory + return cur + + def executemany(self, *args): + self._check_closed() + cur = Cursor(self) + if self.row_factory is not None: + cur.row_factory = self.row_factory + return cur.executemany(*args) + + def execute(self, *args): + self._check_closed() + cur = Cursor(self) + if self.row_factory is not None: + cur.row_factory = self.row_factory + return cur.execute(*args) + + def executescript(self, *args): + self._check_closed() + cur = Cursor(self) + if self.row_factory is not None: + cur.row_factory = self.row_factory + return cur.executescript(*args) + + def __call__(self, sql): + self._check_closed() + cur = Cursor(self) + if not isinstance(sql, (str, unicode)): + raise Warning("SQL is of wrong type. 
Must be string or unicode.") + statement = Statement(cur, sql, self.row_factory) + return statement + + def _get_isolation_level(self): + return self._isolation_level + def _set_isolation_level(self, val): + if val is None: + self.commit() + if isinstance(val, unicode): + val = str(val) + self._isolation_level = val + isolation_level = property(_get_isolation_level, _set_isolation_level) + + def _begin(self): + self._check_closed() + if self._isolation_level is None: + return + if sqlite.sqlite3_get_autocommit(self.db): + try: + sql = "BEGIN " + self._isolation_level + statement = c_void_p() + next_char = c_char_p() + ret = sqlite.sqlite3_prepare_v2(self.db, sql, -1, byref(statement), next_char) + if ret != SQLITE_OK: + raise self._get_exception(ret) + ret = sqlite.sqlite3_step(statement) + if ret != SQLITE_DONE: + raise self._get_exception(ret) + finally: + sqlite.sqlite3_finalize(statement) + + def commit(self): + self._check_thread() + self._check_closed() + if sqlite.sqlite3_get_autocommit(self.db): + return + + for statement in self.statements: + obj = statement() + if obj is not None: + obj.reset() + + try: + sql = "COMMIT" + statement = c_void_p() + next_char = c_char_p() + ret = sqlite.sqlite3_prepare_v2(self.db, sql, -1, byref(statement), next_char) + if ret != SQLITE_OK: + raise self._get_exception(ret) + ret = sqlite.sqlite3_step(statement) + if ret != SQLITE_DONE: + raise self._get_exception(ret) + finally: + sqlite.sqlite3_finalize(statement) + + def rollback(self): + self._check_thread() + self._check_closed() + if sqlite.sqlite3_get_autocommit(self.db): + return + + for statement in self.statements: + obj = statement() + if obj is not None: + obj.reset() + + try: + sql = "ROLLBACK" + statement = c_void_p() + next_char = c_char_p() + ret = sqlite.sqlite3_prepare_v2(self.db, sql, -1, byref(statement), next_char) + if ret != SQLITE_OK: + raise self._get_exception(ret) + ret = sqlite.sqlite3_step(statement) + if ret != SQLITE_DONE: + raise 
self._get_exception(ret) + finally: + sqlite.sqlite3_finalize(statement) + + def _check_closed(self): + if getattr(self, 'closed', True): + raise ProgrammingError("Cannot operate on a closed database.") + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + if exc_type is None and exc_value is None and exc_tb is None: + self.commit() + else: + self.rollback() + + def _get_total_changes(self): + return sqlite.sqlite3_total_changes(self.db) + total_changes = property(_get_total_changes) + + def close(self): + self._check_thread() + if self.closed: + return + for statement in self.statements: + obj = statement() + if obj is not None: + obj.finalize() + + self.closed = True + ret = sqlite.sqlite3_close(self.db) + if ret != SQLITE_OK: + raise self._get_exception(ret) + + def create_collation(self, name, callback): + self._check_thread() + self._check_closed() + name = name.upper() + if not name.replace('_', '').isalnum(): + raise ProgrammingError("invalid character in collation name") + + if callback is None: + del self._collations[name] + c_collation_callback = cast(None, COLLATION) + else: + if not callable(callback): + raise TypeError("parameter must be callable") + + def collation_callback(context, len1, str1, len2, str2): + text1 = string_at(str1, len1) + text2 = string_at(str2, len2) + + return callback(text1, text2) + + c_collation_callback = COLLATION(collation_callback) + self._collations[name] = collation_callback + + + ret = sqlite.sqlite3_create_collation(self.db, name, + SQLITE_UTF8, + None, + c_collation_callback) + if ret != SQLITE_OK: + raise self._get_exception(ret) + + def set_progress_handler(self, callable, nsteps): + self._check_thread() + self._check_closed() + if callable is None: + c_progress_handler = cast(None, PROGRESS) + else: + try: + c_progress_handler, _ = self.func_cache[callable] + except KeyError: + def progress_handler(userdata): + try: + ret = callable() + return bool(ret) + except Exception: + # 
abort query if error occurred + return 1 + c_progress_handler = PROGRESS(progress_handler) + + self.func_cache[callable] = c_progress_handler, progress_handler + ret = sqlite.sqlite3_progress_handler(self.db, nsteps, + c_progress_handler, + None) + if ret != SQLITE_OK: + raise self._get_exception(ret) + + def set_authorizer(self, callback): + self._check_thread() + self._check_closed() + + try: + c_authorizer, _ = self.func_cache[callback] + except KeyError: + def authorizer(userdata, action, arg1, arg2, dbname, source): + try: + return int(callback(action, arg1, arg2, dbname, source)) + except Exception, e: + return SQLITE_DENY + c_authorizer = AUTHORIZER(authorizer) + + self.func_cache[callback] = c_authorizer, authorizer + + ret = sqlite.sqlite3_set_authorizer(self.db, + c_authorizer, + None) + if ret != SQLITE_OK: + raise self._get_exception(ret) + + def create_function(self, name, num_args, callback): + self._check_thread() + self._check_closed() + try: + c_closure, _ = self.func_cache[callback] + except KeyError: + def closure(context, nargs, c_params): + function_callback(callback, context, nargs, c_params) + c_closure = FUNC(closure) + self.func_cache[callback] = c_closure, closure + ret = sqlite.sqlite3_create_function(self.db, name, num_args, + SQLITE_UTF8, None, + c_closure, + cast(None, STEP), + cast(None, FINAL)) + if ret != SQLITE_OK: + raise self._get_exception(ret) + + def create_aggregate(self, name, num_args, cls): + self._check_thread() + self._check_closed() + + try: + c_step_callback, c_final_callback, _, _ = self._aggregates[cls] + except KeyError: + def step_callback(context, argc, c_params): + + aggregate_ptr = cast( + sqlite.sqlite3_aggregate_context( + context, sizeof(c_ssize_t)), + POINTER(c_ssize_t)) + + if not aggregate_ptr[0]: + try: + aggregate = cls() + except Exception, e: + msg = ("user-defined aggregate's '__init__' " + "method raised error") + sqlite.sqlite3_result_error(context, msg, len(msg)) + return + aggregate_id = 
id(aggregate) + self.aggregate_instances[aggregate_id] = aggregate + aggregate_ptr[0] = aggregate_id + else: + aggregate = self.aggregate_instances[aggregate_ptr[0]] + + params = _convert_params(context, argc, c_params) + try: + aggregate.step(*params) + except Exception, e: + msg = ("user-defined aggregate's 'step' " + "method raised error") + sqlite.sqlite3_result_error(context, msg, len(msg)) + + def final_callback(context): + + aggregate_ptr = cast( + sqlite.sqlite3_aggregate_context( + context, sizeof(c_ssize_t)), + POINTER(c_ssize_t)) + + if aggregate_ptr[0]: + aggregate = self.aggregate_instances[aggregate_ptr[0]] + try: + val = aggregate.finalize() + except Exception, e: + msg = ("user-defined aggregate's 'finalize' " + "method raised error") + sqlite.sqlite3_result_error(context, msg, len(msg)) + else: + _convert_result(context, val) + finally: + del self.aggregate_instances[aggregate_ptr[0]] + + c_step_callback = STEP(step_callback) + c_final_callback = FINAL(final_callback) + + self._aggregates[cls] = (c_step_callback, c_final_callback, + step_callback, final_callback) + + ret = sqlite.sqlite3_create_function(self.db, name, num_args, + SQLITE_UTF8, None, + cast(None, FUNC), + c_step_callback, + c_final_callback) + if ret != SQLITE_OK: + raise self._get_exception(ret) + + def iterdump(self): + from sqlite3.dump import _iterdump + return _iterdump(self) + +class Cursor(object): + def __init__(self, con): + if not isinstance(con, Connection): + raise TypeError + con._check_thread() + con._check_closed() + self.connection = con + self._description = None + self.arraysize = 1 + self.text_factory = con.text_factory + self.row_factory = None + self.rowcount = -1 + self.statement = None + + def _check_closed(self): + if not getattr(self, 'connection', None): + raise ProgrammingError("Cannot operate on a closed cursor.") + self.connection._check_thread() + self.connection._check_closed() + + def execute(self, sql, params=None): + self._description = None + if 
type(sql) is unicode: + sql = sql.encode("utf-8") + self._check_closed() + self.statement = Statement(self, sql, self.row_factory) + + if self.connection._isolation_level is not None: + if self.statement.kind == "DDL": + self.connection.commit() + elif self.statement.kind == "DML": + self.connection._begin() + + self.statement.set_params(params) + + # Actually execute the SQL statement + ret = sqlite.sqlite3_step(self.statement.statement) + if ret not in (SQLITE_DONE, SQLITE_ROW): + self.statement.reset() + raise self.connection._get_exception(ret) + + if self.statement.kind == "DQL": + self.statement._readahead() + self.statement._build_row_cast_map() + + if self.statement.kind in ("DML", "DDL"): + self.statement.reset() + + self.rowcount = -1 + if self.statement.kind == "DML": + self.rowcount = sqlite.sqlite3_changes(self.connection.db) + + return self + + def executemany(self, sql, many_params): + self._description = None + if type(sql) is unicode: + sql = sql.encode("utf-8") + self._check_closed() + self.statement = Statement(self, sql, self.row_factory) + if self.statement.kind == "DML": + self.connection._begin() + else: + raise ProgrammingError, "executemany is only for DML statements" + + self.rowcount = 0 + for params in many_params: + self.statement.set_params(params) + ret = sqlite.sqlite3_step(self.statement.statement) + if ret != SQLITE_DONE: + raise self.connection._get_exception(ret) + self.rowcount += sqlite.sqlite3_changes(self.connection.db) + + return self + + def executescript(self, sql): + self._description = None + if type(sql) is unicode: + sql = sql.encode("utf-8") + self._check_closed() + statement = c_void_p() + c_sql = c_char_p(sql) + + self.connection.commit() + while True: + rc = sqlite.sqlite3_prepare(self.connection.db, c_sql, -1, byref(statement), byref(c_sql)) + if rc != SQLITE_OK: + raise self.connection._get_exception(rc) + + rc = SQLITE_ROW + while rc == SQLITE_ROW: + if not statement: + rc = SQLITE_OK + else: + rc = 
sqlite.sqlite3_step(statement) + + if rc != SQLITE_DONE: + sqlite.sqlite3_finalize(statement) + if rc == SQLITE_OK: + return self + else: + raise self.connection._get_exception(rc) + rc = sqlite.sqlite3_finalize(statement) + if rc != SQLITE_OK: + raise self.connection._get_exception(rc) + + if not c_sql.value: + break + return self + + def __iter__(self): + return self.statement + + def fetchone(self): + self._check_closed() + if self.statement is None: + return None + + try: + return self.statement.next() + except StopIteration: + return None + + return nextrow + + def fetchmany(self, size=None): + self._check_closed() + if self.statement is None: + return [] + if size is None: + size = self.arraysize + lst = [] + for row in self.statement: + lst.append(row) + if len(lst) == size: + break + return lst + + def fetchall(self): + self._check_closed() + if self.statement is None: + return [] + return list(self.statement) + + def _getdescription(self): + if self._description is None: + self._description = self.statement._get_description() + return self._description + + def _getlastrowid(self): + return sqlite.sqlite3_last_insert_rowid(self.connection.db) + + def close(self): + if not self.connection: + return + self._check_closed() + if self.statement: + self.statement.reset() + self.statement = None + self.connection = None + + def setinputsizes(self, *args): + pass + def setoutputsize(self, *args): + pass + + + description = property(_getdescription) + lastrowid = property(_getlastrowid) + +class Statement(object): + def __init__(self, cur, sql, row_factory): + self.statement = None + if not isinstance(sql, str): + raise ValueError, "sql must be a string" + self.con = cur.connection + self.cur = weakref.ref(cur) + self.sql = sql # DEBUG ONLY + self.row_factory = row_factory + first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper() + if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + self.kind = "DML" + elif first_word in ("SELECT", 
"PRAGMA"): + self.kind = "DQL" + else: + self.kind = "DDL" + self.exhausted = False + + self.statement = c_void_p() + next_char = c_char_p() + ret = sqlite.sqlite3_prepare_v2(self.con.db, sql, -1, byref(self.statement), byref(next_char)) + if ret == SQLITE_OK and self.statement.value is None: + # an empty statement, we work around that, as it's the least trouble + ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char)) + self.kind = "DQL" + + if ret != SQLITE_OK: + raise self.con._get_exception(ret) + self.con._remember_statement(self) + if _check_remaining_sql(next_char.value): + raise Warning, "One and only one statement required" + + self._build_row_cast_map() + + def _build_row_cast_map(self): + self.row_cast_map = [] + for i in range(sqlite.sqlite3_column_count(self.statement)): + converter = None + + if self.con.detect_types & PARSE_COLNAMES: + colname = sqlite.sqlite3_column_name(self.statement, i) + if colname is not None: + type_start = -1 + key = None + for pos in range(len(colname)): + if colname[pos] == '[': + type_start = pos + 1 + elif colname[pos] == ']' and type_start != -1: + key = colname[type_start:pos] + converter = converters[key.upper()] + + if converter is None and self.con.detect_types & PARSE_DECLTYPES: + decltype = sqlite.sqlite3_column_decltype(self.statement, i) + if decltype is not None: + decltype = decltype.split()[0] # if multiple words, use first, eg. 
"INTEGER NOT NULL" => "INTEGER" + if '(' in decltype: + decltype = decltype[:decltype.index('(')] + converter = converters.get(decltype.upper(), None) + + self.row_cast_map.append(converter) + + def set_param(self, idx, param): + cvt = converters.get(type(param)) + if cvt is not None: + cvt = param = cvt(param) + + adapter = adapters.get((type(param), PrepareProtocol), None) + if adapter is not None: + param = adapter(param) + + if param is None: + sqlite.sqlite3_bind_null(self.statement, idx) + elif type(param) in (bool, int, long): + if -2147483648 <= param <= 2147483647: + sqlite.sqlite3_bind_int(self.statement, idx, param) + else: + sqlite.sqlite3_bind_int64(self.statement, idx, param) + elif type(param) is float: + sqlite.sqlite3_bind_double(self.statement, idx, param) + elif isinstance(param, str): + sqlite.sqlite3_bind_text(self.statement, idx, param, -1, SQLITE_TRANSIENT) + elif isinstance(param, unicode): + param = param.encode("utf-8") + sqlite.sqlite3_bind_text(self.statement, idx, param, -1, SQLITE_TRANSIENT) + elif type(param) is buffer: + sqlite.sqlite3_bind_blob(self.statement, idx, str(param), len(param), SQLITE_TRANSIENT) + else: + raise InterfaceError, "parameter type %s is not supported" % str(type(param)) + + def set_params(self, params): + ret = sqlite.sqlite3_reset(self.statement) + if ret != SQLITE_OK: + raise self.con._get_exception(ret) + + if params is None: + if sqlite.sqlite3_bind_parameter_count(self.statement) != 0: + raise ProgrammingError("wrong number of arguments") + return + + params_type = None + if isinstance(params, dict): + params_type = dict + else: + params_type = list + + if params_type == list: + if len(params) != sqlite.sqlite3_bind_parameter_count(self.statement): + raise ProgrammingError("wrong number of arguments") + + for idx, param in enumerate(params): + self.set_param(idx+1, param) + else: + for idx in range(1, sqlite.sqlite3_bind_parameter_count(self.statement) + 1): + param_name = 
sqlite.sqlite3_bind_parameter_name(self.statement, idx) + if param_name is None: + raise ProgrammingError, "need named parameters" + param_name = param_name[1:] + try: + param = params[param_name] + except KeyError, e: + raise ProgrammingError("missing parameter '%s'" %param) + self.set_param(idx, param) + + def __iter__(self): + return self + + def next(self): + self.con._check_closed() + self.con._check_thread() + if self.exhausted: + raise StopIteration + item = self.item + + ret = sqlite.sqlite3_step(self.statement) + if ret == SQLITE_DONE: + self.exhausted = True + self.item = None + elif ret != SQLITE_ROW: + exc = self.con._get_exception(ret) + sqlite.sqlite3_reset(self.statement) + raise exc + + self._readahead() + return item + + def _readahead(self): + self.column_count = sqlite.sqlite3_column_count(self.statement) + row = [] + for i in xrange(self.column_count): + typ = sqlite.sqlite3_column_type(self.statement, i) + converter = self.row_cast_map[i] + if converter is None: + if typ == SQLITE_INTEGER: + val = sqlite.sqlite3_column_int64(self.statement, i) + if -sys.maxint-1 <= val <= sys.maxint: + val = int(val) + elif typ == SQLITE_FLOAT: + val = sqlite.sqlite3_column_double(self.statement, i) + elif typ == SQLITE_BLOB: + blob_len = sqlite.sqlite3_column_bytes(self.statement, i) + blob = sqlite.sqlite3_column_blob(self.statement, i) + val = buffer(string_at(blob, blob_len)) + elif typ == SQLITE_NULL: + val = None + elif typ == SQLITE_TEXT: + val = sqlite.sqlite3_column_text(self.statement, i) + val = self.cur().text_factory(val) + else: + blob = sqlite.sqlite3_column_blob(self.statement, i) + if not blob: + val = None + else: + blob_len = sqlite.sqlite3_column_bytes(self.statement, i) + val = string_at(blob, blob_len) + val = converter(val) + row.append(val) + + row = tuple(row) + if self.row_factory is not None: + row = self.row_factory(self.cur(), row) + self.item = row + + def reset(self): + self.row_cast_map = None + return 
sqlite.sqlite3_reset(self.statement) + + def finalize(self): + sqlite.sqlite3_finalize(self.statement) + self.statement = None + + def __del__(self): + sqlite.sqlite3_finalize(self.statement) + self.statement = None + + def _get_description(self): + desc = [] + for i in xrange(sqlite.sqlite3_column_count(self.statement)): + name = sqlite.sqlite3_column_name(self.statement, i).split("[")[0].strip() + desc.append((name, None, None, None, None, None, None)) + return desc + +class Row(object): + def __init__(self, cursor, values): + self.description = cursor.description + self.values = values + + def __getitem__(self, item): + if type(item) is int: + return self.values[item] + else: + item = item.lower() + for idx, desc in enumerate(self.description): + if desc[0].lower() == item: + return self.values[idx] + raise KeyError + + def keys(self): + return [desc[0] for desc in self.description] + + def __eq__(self, other): + if not isinstance(other, Row): + return NotImplemented + if self.description != other.description: + return False + if self.values != other.values: + return False + return True + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(tuple(self.description)) ^ hash(tuple(self.values)) + +def _check_remaining_sql(s): + state = "NORMAL" + for char in s: + if char == chr(0): + return 0 + elif char == '-': + if state == "NORMAL": + state = "LINECOMMENT_1" + elif state == "LINECOMMENT_1": + state = "IN_LINECOMMENT" + elif char in (' ', '\t'): + pass + elif char == '\n': + if state == "IN_LINECOMMENT": + state = "NORMAL" + elif char == '/': + if state == "NORMAL": + state = "COMMENTSTART_1" + elif state == "COMMENTEND_1": + state = "NORMAL" + elif state == "COMMENTSTART_1": + return 1 + elif char == '*': + if state == "NORMAL": + return 1 + elif state == "LINECOMMENT_1": + return 1 + elif state == "COMMENTSTART_1": + state = "IN_COMMENT" + elif state == "IN_COMMENT": + state = "COMMENTEND_1" + else: + if state == 
"COMMENTEND_1": + state = "IN_COMMENT" + elif state == "IN_LINECOMMENT": + pass + elif state == "IN_COMMENT": + pass + else: + return 1 + return 0 + +def register_adapter(typ, callable): + adapters[typ, PrepareProtocol] = callable + +def register_converter(name, callable): + converters[name.upper()] = callable + +def _convert_params(con, nargs, params): + _params = [] + for i in range(nargs): + typ = sqlite.sqlite3_value_type(params[i]) + if typ == SQLITE_INTEGER: + val = sqlite.sqlite3_value_int64(params[i]) + if -sys.maxint-1 <= val <= sys.maxint: + val = int(val) + elif typ == SQLITE_FLOAT: + val = sqlite.sqlite3_value_double(params[i]) + elif typ == SQLITE_BLOB: + blob_len = sqlite.sqlite3_value_bytes(params[i]) + blob = sqlite.sqlite3_value_blob(params[i]) + val = buffer(string_at(blob, blob_len)) + elif typ == SQLITE_NULL: + val = None + elif typ == SQLITE_TEXT: + val = sqlite.sqlite3_value_text(params[i]) + # XXX changed from con.text_factory + val = unicode(val, 'utf-8') + else: + raise NotImplementedError + _params.append(val) + return _params + +def _convert_result(con, val): + if val is None: + sqlite.sqlite3_result_null(con) + elif isinstance(val, (bool, int, long)): + sqlite.sqlite3_result_int64(con, int(val)) + elif isinstance(val, str): + # XXX ignoring unicode issue + sqlite.sqlite3_result_text(con, val, len(val), SQLITE_TRANSIENT) + elif isinstance(val, unicode): + val = val.encode('utf-8') + sqlite.sqlite3_result_text(con, val, len(val), SQLITE_TRANSIENT) + elif isinstance(val, float): + sqlite.sqlite3_result_double(con, val) + elif isinstance(val, buffer): + sqlite.sqlite3_result_blob(con, str(val), len(val), SQLITE_TRANSIENT) + else: + raise NotImplementedError + +def function_callback(real_cb, context, nargs, c_params): + params = _convert_params(context, nargs, c_params) + try: + val = real_cb(*params) + except Exception, e: + msg = "user-defined function raised exception" + sqlite.sqlite3_result_error(context, msg, len(msg)) + else: + 
_convert_result(context, val) + +FUNC = CFUNCTYPE(None, c_void_p, c_int, POINTER(c_void_p)) +STEP = CFUNCTYPE(None, c_void_p, c_int, POINTER(c_void_p)) +FINAL = CFUNCTYPE(None, c_void_p) +sqlite.sqlite3_create_function.argtypes = [c_void_p, c_char_p, c_int, c_int, c_void_p, FUNC, STEP, FINAL] +sqlite.sqlite3_create_function.restype = c_int + +sqlite.sqlite3_aggregate_context.argtypes = [c_void_p, c_int] +sqlite.sqlite3_aggregate_context.restype = c_void_p + +COLLATION = CFUNCTYPE(c_int, c_void_p, c_int, c_void_p, c_int, c_void_p) +sqlite.sqlite3_create_collation.argtypes = [c_void_p, c_char_p, c_int, c_void_p, COLLATION] +sqlite.sqlite3_create_collation.restype = c_int + +PROGRESS = CFUNCTYPE(c_int, c_void_p) +sqlite.sqlite3_progress_handler.argtypes = [c_void_p, c_int, PROGRESS, c_void_p] +sqlite.sqlite3_progress_handler.restype = c_int + +AUTHORIZER = CFUNCTYPE(c_int, c_void_p, c_int, c_char_p, c_char_p, c_char_p, c_char_p) +sqlite.sqlite3_set_authorizer.argtypes = [c_void_p, AUTHORIZER, c_void_p] +sqlite.sqlite3_set_authorizer.restype = c_int + +converters = {} +adapters = {} + +class PrepareProtocol(object): + pass + +def register_adapters_and_converters(): + def adapt_date(val): + return val.isoformat() + + def adapt_datetime(val): + return val.isoformat(" ") + + def convert_date(val): + return datetime.date(*map(int, val.split("-"))) + + def convert_timestamp(val): + datepart, timepart = val.split(" ") + year, month, day = map(int, datepart.split("-")) + timepart_full = timepart.split(".") + hours, minutes, seconds = map(int, timepart_full[0].split(":")) + if len(timepart_full) == 2: + microseconds = int(timepart_full[1]) + else: + microseconds = 0 + + val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds) + return val + + + register_adapter(datetime.date, adapt_date) + register_adapter(datetime.datetime, adapt_datetime) + register_converter("date", convert_date) + register_converter("timestamp", convert_timestamp) + +def 
OptimizedUnicode(s): + try: + val = unicode(s, "ascii").encode("ascii") + except UnicodeDecodeError: + val = unicode(s, "utf-8") + return val + +register_adapters_and_converters() diff --git a/lib_pypy/cmath.py b/lib_pypy/cmath.py deleted file mode 100644 --- a/lib_pypy/cmath.py +++ /dev/null @@ -1,288 +0,0 @@ -"""This module is always available. It provides access to mathematical -functions for complex numbers.""" - -# Complex math module - -# much code borrowed from mathmodule.c - -import math -from math import e, pi - -try: from __pypy__ import builtinify -except ImportError: builtinify = lambda f: f - - -# constants -_one = complex(1., 0.) -_half = complex(0.5, 0.) -_i = complex(0., 1.) -_halfi = complex(0., 0.5) - - - -# internal functions not available from Python -def _to_complex(x): - if isinstance(x, complex): - return x - if isinstance(x, (str, unicode)): - raise TypeError('float or complex required') - return complex(x) - -def _prodi(x): - x = _to_complex(x) - real = -x.imag - imag = x.real - return complex(real, imag) - - - at builtinify -def phase(x): - x = _to_complex(x) - return math.atan2(x.imag, x.real) - - - at builtinify -def polar(x): - x = _to_complex(x) - phi = math.atan2(x.imag, x.real) - r = abs(x) - return r, phi - - - at builtinify -def rect(r, phi): - return complex(r * math.cos(phi), r * math.sin(phi)) - - - at builtinify -def acos(x): - """acos(x) - - Return the arc cosine of x.""" - - x = _to_complex(x) - return -(_prodi(log((x+(_i*sqrt((_one-(x*x)))))))) - - - at builtinify -def acosh(x): - """acosh(x) - - Return the hyperbolic arccosine of x.""" - - x = _to_complex(x) - z = log(_sqrt_half*(sqrt(x+_one)+sqrt(x-_one))) - return z+z - - - at builtinify -def asin(x): - """asin(x) - - Return the arc sine of x.""" - - x = _to_complex(x) - # -i * log[(sqrt(1-x**2) + i*x] - squared = x*x - sqrt_1_minus_x_sq = sqrt(_one-squared) - return -(_prodi(log((sqrt_1_minus_x_sq+_prodi(x))))) - - - at builtinify -def asinh(x): - """asinh(x) - - Return 
the hyperbolic arc sine of x.""" - - x = _to_complex(x) - z = log((_sqrt_half * (sqrt(x+_i)+sqrt((x-_i))) )) - return z+z - - - at builtinify -def atan(x): - """atan(x) - - Return the arc tangent of x.""" - - x = _to_complex(x) - return _halfi*log(((_i+x)/(_i-x))) - - - at builtinify -def atanh(x): - """atanh(x) - - Return the hyperbolic arc tangent of x.""" - - x = _to_complex(x) - return _half*log((_one+x)/(_one-x)) - - - at builtinify -def cos(x): - """cos(x) - - Return the cosine of x.""" - - x = _to_complex(x) - real = math.cos(x.real) * math.cosh(x.imag) - imag = -math.sin(x.real) * math.sinh(x.imag) - return complex(real, imag) - - - at builtinify -def cosh(x): - """cosh(x) - - Return the hyperbolic cosine of x.""" - - x = _to_complex(x) - real = math.cos(x.imag) * math.cosh(x.real) - imag = math.sin(x.imag) * math.sinh(x.real) - return complex(real, imag) - - - at builtinify -def exp(x): - """exp(x) - - Return the exponential value e**x.""" - - x = _to_complex(x) - l = math.exp(x.real) - real = l * math.cos(x.imag) - imag = l * math.sin(x.imag) - return complex(real, imag) - - - at builtinify -def log(x, base=None): - """log(x) - - Return the natural logarithm of x.""" - - if base is not None: - return log(x) / log(base) - x = _to_complex(x) - l = math.hypot(x.real,x.imag) - imag = math.atan2(x.imag, x.real) - real = math.log(l) - return complex(real, imag) - - - at builtinify -def log10(x): - """log10(x) - - Return the base-10 logarithm of x.""" - - x = _to_complex(x) - l = math.hypot(x.real, x.imag) - imag = math.atan2(x.imag, x.real)/math.log(10.) 
- real = math.log10(l) - return complex(real, imag) - - - at builtinify -def sin(x): - """sin(x) - - Return the sine of x.""" - - x = _to_complex(x) - real = math.sin(x.real) * math.cosh(x.imag) - imag = math.cos(x.real) * math.sinh(x.imag) - return complex(real, imag) - - - at builtinify -def sinh(x): - """sinh(x) - - Return the hyperbolic sine of x.""" - - x = _to_complex(x) - real = math.cos(x.imag) * math.sinh(x.real) - imag = math.sin(x.imag) * math.cosh(x.real) - return complex(real, imag) - - - at builtinify -def sqrt(x): - """sqrt(x) - - Return the square root of x.""" - - x = _to_complex(x) - if x.real == 0. and x.imag == 0.: - real, imag = 0, 0 - else: - s = math.sqrt(0.5*(math.fabs(x.real) + math.hypot(x.real,x.imag))) - d = 0.5*x.imag/s - if x.real > 0.: - real = s - imag = d - elif x.imag >= 0.: - real = d - imag = s - else: - real = -d - imag = -s - return complex(real, imag) - -_sqrt_half = sqrt(_half) - - - at builtinify -def tan(x): - """tan(x) - - Return the tangent of x.""" - - x = _to_complex(x) - sr = math.sin(x.real) - cr = math.cos(x.real) - shi = math.sinh(x.imag) - chi = math.cosh(x.imag) - rs = sr * chi - is_ = cr * shi - rc = cr * chi - ic = -sr * shi - d = rc*rc + ic * ic - real = (rs*rc + is_*ic) / d - imag = (is_*rc - rs*ic) / d - return complex(real, imag) - - - at builtinify -def tanh(x): - """tanh(x) - - Return the hyperbolic tangent of x.""" - - x = _to_complex(x) - si = math.sin(x.imag) - ci = math.cos(x.imag) - shr = math.sinh(x.real) - chr = math.cosh(x.real) - rs = ci * shr - is_ = si * chr - rc = ci * chr - ic = si * shr - d = rc*rc + ic*ic - real = (rs*rc + is_*ic) / d - imag = (is_*rc - rs*ic) / d - return complex(real, imag) - -def isnan(x): - """isnan(z) -> bool - Checks if the real or imaginary part of z not a number (NaN)""" - x = _to_complex(x) - return math.isnan(x.real) or math.isnan(x.imag) - -def isinf(x): - """isnan(z) -> bool - Checks if the real or imaginary part of z is infinite""" - x = _to_complex(x) - return 
math.isinf(x.real) or math.isinf(x.imag) diff --git a/lib_pypy/disassembler.py b/lib_pypy/disassembler.py new file mode 100644 --- /dev/null +++ b/lib_pypy/disassembler.py @@ -0,0 +1,298 @@ +"""Disassembler of Python byte code into mnemonics. + +Comes from standard library, modified for the purpose of having a structured +view on things +""" + +import sys +import types +import inspect + +from opcode import * +from opcode import __all__ as _opcodes_all + +__all__ = ["dis","disassemble","distb","disco"] + _opcodes_all +del _opcodes_all + +class Opcode(object): + """ An abstract base class for all opcode implementations + """ + def __init__(self, pos, lineno, arg=None, argstr=''): + self.pos = pos + self.arg = arg + self.argstr = argstr + self.lineno = lineno + self.line_starts_here = False + + def __repr__(self): + if self.arg is None: + return "<%s at %d>" % (self.__class__.__name__, self.pos) + return "<%s (%s) at %d>" % (self.__class__.__name__, self.arg, self.pos) + +class CodeRepresentation(object): + """ Representation of opcodes + """ + def __init__(self, opcodes, co, source): + self.opcodes = opcodes + self.co = co + self.map = {} + current_lineno = None + for opcode in opcodes: + self.map[opcode.pos] = opcode + if opcode.lineno != current_lineno: + opcode.line_starts_here = True + current_lineno = opcode.lineno + self.source = source.split("\n") + +def _setup(): + for opcode in opname: + if not opcode.startswith('<'): + class O(Opcode): + pass + opcode = opcode.replace('+', '_') + O.__name__ = opcode + globals()[opcode] = O + +_setup() + +def dis(x=None): + """Disassemble classes, methods, functions, or code. + + With no argument, disassemble the last traceback. 
+ + """ + if x is None: + distb() + return + if type(x) is types.InstanceType: + x = x.__class__ + if hasattr(x, 'im_func'): + x = x.im_func + if hasattr(x, 'func_code'): + x = x.func_code + if hasattr(x, '__dict__'): + xxx + items = x.__dict__.items() + items.sort() + for name, x1 in items: + if type(x1) in (types.MethodType, + types.FunctionType, + types.CodeType, + types.ClassType): + print "Disassembly of %s:" % name + try: + dis(x1) + except TypeError, msg: + print "Sorry:", msg + print + elif hasattr(x, 'co_code'): + return disassemble(x) + elif isinstance(x, str): + return disassemble_string(x) + else: + raise TypeError, \ + "don't know how to disassemble %s objects" % \ + type(x).__name__ + +def distb(tb=None): + """Disassemble a traceback (default: last traceback).""" + if tb is None: + try: + tb = sys.last_traceback + except AttributeError: + raise RuntimeError, "no last traceback to disassemble" + while tb.tb_next: tb = tb.tb_next + disassemble(tb.tb_frame.f_code, tb.tb_lasti) + +def disassemble(co, lasti=-1): + """Disassemble a code object.""" + source = inspect.getsource(co) + code = co.co_code + labels = findlabels(code) + linestarts = dict(findlinestarts(co)) + n = len(code) + i = 0 + extended_arg = 0 + free = None + res = [] + lastline = co.co_firstlineno + while i < n: + c = code[i] + op = ord(c) + if i in linestarts: + lastline = linestarts[i] + + #if i == lasti: + # xxx + # print '-->', + #else: + # xxx + # print ' ', + #if i in labels: + # xxx + # print '>>', + #else: + # xxx + # print ' ', + #xxx + pos = i + i = i + 1 + if op >= HAVE_ARGUMENT: + oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg + opargstr = str(oparg) + extended_arg = 0 + i = i+2 + if op == EXTENDED_ARG: + extended_arg = oparg*65536L + if op in hasconst: + opargstr = repr(co.co_consts[oparg]) + elif op in hasname: + opargstr = co.co_names[oparg] + elif op in hasjrel: + opargstr = 'to ' + repr(i + oparg) + elif op in haslocal: + opargstr = co.co_varnames[oparg] + elif op 
in hascompare: + opargstr = cmp_op[oparg] + elif op in hasfree: + if free is None: + free = co.co_cellvars + co.co_freevars + opargstr = free[oparg] + else: + oparg = None + opargstr = '' + opcls = globals()[opname[op].replace('+', '_')] + res.append(opcls(pos, lastline, oparg, opargstr)) + return CodeRepresentation(res, co, source) + +def disassemble_string(code, lasti=-1, varnames=None, names=None, + constants=None): + labels = findlabels(code) + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if i == lasti: + xxx + print '-->', + else: + xxx + print ' ', + if i in labels: + xxx + print '>>', + else: + xxx + print ' ', + xxxx + print repr(i).rjust(4), + print opname[op].ljust(15), + i = i+1 + if op >= HAVE_ARGUMENT: + oparg = ord(code[i]) + ord(code[i+1])*256 + i = i+2 + xxx + print repr(oparg).rjust(5), + if op in hasconst: + if constants: + xxx + print '(' + repr(constants[oparg]) + ')', + else: + xxx + print '(%d)'%oparg, + elif op in hasname: + if names is not None: + xxx + print '(' + names[oparg] + ')', + else: + xxx + print '(%d)'%oparg, + elif op in hasjrel: + xxx + print '(to ' + repr(i + oparg) + ')', + elif op in haslocal: + if varnames: + xxx + print '(' + varnames[oparg] + ')', + else: + xxx + print '(%d)' % oparg, + elif op in hascompare: + xxx + print '(' + cmp_op[oparg] + ')', + xxx + print + +disco = disassemble # XXX For backwards compatibility + +def findlabels(code): + """Detect all offsets in a byte code which are jump targets. + + Return the list of offsets. + + """ + labels = [] + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + i = i+1 + if op >= HAVE_ARGUMENT: + oparg = ord(code[i]) + ord(code[i+1])*256 + i = i+2 + label = -1 + if op in hasjrel: + label = i+oparg + elif op in hasjabs: + label = oparg + if label >= 0: + if label not in labels: + labels.append(label) + return labels + +def findlinestarts(code): + """Find the offsets in a byte code which are start of lines in the source. 
+ + Generate pairs (offset, lineno) as described in Python/compile.c. + + """ + byte_increments = [ord(c) for c in code.co_lnotab[0::2]] + line_increments = [ord(c) for c in code.co_lnotab[1::2]] + + lastlineno = None + lineno = code.co_firstlineno + addr = 0 + for byte_incr, line_incr in zip(byte_increments, line_increments): + if byte_incr: + if lineno != lastlineno: + yield (addr, lineno) + lastlineno = lineno + addr += byte_incr + lineno += line_incr + if lineno != lastlineno: + yield (addr, lineno) + +def _test(): + """Simple test program to disassemble a file.""" + if sys.argv[1:]: + if sys.argv[2:]: + sys.stderr.write("usage: python dis.py [-|file]\n") + sys.exit(2) + fn = sys.argv[1] + if not fn or fn == "-": + fn = None + else: + fn = None + if fn is None: + f = sys.stdin + else: + f = open(fn) + source = f.read() + if fn is not None: + f.close() + else: + fn = "" + code = compile(source, fn, "exec") + dis(code) diff --git a/lib_pypy/pypy_test/inprogress_test_binascii_extra.py b/lib_pypy/pypy_test/inprogress_test_binascii_extra.py --- a/lib_pypy/pypy_test/inprogress_test_binascii_extra.py +++ b/lib_pypy/pypy_test/inprogress_test_binascii_extra.py @@ -1,5 +1,5 @@ from __future__ import absolute_import -from .. import binascii +from lib_pypy import binascii def test_uu(): assert binascii.b2a_uu('1234567') == "',3(S-#4V-P \n" diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py --- a/lib_pypy/pypy_test/test_binascii.py +++ b/lib_pypy/pypy_test/test_binascii.py @@ -1,6 +1,6 @@ from __future__ import absolute_import import py -from .. import binascii +from lib_pypy import binascii # Create binary test data data = "The quick brown fox jumps over the lazy dog.\r\n" diff --git a/lib_pypy/pypy_test/test_collections.py b/lib_pypy/pypy_test/test_collections.py --- a/lib_pypy/pypy_test/test_collections.py +++ b/lib_pypy/pypy_test/test_collections.py @@ -1,5 +1,5 @@ from __future__ import absolute_import -from .. 
import _collections as collections +from lib_pypy import _collections as collections import py class TestDeque: diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py --- a/lib_pypy/pypy_test/test_coroutine.py +++ b/lib_pypy/pypy_test/test_coroutine.py @@ -2,7 +2,7 @@ from py.test import skip, raises try: - from ..stackless import coroutine, CoroutineExit + from lib_pypy.stackless import coroutine, CoroutineExit except ImportError, e: skip('cannot import stackless: %s' % (e,)) diff --git a/lib_pypy/pypy_test/test_ctypes_support.py b/lib_pypy/pypy_test/test_ctypes_support.py --- a/lib_pypy/pypy_test/test_ctypes_support.py +++ b/lib_pypy/pypy_test/test_ctypes_support.py @@ -5,7 +5,7 @@ try: from ctypes_support import standard_c_lib, get_errno, set_errno except ImportError: # on top of cpython - from ..ctypes_support import standard_c_lib, get_errno, set_errno + from lib_pypy.ctypes_support import standard_c_lib, get_errno, set_errno def test_stdlib_and_errno(): diff --git a/lib_pypy/pypy_test/test_datetime.py b/lib_pypy/pypy_test/test_datetime.py --- a/lib_pypy/pypy_test/test_datetime.py +++ b/lib_pypy/pypy_test/test_datetime.py @@ -1,6 +1,7 @@ from __future__ import absolute_import +import py -from .. import datetime +from lib_pypy import datetime def test_repr(): print datetime @@ -8,7 +9,9 @@ assert repr(datetime.datetime(1,2,3)) == expected def test_strptime(): - import time + import time, sys + if sys.version_info < (2, 6): + py.test.skip("needs the _strptime module") string = '2004-12-01 13:02:47' format = '%Y-%m-%d %H:%M:%S' diff --git a/lib_pypy/pypy_test/test_dbm_extra.py b/lib_pypy/pypy_test/test_dbm_extra.py --- a/lib_pypy/pypy_test/test_dbm_extra.py +++ b/lib_pypy/pypy_test/test_dbm_extra.py @@ -2,7 +2,7 @@ import py from pypy.tool.udir import udir try: - from .. 
import dbm + from lib_pypy import dbm except ImportError, e: py.test.skip(e) diff --git a/lib_pypy/pypy_test/test_defaultdict.py b/lib_pypy/pypy_test/test_defaultdict.py --- a/lib_pypy/pypy_test/test_defaultdict.py +++ b/lib_pypy/pypy_test/test_defaultdict.py @@ -10,7 +10,7 @@ import copy -from .._collections import defaultdict +from lib_pypy._collections import defaultdict def foobar(): return list diff --git a/lib_pypy/pypy_test/test_deque_extra.py b/lib_pypy/pypy_test/test_deque_extra.py --- a/lib_pypy/pypy_test/test_deque_extra.py +++ b/lib_pypy/pypy_test/test_deque_extra.py @@ -7,7 +7,7 @@ class Test_deque: def setup_method(self,method): - from .._collections import deque + from lib_pypy._collections import deque self.deque = deque self.d = deque(range(n)) diff --git a/lib_pypy/pypy_test/test_grp_extra.py b/lib_pypy/pypy_test/test_grp_extra.py --- a/lib_pypy/pypy_test/test_grp_extra.py +++ b/lib_pypy/pypy_test/test_grp_extra.py @@ -1,7 +1,7 @@ from __future__ import absolute_import import py try: - from .. import grp + from lib_pypy import grp except ImportError: py.test.skip("No grp module on this platform") diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py --- a/lib_pypy/pypy_test/test_locale.py +++ b/lib_pypy/pypy_test/test_locale.py @@ -2,10 +2,10 @@ import py import sys -from ..ctypes_config_cache import rebuild +from lib_pypy.ctypes_config_cache import rebuild rebuild.rebuild_one('locale.ctc.py') -from .. import _locale +from lib_pypy import _locale def setup_module(mod): diff --git a/lib_pypy/pypy_test/test_marshal_extra.py b/lib_pypy/pypy_test/test_marshal_extra.py --- a/lib_pypy/pypy_test/test_marshal_extra.py +++ b/lib_pypy/pypy_test/test_marshal_extra.py @@ -2,7 +2,7 @@ import py import sys import marshal as cpy_marshal -from .. 
import _marshal as marshal +from lib_pypy import _marshal as marshal from pypy.tool.udir import udir diff --git a/lib_pypy/pypy_test/test_md5_extra.py b/lib_pypy/pypy_test/test_md5_extra.py --- a/lib_pypy/pypy_test/test_md5_extra.py +++ b/lib_pypy/pypy_test/test_md5_extra.py @@ -6,7 +6,7 @@ from __future__ import absolute_import import md5 # CPython's implementation in C. -from .. import _md5 as pymd5 +from lib_pypy import _md5 as pymd5 # Helpers... diff --git a/lib_pypy/pypy_test/test_pyexpat.py b/lib_pypy/pypy_test/test_pyexpat.py --- a/lib_pypy/pypy_test/test_pyexpat.py +++ b/lib_pypy/pypy_test/test_pyexpat.py @@ -5,10 +5,10 @@ import StringIO, sys import unittest, py -from ..ctypes_config_cache import rebuild +from lib_pypy.ctypes_config_cache import rebuild rebuild.rebuild_one('pyexpat.ctc.py') -from .. import pyexpat +from lib_pypy import pyexpat #from xml.parsers import expat expat = pyexpat diff --git a/lib_pypy/pypy_test/test_resource.py b/lib_pypy/pypy_test/test_resource.py --- a/lib_pypy/pypy_test/test_resource.py +++ b/lib_pypy/pypy_test/test_resource.py @@ -1,8 +1,8 @@ from __future__ import absolute_import -from ..ctypes_config_cache import rebuild +from lib_pypy.ctypes_config_cache import rebuild rebuild.rebuild_one('resource.ctc.py') -from .. import resource +from lib_pypy import resource def test_resource(): x = resource.getrusage(resource.RUSAGE_SELF) diff --git a/lib_pypy/pypy_test/test_sha_extra.py b/lib_pypy/pypy_test/test_sha_extra.py --- a/lib_pypy/pypy_test/test_sha_extra.py +++ b/lib_pypy/pypy_test/test_sha_extra.py @@ -4,7 +4,7 @@ # Publication 180-1, Secure Hash Standard, 1995 April 17 # http://www.itl.nist.gov/div897/pubs/fip180-1.htm from __future__ import absolute_import -from .. 
import _sha as pysha +from lib_pypy import _sha as pysha class TestSHA: def check(self, data, digest): diff --git a/lib_pypy/pypy_test/test_stackless.py b/lib_pypy/pypy_test/test_stackless.py --- a/lib_pypy/pypy_test/test_stackless.py +++ b/lib_pypy/pypy_test/test_stackless.py @@ -10,7 +10,7 @@ import stackless except ImportError: try: - from .. import stackless + from lib_pypy import stackless except ImportError, e: skip('cannot import stackless: %s' % (e,)) diff --git a/lib_pypy/pypy_test/test_stackless_pickling.py b/lib_pypy/pypy_test/test_stackless_pickling.py --- a/lib_pypy/pypy_test/test_stackless_pickling.py +++ b/lib_pypy/pypy_test/test_stackless_pickling.py @@ -8,7 +8,7 @@ import stackless except ImportError: try: - from .. import stackless as stackless + from lib_pypy import stackless as stackless except ImportError, e: skip('cannot import stackless: %s' % (e,)) diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py --- a/lib_pypy/pypy_test/test_struct_extra.py +++ b/lib_pypy/pypy_test/test_struct_extra.py @@ -1,5 +1,5 @@ from __future__ import absolute_import -from .. import struct +from lib_pypy import struct def test_simple(): morezeros = '\x00' * (struct.calcsize('l')-4) diff --git a/lib_pypy/pypy_test/test_structseq.py b/lib_pypy/pypy_test/test_structseq.py --- a/lib_pypy/pypy_test/test_structseq.py +++ b/lib_pypy/pypy_test/test_structseq.py @@ -1,6 +1,6 @@ from __future__ import absolute_import import py -from .._structseq import structseqfield, structseqtype +from lib_pypy._structseq import structseqfield, structseqtype class mydata: diff --git a/lib_pypy/pypy_test/test_syslog.py b/lib_pypy/pypy_test/test_syslog.py --- a/lib_pypy/pypy_test/test_syslog.py +++ b/lib_pypy/pypy_test/test_syslog.py @@ -1,10 +1,10 @@ from __future__ import absolute_import # XXX very minimal test -from ..ctypes_config_cache import rebuild +from lib_pypy.ctypes_config_cache import rebuild rebuild.rebuild_one('syslog.ctc.py') -from .. 
import syslog +from lib_pypy import syslog def test_syslog(): diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py --- a/pypy/annotation/binaryop.py +++ b/pypy/annotation/binaryop.py @@ -23,6 +23,7 @@ from pypy.annotation.bookkeeper import getbookkeeper from pypy.objspace.flow.model import Variable, Constant from pypy.rlib import rarithmetic +from pypy.tool.error import AnnotatorError # convenience only! def immutablevalue(x): @@ -819,6 +820,24 @@ class __extend__(pairtype(SomePBC, SomeObject)): def getitem((pbc, o)): + if not pbc.isNone(): + raise AnnotatorError("getitem on %r" % pbc) + return s_ImpossibleValue + + def setitem((pbc, o), s_value): + if not pbc.isNone(): + raise AnnotatorError("setitem on %r" % pbc) + +class __extend__(pairtype(SomePBC, SomeString)): + def add((pbc, o)): + if not pbc.isNone(): + raise AnnotatorError('add on %r' % pbc) + return s_ImpossibleValue + +class __extend__(pairtype(SomeString, SomePBC)): + def add((o, pbc)): + if not pbc.isNone(): + raise AnnotatorError('add on %r' % pbc) return s_ImpossibleValue class __extend__(pairtype(SomeExternalObject, SomeExternalObject)): diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py --- a/pypy/annotation/classdef.py +++ b/pypy/annotation/classdef.py @@ -124,9 +124,9 @@ "allowed" % (self.name, homedef) ) - self.bookkeeper.warning("demoting method %s " - "to base class %s" % - (self.name, homedef)) + #self.bookkeeper.warning("demoting method %s " + # "to base class %s" % + # (self.name, homedef)) break # check for attributes forbidden by slots or _attrs_ diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -163,11 +163,16 @@ immutable = True def __eq__(self, other): - # NaN unpleasantness. 
if (type(self) is SomeFloat and type(other) is SomeFloat and - self.is_constant() and other.is_constant() and - isnan(self.const) and isnan(other.const)): - return True + self.is_constant() and other.is_constant()): + # NaN unpleasantness. + if isnan(self.const) and isnan(other.const): + return True + # 0.0 vs -0.0 unpleasantness. + if not self.const and not other.const: + from pypy.rlib.rarithmetic import copysign + return copysign(1., self.const) == copysign(1., other.const) + # return super(SomeFloat, self).__eq__(other) def can_be_none(self): diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py --- a/pypy/annotation/unaryop.py +++ b/pypy/annotation/unaryop.py @@ -303,6 +303,7 @@ def method_pop(lst, s_index=None): lst.listdef.resize() return lst.listdef.read_item() + method_pop.can_only_throw = [IndexError] def method_index(lst, s_value): getbookkeeper().count("list_index") @@ -642,6 +643,16 @@ class __extend__(SomeBuiltin): + def _can_only_throw(bltn, *args): + analyser_func = getattr(bltn.analyser, 'im_func', None) + can_only_throw = getattr(analyser_func, 'can_only_throw', None) + if can_only_throw is None or isinstance(can_only_throw, list): + return can_only_throw + if bltn.s_self is not None: + return can_only_throw(bltn.s_self, *args) + else: + return can_only_throw(*args) + def simple_call(bltn, *args): if bltn.s_self is not None: return bltn.analyser(bltn.s_self, *args) @@ -649,6 +660,7 @@ if bltn.methodname: getbookkeeper().count(bltn.methodname.replace('.', '_'), *args) return bltn.analyser(*args) + simple_call.can_only_throw = _can_only_throw def call(bltn, args, implicit_init=False): args_s, kwds = args.unpack() @@ -670,7 +682,8 @@ getattr.can_only_throw = [] def setattr(pbc, s_attr, s_value): - getbookkeeper().warning("setattr not wanted on %r" % (pbc,)) + if not pbc.isNone(): + raise AnnotatorError("setattr on %r" % pbc) def call(pbc, args): bookkeeper = getbookkeeper() diff --git a/pypy/config/pypyoption.py 
b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -4,6 +4,7 @@ from pypy.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption from pypy.config.config import ChoiceOption, StrOption, to_optparse, Config from pypy.config.config import ConflictConfigError +from pypy.config.translationoption import IS_64_BITS modulepath = py.path.local(__file__).dirpath().dirpath().join("module") all_modules = [p.basename for p in modulepath.listdir() @@ -18,7 +19,7 @@ default_modules = essential_modules.copy() default_modules.update(dict.fromkeys( ["_codecs", "gc", "_weakref", "marshal", "errno", "imp", - "math", "_sre", "_pickle_support", "operator", + "math", "cmath", "_sre", "_pickle_support", "operator", "parser", "symbol", "token", "_ast", "_io", "_random", "__pypy__", "_testing"])) @@ -26,12 +27,13 @@ # --allworkingmodules working_modules = default_modules.copy() working_modules.update(dict.fromkeys( - ["_socket", "unicodedata", "mmap", "fcntl", + ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", - "_bisect", "binascii", "_multiprocessing", '_warnings'] + "_bisect", "binascii", "_multiprocessing", '_warnings', + "_collections"] )) translation_modules = default_modules.copy() @@ -91,6 +93,7 @@ "bz2" : ["pypy.module.bz2.interp_bz2"], "pyexpat" : ["pypy.module.pyexpat.interp_pyexpat"], "_ssl" : ["pypy.module._ssl.interp_ssl"], + "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], } @@ -164,7 +167,7 @@ suggests=[("objspace.allworkingmodules", False)]), BoolOption("geninterp", "specify whether geninterp should be used", - default=True), + default=False), BoolOption("logbytecodes", "keep track of bytecode usage", @@ -212,6 
+215,11 @@ IntOption("prebuiltintto", "highest integer which is prebuilt", default=100, cmdline="--prebuiltintto"), + BoolOption("withsmalllong", "use a version of 'long' in a C long long", + default=False, + requires=[("objspace.std.withsmallint", False)]), + # ^^^ because of missing delegate_xx2yy + BoolOption("withstrjoin", "use strings optimized for addition", default=False), @@ -349,6 +357,8 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) + if not IS_64_BITS: + config.objspace.std.suggest(withsmalllong=True) # extra costly optimizations only go in level 3 if level == '3': @@ -364,6 +374,8 @@ config.objspace.std.suggest(withmapdict=True) config.objspace.std.suggest(withstrslice=True) config.objspace.std.suggest(withstrjoin=True) + if not IS_64_BITS: + config.objspace.std.suggest(withsmalllong=True) # xxx other options? ropes maybe? # completely disable geninterp in a level 0 translation diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -128,7 +128,7 @@ for name in ('int', 'long', 'str', 'unicode'): setattr(self, 'w_' + name, eval(name)) - + def appexec(self, args, body): body = body.lstrip() @@ -150,7 +150,7 @@ def str_w(self, w_str): return w_str - def newdict(self): + def newdict(self, module=None): return {} def newtuple(self, iterable): @@ -294,16 +294,6 @@ py.test.skip("need translated pypy with: %s, got %s" %(ropts,options)) -def getwithoutbinding(x, name): - try: - return x.__dict__[name] - except (AttributeError, KeyError): - for cls in getmro(x.__class__): - if name in cls.__dict__: - return cls.__dict__[name] - # uh? 
not found anywhere, fall back (which might raise AttributeError) - return getattr(x, name) - class LazyObjSpaceGetter(object): def __get__(self, obj, cls=None): space = gettestobjspace() @@ -429,10 +419,7 @@ for name in dir(instance): if name.startswith('w_'): if option.runappdirect: - # if the value is a function living on the class, - # don't turn it into a bound method here - obj = getwithoutbinding(instance, name) - setattr(instance, name[2:], obj) + setattr(instance, name[2:], getattr(instance, name)) else: obj = getattr(instance, name) if isinstance(obj, types.MethodType): diff --git a/pypy/doc/architecture.txt b/pypy/doc/architecture.txt --- a/pypy/doc/architecture.txt +++ b/pypy/doc/architecture.txt @@ -14,7 +14,7 @@ We aim to provide: * a common translation and support framework for producing - implementations of dynamic languages, emphasising a clean + implementations of dynamic languages, emphasizing a clean separation between language specification and implementation aspects. diff --git a/pypy/doc/buildtool.txt b/pypy/doc/buildtool.txt --- a/pypy/doc/buildtool.txt +++ b/pypy/doc/buildtool.txt @@ -165,7 +165,7 @@ Note that there is a test project in 'tool/build/testproject' that can serve as an example. -Prerequisities +Prerequisites -------------- Your project can use the build tool if: diff --git a/pypy/doc/carbonpython.txt b/pypy/doc/carbonpython.txt --- a/pypy/doc/carbonpython.txt +++ b/pypy/doc/carbonpython.txt @@ -167,7 +167,7 @@ return self.x -Note that the type of ``self`` must not be speficied: it will +Note that the type of ``self`` must not be specified: it will automatically assumed to be ``MyClass``. The ``__init__`` method is not automatically mapped to the .NET @@ -182,7 +182,7 @@ outside; the RPython compiler automatically calls ``__init__`` whenever an RPython class is instantiated. 
-In the future this discrepacy will be fixed and the ``__init__`` +In the future this discrepancy will be fixed and the ``__init__`` method will be automatically mapped to the constructor. @@ -208,7 +208,7 @@ At the moment there is no special syntax support for indexers and properties: for example, you can't access ArrayList's elements using -the square bracked notation, but you have to call the call the +the square bracket notation, but you have to call the call the ``get_Item`` and ``set_Item`` methods; similarly, to access a property ``XXX`` you need to call ``get_XXX`` and ``set_XXX``:: diff --git a/pypy/doc/cleanup-todo.txt b/pypy/doc/cleanup-todo.txt --- a/pypy/doc/cleanup-todo.txt +++ b/pypy/doc/cleanup-todo.txt @@ -15,7 +15,7 @@ simplify translator/c/gc.py - clean up the tangle of including headers in the C backend - make approach for loading modules more sane, mixedmodule capture - too many platform dependecies especially for pypy-cli + too many platform dependencies especially for pypy-cli - review pdbplus, especially the graph commands, also in the light of https://codespeak.net/issue/pypy-dev/issue303 and the fact that we can have more than one translator/annotator around (with the diff --git a/pypy/doc/cli-backend.txt b/pypy/doc/cli-backend.txt --- a/pypy/doc/cli-backend.txt +++ b/pypy/doc/cli-backend.txt @@ -14,7 +14,7 @@ While in an ideal world we might suppose GenCLI to run fine with every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to mantain +live in is far from ideal, so extra efforts can be needed to maintain compatibility with more than one implementation. At the moment of writing the two most popular implementations of the @@ -84,7 +84,7 @@ returning the name of the helpers and one subclass for each of the two supported platforms. 
-Since Microfost ``ilasm`` is not capable of compiling the PyPy +Since Microsoft ``ilasm`` is not capable of compiling the PyPy standard interpreter due to its size, on Windows machines we also look for an existing Mono installation: if present, we use CLR for everything except the assembling phase, for which we use Mono's @@ -132,7 +132,7 @@ while .NET only supports Unicode with the char type. There are at least two ways to map plain Char to CTS: - - map UniChar to char, thus mantaining the original distinction + - map UniChar to char, thus maintaining the original distinction between the two types: this has the advantage of being a one-to-one translation, but has the disadvantage that RPython strings will not be recognized as .NET strings, since they only @@ -234,7 +234,7 @@ subtle bugs in more complex ones, because the two exception hierarchies don't completely overlap. -At the moment we've choosen to build an RPython exception hierarchy +At the moment we've chosen to build an RPython exception hierarchy completely independent from the CLI one, but this means that we can't rely on exceptions raised by built-in operations. The currently implemented solution is to do an exception translation on-the-fly. @@ -242,7 +242,7 @@ As an example consider the RPython int_add_ovf operation, that sums two integers and raises an OverflowError exception in case of overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowExcepion when the result +instruction that raises System.OverflowException when the result overflows, catch that exception and throw a new one:: .try @@ -267,7 +267,7 @@ represented by flow graphs that we need to translate CLI IL code. 
Flow graphs are expressed in a format that is very suitable for being translated to low level code, so that phase is quite straightforward, -though the code is a bit involed because we need to take care of three +though the code is a bit involved because we need to take care of three different types of blocks. The code doing this work is located in the Function.render @@ -278,7 +278,7 @@ statement used for indicating the virtual machine the number and type of local variables used. -Then it sequentally renders all blocks in the graph, starting from the +Then it sequentially renders all blocks in the graph, starting from the start block; special care is taken for the return block which is always rendered at last to meet CLI requirements. diff --git a/pypy/doc/clr-module.txt b/pypy/doc/clr-module.txt --- a/pypy/doc/clr-module.txt +++ b/pypy/doc/clr-module.txt @@ -139,5 +139,5 @@ >>>> Application.Run(frm) Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possibile +Forms, because we still miss support for delegates and so it's not possible to handle events. diff --git a/pypy/doc/coding-guide.txt b/pypy/doc/coding-guide.txt --- a/pypy/doc/coding-guide.txt +++ b/pypy/doc/coding-guide.txt @@ -219,7 +219,7 @@ ``range`` and ``xrange`` are identical. ``range`` does not necessarily create an array, only if the result is modified. It is allowed everywhere and completely - implemented. The only visible difference to CPython is the inaccessability + implemented. The only visible difference to CPython is the inaccessibility of the ``xrange`` fields start, stop and step. **definitions** @@ -306,7 +306,7 @@ concerns mainly method calls, when the method is overridden or in any way given different definitions in different classes. It also concerns the less common case of explicitly manipulated function objects. 
- Describing the exact compability rules is rather involved (but if you + Describing the exact compatibility rules is rather involved (but if you break them, you should get explicit errors from the rtyper and not obscure crashes.) @@ -466,9 +466,9 @@ (>=0.13.0) can be run with the ``--rpython-mode`` command line option. This option enables the RPython checker which will checks for some of the restrictions RPython adds on standard Python code (and uses a -more agressive type inference than the one used by default by +more aggressive type inference than the one used by default by pylint). The full list of checks is available in the documentation of -Pylin. +Pylint. RPylint can be a nice tool to get some information about how much work will be needed to convert a piece of Python code to RPython, or to get @@ -636,7 +636,7 @@ When we need access to interpreter-level objects we put the module into `pypy/module`_. Such modules use a `mixed module mechanism`_ -which makes it convenient to use both interpreter- and applicationlevel parts +which makes it convenient to use both interpreter- and application-level parts for the implementation. Note that there is no extra facility for pure-interpreter level modules, you just write a mixed module and leave the application-level part empty. diff --git a/pypy/doc/config/index.txt b/pypy/doc/config/index.txt --- a/pypy/doc/config/index.txt +++ b/pypy/doc/config/index.txt @@ -33,7 +33,7 @@ There is an `overview`_ of all command line arguments that can be passed in either position. -Many of the more interesting object space options enable optimzations, +Many of the more interesting object space options enable optimizations, which are described in `Standard Interpreter Optimizations`_, or allow the creation of objects that can barely be imagined in CPython, which are documented in `What PyPy can do for your objects`_. 
diff --git a/pypy/doc/config/objspace.std.withsmalllong.txt b/pypy/doc/config/objspace.std.withsmalllong.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.withsmalllong.txt @@ -0,0 +1,5 @@ +Enable "small longs", an additional implementation of the Python +type "long", implemented with a C long long. It is mostly useful +on 32-bit; on 64-bit, a C long long is the same as a C long, so +its usefulness is limited to Python objects of type "long" that +would anyway fit in an "int". diff --git a/pypy/doc/config/objspace.usemodules._collections.txt b/pypy/doc/config/objspace.usemodules._collections.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._collections.txt @@ -0,0 +1,2 @@ +Use the '_collections' module. +Used by the 'collections' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._rawffi.txt b/pypy/doc/config/objspace.usemodules._rawffi.txt --- a/pypy/doc/config/objspace.usemodules._rawffi.txt +++ b/pypy/doc/config/objspace.usemodules._rawffi.txt @@ -1,3 +1,3 @@ An experimental module providing very low-level interface to C-level libraries, for use when implementing ctypes, not -indended for a direct use at all. \ No newline at end of file +intended for a direct use at all. \ No newline at end of file diff --git a/pypy/doc/config/objspace.usemodules.cmath.txt b/pypy/doc/config/objspace.usemodules.cmath.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules.cmath.txt @@ -0,0 +1,2 @@ +Use the 'cmath' module. +This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.posix.txt b/pypy/doc/config/objspace.usemodules.posix.txt --- a/pypy/doc/config/objspace.usemodules.posix.txt +++ b/pypy/doc/config/objspace.usemodules.posix.txt @@ -1,3 +1,3 @@ Use the essential 'posix' module. 
This module is essential, included by default and cannot be removed (even when -specified explicitly, the option gets overriden later). +specified explicitly, the option gets overridden later). diff --git a/pypy/doc/config/objspace.usemodules.readline.txt b/pypy/doc/config/objspace.usemodules.readline.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.readline.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'readline' module. diff --git a/pypy/doc/config/objspace.usemodules.zipimport.txt b/pypy/doc/config/objspace.usemodules.zipimport.txt --- a/pypy/doc/config/objspace.usemodules.zipimport.txt +++ b/pypy/doc/config/objspace.usemodules.zipimport.txt @@ -1,3 +1,3 @@ -This module implements zipimport mechanise described +This module implements zipimport mechanism described in PEP 302. It's supposed to work and translate, so it's included by default \ No newline at end of file diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt b/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt +++ b/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt @@ -1,6 +1,6 @@ Try to inline flowgraphs based on whether doing so would enable malloc removal (:config:`translation.backendopt.mallocs`.) by eliminating -calls that result in escaping. This is an experimental optimisation, +calls that result in escaping. This is an experimental optimization, also right now some eager inlining is necessary for helpers doing malloc itself to be inlined first for this to be effective. This option enable also an extra subsequent malloc removal phase. 
diff --git a/pypy/doc/config/translation.backendopt.inline.txt b/pypy/doc/config/translation.backendopt.inline.txt --- a/pypy/doc/config/translation.backendopt.inline.txt +++ b/pypy/doc/config/translation.backendopt.inline.txt @@ -7,4 +7,4 @@ inlined is needed for malloc removal (:config:`translation.backendopt.mallocs`) to be effective. -This optimisation is used by default. +This optimization is used by default. diff --git a/pypy/doc/config/translation.backendopt.print_statistics.txt b/pypy/doc/config/translation.backendopt.print_statistics.txt --- a/pypy/doc/config/translation.backendopt.print_statistics.txt +++ b/pypy/doc/config/translation.backendopt.print_statistics.txt @@ -1,2 +1,2 @@ Debugging option. Print statics about the forest of flowgraphs as they -go through the various backend optimisations. \ No newline at end of file +go through the various backend optimizations. \ No newline at end of file diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.txt b/pypy/doc/config/translation.backendopt.profile_based_inline.txt --- a/pypy/doc/config/translation.backendopt.profile_based_inline.txt +++ b/pypy/doc/config/translation.backendopt.profile_based_inline.txt @@ -7,4 +7,4 @@ The option takes as value a string which is the arguments to pass to the program for the instrumented run. -This optimisation is not used by default. \ No newline at end of file +This optimization is not used by default. \ No newline at end of file diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.txt --- a/pypy/doc/config/translation.gcrootfinder.txt +++ b/pypy/doc/config/translation.gcrootfinder.txt @@ -2,7 +2,7 @@ methods, this is mostly only interesting for framework GCs. For those you have a choice of various alternatives: - - use a shadow stack (XXX link to paper), e.g. explicitely maintaining a stack + - use a shadow stack (XXX link to paper), e.g. 
explicitly maintaining a stack of roots - use stackless to find roots by unwinding the stack. Requires diff --git a/pypy/doc/config/translation.list_comprehension_operations.txt b/pypy/doc/config/translation.list_comprehension_operations.txt --- a/pypy/doc/config/translation.list_comprehension_operations.txt +++ b/pypy/doc/config/translation.list_comprehension_operations.txt @@ -1,2 +1,2 @@ -Experimental optimisation for list comprehensions in RPython. +Experimental optimization for list comprehensions in RPython. diff --git a/pypy/doc/config/translation.type_system.txt b/pypy/doc/config/translation.type_system.txt --- a/pypy/doc/config/translation.type_system.txt +++ b/pypy/doc/config/translation.type_system.txt @@ -1,4 +1,4 @@ Which type system to use when rtyping_. This option should not be set -explicitely. +explicitly. .. _rtyping: ../rtyper.html diff --git a/pypy/doc/config/translation.vanilla.txt b/pypy/doc/config/translation.vanilla.txt --- a/pypy/doc/config/translation.vanilla.txt +++ b/pypy/doc/config/translation.vanilla.txt @@ -1,2 +1,2 @@ -Try to make the resulting compiled program as portable (=moveable to another +Try to make the resulting compiled program as portable (=movable to another machine) as possible. Which is not much. diff --git a/pypy/doc/configuration.txt b/pypy/doc/configuration.txt --- a/pypy/doc/configuration.txt +++ b/pypy/doc/configuration.txt @@ -89,7 +89,7 @@ ``default`` specifies the default value of the option. ``requires`` is a list of two-element tuples describing the requirements when the option is set to true, ``suggests`` is a list of the same structure but - the options in there are only suggested, not absolutely neccessary. The + the options in there are only suggested, not absolutely necessary. The difference is small: if the current option is set to True, both the required and the suggested options are set. The required options cannot be changed later, though. 
``negation`` specifies whether the negative @@ -127,7 +127,7 @@ Configuration Objects --------------------- -``Config`` objects hold the choosen values for the options (of the default, +``Config`` objects hold the chosen values for the options (of the default, if no choice was made). A ``Config`` object is described by an ``OptionDescription`` instance. The attributes of the ``Config`` objects are the names of the children of the ``OptionDescription``. Example:: diff --git a/pypy/doc/cpython_differences.txt b/pypy/doc/cpython_differences.txt --- a/pypy/doc/cpython_differences.txt +++ b/pypy/doc/cpython_differences.txt @@ -160,7 +160,7 @@ ---------------------------- Officially, CPython has no rule at all for when exactly -overriden method of subclasses of built-in types get +overridden method of subclasses of built-in types get implicitly called or not. As an approximation, these methods are never called by other built-in methods of the same object. For example, an overridden ``__getitem__()`` in a subclass of @@ -211,9 +211,15 @@ PyPy. On CPython it would set the maximum number of nested calls that can occur before a RuntimeError is raised; on PyPy overflowing the stack also causes RuntimeErrors, but the limit - is checked at a lower level. (The limit is currenty hard-coded + is checked at a lower level. (The limit is currently hard-coded at 768 KB, corresponding to roughly 1480 Python calls on Linux.) +* assignment to ``__class__`` is limited to the cases where it + works on CPython 2.5. On CPython 2.6 and 2.7 it works in a bit + more cases, which are not supported by PyPy so far. (If needed, + it could be supported, but then it will likely work in many + *more* case on PyPy than on CPython 2.6/2.7.) + .. include:: _ref.txt diff --git a/pypy/doc/dev_method.txt b/pypy/doc/dev_method.txt --- a/pypy/doc/dev_method.txt +++ b/pypy/doc/dev_method.txt @@ -77,11 +77,11 @@ pairs. 
It is a method that fits distributed teams well because it gets the team -focused around clear (and challenging) goals while working collarobative +focused around clear (and challenging) goals while working collaborative (pairprogramming, status meeting, discussions etc) as well as accelerated (short increments and tasks, "doing" and testing instead of long start ups of planning and requirement gathering). This means that most of the time a sprint -is a great way of getting results, but also to get new people aquinted with +is a great way of getting results, but also to get new people acquainted with the codebase. It is also a great method for dissemination and learning within the team because of the pairprogramming. @@ -112,7 +112,7 @@ month before the event. Beforehand we have some rough plans called "between sprints" and the sprintplan is based on the status of those issues but also with a focus on upcoming releases and deliverables. Usually its the core - developers who does this but the transparancy and participation have + developers who does this but the transparency and participation have increased since we started with our weekly "pypy-sync meetings" on IRC. The sync meetings in combination with a rough in between planning makes it easier for other developer to follow the progress and thus participating in @@ -121,7 +121,7 @@ The goal needs to be challenging or it won't rally the full effort of the team, but it must not be unrealistic as that tends to be very frustrating and dissatisfying. It is also very important to take into account the - particpants when you set the goal for the sprint. If the sprint takes place + participants when you set the goal for the sprint. If the sprint takes place connected to a conference (or similar open events) the goals for the actual coding progress should be set lower (or handled in another way) and focus should shift to dissemination and getting new/interested people to a @@ -140,8 +140,8 @@ evacuated by 23:00? 
These are important questions that can gravely affect the "feel and atmosphere" of the sprint as well as the desired results! - Also, somewhat close to low cost places to eat and accomodate - participants. Facilities for making tea/coffe as well as some kind of + Also, somewhat close to low cost places to eat and accommodate + participants. Facilities for making tea/coffee as well as some kind of refrigerator for storing food. A permanent Internet connection is a must - has the venue were the sprint is planned to be weird rules for access to their network etc etc? @@ -151,7 +151,7 @@ project also owns one beamer - specifically for sprint purposes. The person making sure that the requirements for a good sprint venue is - being met should therefore have very good local connections or, preferrably + being met should therefore have very good local connections or, preferably live there. 3. Information - discussions about content and goals (pre announcements) are @@ -164,7 +164,7 @@ the sprint announcements points to information about local transportation (to the country and to the city and to the venue), currency issues, food and restaurants etc. There are also webpages in which people announce when - they will arrive and where they are accomodated. + they will arrive and where they are accommodated. The planning text for the sprint is updated up till the sprint and is then used during the status meetings and between to track work. After the sprint @@ -174,11 +174,11 @@ people involved. One very important strategy when planning the venue is cost - efficiency. Keeping accomodation and food/travel costs as low as possible + efficiency. Keeping accommodation and food/travel costs as low as possible makes sure that more people can afford to visit or join the sprint fully. 
The partially EU funded parts of the project do have a so called sprint budget which we use to try to help developers to participate in our sprints - (travel expenses and accomodation) and because most of the funding is so + (travel expenses and accommodation) and because most of the funding is so called matched funding we pay for most of our expenses in our own organizations and companies anyway. @@ -205,8 +205,8 @@ formally responsible for the sprints. Suggestions for off hours activities and social events for the break day is - a good way of emphazising how important it is to take breaks - some - pointers in that direction from the local organiziser is good. + a good way of emphasizing how important it is to take breaks - some + pointers in that direction from the local organizer is good. At the end of the sprint we do a technical summary (did we achieve the goals/content), what should be a rough focus for the work until the next @@ -231,7 +231,7 @@ Of course. Just follow the work on pypy-dev and if you specifically are interested in information about our sprints - subscribe to -pypy-sprint at codespeak.net and read the news on codespeak for anouncements etc. +pypy-sprint at codespeak.net and read the news on codespeak for announcements etc. 
If you think we should sprint in your town - send us an email - we are very interested in using sprints as away of making contact with active developers @@ -264,10 +264,10 @@ * Louvain-La-Neuve March 2006 * Leysin April 2006 * Tokyo April 2006 - * D�sseldorf June 2006 + * Düsseldorf June 2006 * Europython/Geneva July 2006 * Limerick Aug 2006 - * D�sseldorf Oct 2006 + * Düsseldorf Oct 2006 * Leysin Jan 2007 * Hildesheim Feb 2007 @@ -280,10 +280,10 @@ Samuele Pedroni Christian Tismer Laura Creighton - Jacob Hall�n + Jacob Hallén Michael Hudson Richard Emslie - Anders Chrigstr�m + Anders Chrigström Alex Martelli Ludovic Aubry Adrien DiMascio @@ -317,7 +317,7 @@ Alan McIntyre Lutz Paelike Michael Chermside - Beatrice D�ring + Beatrice Düring Boris Feigin Amaury Forgeot d'Arc Andrew Thompson @@ -335,8 +335,8 @@ Michael Twomey Wanja Saatkamp Alexandre Fayolle - Rapha�l Collet - Gr�goire Dooms + Raphaël Collet + Grégoire Dooms Sanghyeon Seo Yutaka Niibe Yusei Tahara diff --git a/pypy/doc/discussion/VM-integration.txt b/pypy/doc/discussion/VM-integration.txt --- a/pypy/doc/discussion/VM-integration.txt +++ b/pypy/doc/discussion/VM-integration.txt @@ -24,7 +24,7 @@ - handle native .NET objects as transparently as possible - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. intgers, string, etc.) + crossing the borders (e.g. integers, string, etc.) One possible solution is the "proxy" approach, in which we manually (un)wrap/(un)box all the objects when they cross the border. @@ -64,13 +64,13 @@ stateful part and a behavioral part. We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identiy: the id of the Pair is the id +a Record but it hasn't its own identity: the id of the Pair is the id of its first member. XXX about ``Pair``: I'm not sure this is totally right. It means that an object can change identity simply by changing the value of a field??? 
Maybe we could add the constraint that the "id" field - can't be modifiend after initialization (but it's not easy to + can't be modified after initialization (but it's not easy to enforce). XXX-2 about ``Pair``: how to implement it in the backends? One diff --git a/pypy/doc/discussion/chained_getattr.txt b/pypy/doc/discussion/chained_getattr.txt --- a/pypy/doc/discussion/chained_getattr.txt +++ b/pypy/doc/discussion/chained_getattr.txt @@ -25,7 +25,7 @@ now for the LOAD_CHAINED_GLOBAL bytecode implementation: - Module dicts have a special implemnetation, providing: + Module dicts have a special implementation, providing: - an extra "fastlookup" rpython-dict serving as a cache for LOAD_CHAINED_GLOBAL places within the modules: diff --git a/pypy/doc/discussion/cli-optimizations.txt b/pypy/doc/discussion/cli-optimizations.txt --- a/pypy/doc/discussion/cli-optimizations.txt +++ b/pypy/doc/discussion/cli-optimizations.txt @@ -1,7 +1,7 @@ Possible optimizations for the CLI backend ========================================== -Stack push/pop optimitazion +Stack push/pop optimization --------------------------- The CLI's VM is a stack based machine: this fact doesn't play nicely @@ -61,7 +61,7 @@ generated by Microsoft CLR, yet. Thus, we might consider to optimize it manually; it should not be so -difficult, but it is not trivial becasue we have to make sure that the +difficult, but it is not trivial because we have to make sure that the dropped locals are used only once. @@ -79,8 +79,8 @@ subtle bugs in more complex ones, because the two exception hierarchies don't completely overlap. -For now I've choosen to build an RPython exception hierarchy -completely indipendent from the CLI one, but this means that we can't +For now I've chosen to build an RPython exception hierarchy +completely independent from the CLI one, but this means that we can't rely on exceptions raised by standard operations. 
The currently implemented solution is to do an exception translation on-the-fly; for example, the 'ind_add_ovf' is translated into the following IL code:: @@ -105,7 +105,7 @@ I.e., it catches the builtin OverflowException and raises a RPython OverflowError. -I haven't misured timings yet, but I guess that this machinery brings +I haven't measured timings yet, but I guess that this machinery brings to some performance penalties even in the non-overflow case; a possible optimization is to do the on-the-fly translation only when it is strictly necessary, i.e. only when the except clause catches an @@ -188,7 +188,7 @@ The current implementations of ll_dict_getitem and ll_dict_get in ootypesystem.rdict do two consecutive lookups (calling ll_contains and ll_get) on the same key. We might cache the result of -pypylib.Dict.ll_contains so that the succesive ll_get don't need a +pypylib.Dict.ll_contains so that the successive ll_get don't need a lookup. Btw, we need some profiling before choosing the best way. Or we could directly refactor ootypesystem.rdict for doing a single lookup. diff --git a/pypy/doc/discussion/compiled-swamp.txt b/pypy/doc/discussion/compiled-swamp.txt --- a/pypy/doc/discussion/compiled-swamp.txt +++ b/pypy/doc/discussion/compiled-swamp.txt @@ -3,7 +3,7 @@ * benchmarks * tests -* compliancy tests +* compliance tests * play1 * downloads * ... diff --git a/pypy/doc/discussion/ctypes_todo.txt b/pypy/doc/discussion/ctypes_todo.txt --- a/pypy/doc/discussion/ctypes_todo.txt +++ b/pypy/doc/discussion/ctypes_todo.txt @@ -3,7 +3,7 @@ * Write down missing parts and port all tests, eventually adding additional tests. - - for unions and structs, late assignement of _fields_ is somewhat buggy. + - for unions and structs, late assignment of _fields_ is somewhat buggy. Tests about behavior of getattr working properly on instances are missing or not comprehensive. Some tests are skipped because I didn't understand the details. 
@@ -15,7 +15,7 @@ - there are features, which we don't support like buffer() and array() protocols. - - are the _CData_value return lifetime/gc sematics correct? + - are the _CData_value return lifetime/gc semantics correct? - for some ABIs we will need completely filled ffitypes to do the right thing for passing structures by value, we are now passing enough diff --git a/pypy/doc/discussion/distribution-implementation.txt b/pypy/doc/discussion/distribution-implementation.txt --- a/pypy/doc/discussion/distribution-implementation.txt +++ b/pypy/doc/discussion/distribution-implementation.txt @@ -14,7 +14,7 @@ First we do split objects into value-only primitives (like int) and other. Basically immutable builtin types which cannot contain user-level objects -(int, float, long, str, None, etc.) will be always transfered as value-only +(int, float, long, str, None, etc.) will be always transferred as value-only objects (having no states etc.). The every other object (user created classes, instances, modules, lists, tuples, etc. etc.) are always executed by reference. (Of course if somebody wants to ie. copy the instance, he can marshal/pickle @@ -65,7 +65,7 @@ but not needed yet. * From the remote point of view, every exported object which needs such - has got a local apropriate storage W_LocalXXX where XXX is a type + has got a local appropriate storage W_LocalXXX where XXX is a type by which it could be accessed from a wire. The real pain: @@ -76,7 +76,7 @@ in annotator, which sucks a lot). The (some) idea is to wrap all the methods with additional checks, but that's both unclear and probably not necessary. -XXX If we can easily change underlaying implementation of an object, than +XXX If we can easily change underlying implementation of an object, than this might become way easier. Right now I'll try to have it working and thing about RPython later. 
diff --git a/pypy/doc/discussion/distribution-newattempt.txt b/pypy/doc/discussion/distribution-newattempt.txt --- a/pypy/doc/discussion/distribution-newattempt.txt +++ b/pypy/doc/discussion/distribution-newattempt.txt @@ -54,7 +54,7 @@ We need: -* app-level primitives for having 'remote proxy' accessable +* app-level primitives for having 'remote proxy' accessible * some "serialiser" which is not truly serialising stuff, but making sure communication will go. diff --git a/pypy/doc/discussion/distribution-roadmap.txt b/pypy/doc/discussion/distribution-roadmap.txt --- a/pypy/doc/discussion/distribution-roadmap.txt +++ b/pypy/doc/discussion/distribution-roadmap.txt @@ -13,7 +13,7 @@ some kind of remote control over program execution. For start I would suggest using RMI (Remote Method Invocation) and remote object access (in case of low level it would be struct access). For the simplicity -it will make some sense to target high-level platform at the beggining +it will make some sense to target high-level platform at the beginning (CLI platform seems like obvious choice), which provides more primitives for performing such operations. To make attempt easier, I'll provide some subset of type system to be serializable which can go as parameters @@ -52,7 +52,7 @@ --------------------------------------- The biggest step here is to provide JIT integration into distribution -system. This should allow to make it really usefull (probably compile-time +system. This should allow to make it really useful (probably compile-time distribution will not work for example for whole Python interpreter, because of too huge granularity). This is quite unclear for me how to do that (JIT is not complete and I don't know too much about it). 
Probably we diff --git a/pypy/doc/discussion/gc.txt b/pypy/doc/discussion/gc.txt --- a/pypy/doc/discussion/gc.txt +++ b/pypy/doc/discussion/gc.txt @@ -9,7 +9,7 @@ ============= This document tries to gather gc-related issues which are very recent -or in-developement. Also, it tries to document needed gc refactorings +or in-development. Also, it tries to document needed gc refactorings and expected performance of certain gc-related operations. Problem area diff --git a/pypy/doc/discussion/howtoimplementpickling.txt b/pypy/doc/discussion/howtoimplementpickling.txt --- a/pypy/doc/discussion/howtoimplementpickling.txt +++ b/pypy/doc/discussion/howtoimplementpickling.txt @@ -39,7 +39,7 @@ of the program and its state (example: a Forth backend), I would see it as a valid solution, since it is relocatable. It is of course a possible fall-back to write -sucn a backend of we fail otherwise. +such a backend of we fail otherwise. There are some simple steps and some more difficult ones. Let's start with the simple. @@ -94,7 +94,7 @@ the prickelpit.c file in the Stackless distribution. As a conclusion, pickling of tasklets is an addition to Stackless, -but not meand to be an extension to Python. The need to support +but not meant to be an extension to Python. The need to support pickling of certain objects should not change the interface. It is better to decouple this and to use surrogate types for pickling which cannot collide with future additions to Python. @@ -145,7 +145,7 @@ SLP switching strategies ........................ -SLP has undergone several rewrites. The first implemenation was aiming +SLP has undergone several rewrites. The first implementation was aiming at complete collaboration. A new frame's execution was deferred until all the preparational C function calls had left the C stack. There was no extra state to be saved. 
@@ -195,13 +195,13 @@ Right now, PyPy saves C state of functions in tiny activation records: the alive variables of a block, together with the entry point of -the function tnat was left. +the function that was left. This is an improvement over storing raw stack slices, but the pattern is similar: The C stack state gets restored when we switch. In this sense, it was the astonishing resume when Richard and I discussed this last week: PyPy essentially does a variant of Hard switching! At least it -does a compromize that does not really help with pickling. +does a compromise that does not really help with pickling. On the other hand, this approach is half the way. It turns out to be an improvement over SLP not to have to avoid recursions in the @@ -240,7 +240,7 @@ we really need to restore before we can do the function call? - the argument decoding is done, already, and the fact that we could have done - the function call shows, that no exception occured. We can ignore the rest + the function call shows, that no exception occurred. We can ignore the rest of this activation record and do the housekeeping. - the frame is prepared, and arguments are stored in it. The operation @@ -268,7 +268,7 @@ A rough sketch of the necessary analysis: for every block in an RPython function that can reach unwind: -Analyse control flow. It should be immediately leading to +Analyze control flow. It should be immediately leading to the return block with only one output variable. All other alive variables should have ended their liveness in this block. diff --git a/pypy/doc/discussion/improve-rpython.txt b/pypy/doc/discussion/improve-rpython.txt --- a/pypy/doc/discussion/improve-rpython.txt +++ b/pypy/doc/discussion/improve-rpython.txt @@ -4,7 +4,7 @@ Improve the interpreter API --------------------------- -- Rationalize the modules, and the names, of the differents functions needed to +- Rationalize the modules, and the names, of the different functions needed to implement a pypy module. 
A typical rpython file is likely to contain many `import` statements:: @@ -34,7 +34,7 @@ ---------------- - Arithmetic with unsigned integer, and between integer of different signedness, - when this is not ambiguous. At least, comparison and assignement with + when this is not ambiguous. At least, comparison and assignment with constants should be allowed. - Allocate variables on the stack, and pass their address ("by reference") to diff --git a/pypy/doc/discussion/outline-external-ootype.txt b/pypy/doc/discussion/outline-external-ootype.txt --- a/pypy/doc/discussion/outline-external-ootype.txt +++ b/pypy/doc/discussion/outline-external-ootype.txt @@ -108,7 +108,7 @@ Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) -Then, the annotation for Foo's intances is SomeExternalInstance(Foo). +Then, the annotation for Foo's instances is SomeExternalInstance(Foo). This way, the transformation from high-level types to platform-level types is straightforward and correct. @@ -156,7 +156,7 @@ Exceptions ~~~~~~~~~~ -.NET and JVM users want to catch external exceptions in a natuarl way; +.NET and JVM users want to catch external exceptions in a natural way; e.g.:: try: @@ -164,7 +164,7 @@ except System.OverflowException: ... -This is not straighforward because to make the flow objspace happy the +This is not straightforward because to make the flow objspace happy the object which represent System.OverflowException must be a real Python class that inherits from Exception. @@ -200,7 +200,7 @@ necessary to rewrite a part of it. To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the informations needed by the +ootype.Instance that contains all the information needed by the backend to reference the class (e.g., the namespace). It also supports overloading. @@ -209,5 +209,5 @@ helpers. It might be saner to use another annotation not to mix apples and oranges, maybe factoring out common code. 
-I don't know whether and how much code can be reused from the exising +I don't know whether and how much code can be reused from the existing bltregistry. diff --git a/pypy/doc/discussion/security-ideas.txt b/pypy/doc/discussion/security-ideas.txt --- a/pypy/doc/discussion/security-ideas.txt +++ b/pypy/doc/discussion/security-ideas.txt @@ -191,7 +191,7 @@ def serve_admin(token): print "Highest big is:", declassify(highest_bid, token=token) -The declassify() function reads the value if the "token" is priviledged +The declassify() function reads the value if the "token" is privileged enough, and raises an exception otherwise. What are we protecting here? The fact that we need the administrator diff --git a/pypy/doc/discussion/somepbc-refactoring-plan.txt b/pypy/doc/discussion/somepbc-refactoring-plan.txt --- a/pypy/doc/discussion/somepbc-refactoring-plan.txt +++ b/pypy/doc/discussion/somepbc-refactoring-plan.txt @@ -27,7 +27,7 @@ To be declared in module pypy.annotator.desc, with a mapping annotator.bookkeeper.descs = {: } -accessed with bookkeepeer.getdesc(). +accessed with bookkeeper.getdesc(). Maybe later the module should be moved out of pypy.annotation but for now I suppose that it's the best place. diff --git a/pypy/doc/discussion/summer-of-pypy-pytest.txt b/pypy/doc/discussion/summer-of-pypy-pytest.txt --- a/pypy/doc/discussion/summer-of-pypy-pytest.txt +++ b/pypy/doc/discussion/summer-of-pypy-pytest.txt @@ -16,7 +16,7 @@ Remote imports: --------------- -On the beggining of communication, master server sends to client +On the beginning of communication, master server sends to client import hook code, which then can import all needed libraries. Libraries are uploaded server -> client if they're needed (when @@ -47,7 +47,7 @@ Then we transfer output data to server as string, possibly tweaking file names (which is quite easy). 
-Delivarables: +Deliverables: ============= - better use of testing machines diff --git a/pypy/doc/discussion/thoughts_string_interning.txt b/pypy/doc/discussion/thoughts_string_interning.txt --- a/pypy/doc/discussion/thoughts_string_interning.txt +++ b/pypy/doc/discussion/thoughts_string_interning.txt @@ -8,7 +8,7 @@ the dict lookup method will find the string always by identity, saving the need to do a string comparison. -Interned Srings in CPython +Interned Strings in CPython -------------------------- CPython keeps an internal dictionary named ``interned`` for all of these @@ -137,7 +137,7 @@ D:\pypy\dist\pypy\translator\goal> -This was just an exercize to get an idea. For sure this is not to be checked in. +This was just an exercise to get an idea. For sure this is not to be checked in. Instead, I'm attaching the simple patch here for reference. :: diff --git a/pypy/doc/discussion/use_case_of_logic.txt b/pypy/doc/discussion/use_case_of_logic.txt --- a/pypy/doc/discussion/use_case_of_logic.txt +++ b/pypy/doc/discussion/use_case_of_logic.txt @@ -13,10 +13,10 @@ We define Business Logic as expressing consistency (as an example) on a set of objects in a business application. -For exeample checking the consistency of a calculation before +For example checking the consistency of a calculation before committing the changes. -The domain is quite rich in example of uses of Busines Logic. +The domain is quite rich in example of uses of Business Logic. Datamining =========== @@ -26,7 +26,7 @@ Databases ========= -Validity contraints for the data can be expressed as constraints. +Validity constraints for the data can be expressed as constraints. Constraints can be used to perform type inference when querying the database. 
@@ -34,7 +34,7 @@ Semantic web ============= -The use case is like the database case, except the ontology langauge +The use case is like the database case, except the ontology language it self is born out of Descriptive Logic @@ -50,8 +50,8 @@ Configuration ============== -User configuration can use information infered from : the current -user, current plantform , version requirements, ... +User configuration can use information inferred from : the current +user, current platforms , version requirements, ... The validity of the configuration can be checked with the constraints. @@ -61,15 +61,15 @@ Timetables, process scheduling, task scheduling. -Use rules to determin when to execute tasks (only start batch, if load +Use rules to determine when to execute tasks (only start batch, if load is low, and previous batch is finished. Load sharing. -Route optimisation. Planning the routes of a technitian based on tools +Route optimization. Planning the routes of a technician based on tools needed and such -An example is scheduling a confenre like Europython see: +An example is scheduling a conference like Europython see: http://lists.logilab.org/pipermail/python-logic/2005-May/000107.html diff --git a/pypy/doc/distribution.txt b/pypy/doc/distribution.txt --- a/pypy/doc/distribution.txt +++ b/pypy/doc/distribution.txt @@ -10,7 +10,7 @@ The implementation uses an RPC-like protocol, which accesses only members of objects, rather than whole objects. This means it -does not rely on objects being picklable, nor on having the same +does not rely on objects being pickleable, nor on having the same source code available on both sides. On each call, only the members that are used on the client side are retrieved, objects which are not used are merely references to their remote counterparts. @@ -27,7 +27,7 @@ remote side replies by providing a bound method. 
On the client this bound method appears as a remote reference: this reference is called with a remote reference to x as self, the integer 1 which is copied as a primitive type, a -reference to a list and a refence to y. The remote side receives this call, +reference to a list and a reference to y. The remote side receives this call, processes it as a call to the bound method x.foo, where 'x' is resolved as a local object, 1 as an immutable primitive, [1,2,3] as a reference to a mutable primitive and y as a reference to a remote object. If the type of y is not @@ -74,7 +74,7 @@ - No attribute access - - Arguments of calls must be picklable on one side and unpicklable on + - Arguments of calls must be pickleable on one side and unpickleable on remote side, which means they must share source code, they do not become remote references @@ -105,7 +105,7 @@ - two way RPC (unlike Pyro) - - also heavy restrictions on objects - they must sublcass certain class + - also heavy restrictions on objects - they must subclass certain class .. _`Pyro`: http://pyro.sourceforge.net/ .. _`transparent proxies`: objspace-proxies.html#tproxy diff --git a/pypy/doc/eventhistory.txt b/pypy/doc/eventhistory.txt --- a/pypy/doc/eventhistory.txt +++ b/pypy/doc/eventhistory.txt @@ -1,7 +1,7 @@ The PyPy project is a worldwide collaborative effort and its - members are organising sprints and presenting results at conferences + members are organizing sprints and presenting results at conferences all year round. **This page is no longer maintained!** See `our blog`_ for upcoming events. @@ -120,7 +120,7 @@ The Mallorca sprint that took place in Palma de Mallorca is over. Topics included progressing with the JIT work started in Göteborg -and Paris, GC and optimisation work, stackless, and +and Paris, GC and optimization work, stackless, and improving our way to write glue code for C libraries. 
Read more in `the announcement`_, there is a `sprint report`_ diff --git a/pypy/doc/extending.txt b/pypy/doc/extending.txt --- a/pypy/doc/extending.txt +++ b/pypy/doc/extending.txt @@ -12,8 +12,8 @@ Possibilities ============= -Right now, there are three posibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulnes): +Right now, there are three possibilities of providing third-party modules +for the PyPy python interpreter (in order of usefulness): * Write them in pure python and use ctypes, see ctypes_ section @@ -39,7 +39,7 @@ We also provide a `ctypes-configure`_ for overcoming the platform dependencies, not relying on the ctypes codegen. This tool works by querying gcc about -platform-depenent details (compiling small snippets of C code and running +platform-dependent details (compiling small snippets of C code and running them), so it'll benefit not pypy-related ctypes-based modules as well. .. _`ctypes-configure`: http://codespeak.net/~fijal/configure.html @@ -91,9 +91,9 @@ compilation-check requires to recompile whole PyPy python interpreter, which takes 0.5-1h. We plan to solve this at some point in near future. -* although rpython is a garabage-collected language, the border between +* although rpython is a garbage-collected language, the border between C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitely freed) XXX we try to solve this + C level must be explicitly freed) XXX we try to solve this Some document is available `here`_ diff --git a/pypy/doc/extradoc.txt b/pypy/doc/extradoc.txt --- a/pypy/doc/extradoc.txt +++ b/pypy/doc/extradoc.txt @@ -323,7 +323,7 @@ * Jython_ is a Python implementation in Java. * IronPython_ a new Python implementation compiling Python into - Microsofts Common Language Runtime (CLR) Intermediate Language (IL). + Microsoft's Common Language Runtime (CLR) Intermediate Language (IL). * Tunes_ is not entirely unrelated. 
The web site changed a lot, but a snapshot of the `old Tunes Wiki`_ is available on codespeak; browsing diff --git a/pypy/doc/faq.txt b/pypy/doc/faq.txt --- a/pypy/doc/faq.txt +++ b/pypy/doc/faq.txt @@ -147,7 +147,7 @@ .. _`prolog and javascript`: --------------------------------------------------------------- -Can PyPy support intepreters for other languages beyond Python? +Can PyPy support interpreters for other languages beyond Python? --------------------------------------------------------------- The toolsuite that translates the PyPy interpreter is quite diff --git a/pypy/doc/getting-started.txt b/pypy/doc/getting-started.txt --- a/pypy/doc/getting-started.txt +++ b/pypy/doc/getting-started.txt @@ -25,10 +25,6 @@ Just the facts ============== -.. _gettingpypy: -.. _`latest stable version via subversion`: -.. _`get via Subversion`: - Clone the repository -------------------- @@ -69,11 +65,30 @@ .. _`our nightly tests:`: http://buildbot.pypy.org/summary?branch= +If you want to commit to our repository on bitbucket, you will have to +install subversion in addition to mercurial. + +Installing using virtualenv +--------------------------- + +It is often convenient to run pypy inside a virtualenv. To do this +you need a recent version of virtualenv -- 1.5 or greater. You can +then install PyPy both from a precompiled tarball or from a mercurial +checkout:: + + # from a tarball + $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env + + # from the mercurial checkout + $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env + +Note that bin/python is now a symlink to bin/pypy. 
+ Where to go from here ---------------------- -After you succesfully managed to get PyPy's source you can read more about: +After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ - `Learning more about the translation toolchain and how to develop (with) PyPy`_ diff --git a/pypy/doc/glossary.txt b/pypy/doc/glossary.txt --- a/pypy/doc/glossary.txt +++ b/pypy/doc/glossary.txt @@ -195,7 +195,7 @@ **translation-time** In the context of the JIT_, translation time is when the PyPy - source is being analysed and the JIT itself is being created. + source is being analyzed and the JIT itself is being created. .. _`translator`: diff --git a/pypy/doc/index-report.txt b/pypy/doc/index-report.txt --- a/pypy/doc/index-report.txt +++ b/pypy/doc/index-report.txt @@ -30,7 +30,7 @@ `D08.2 JIT Compiler Architecture`_ is a report about the Architecture and working of our JIT compiler generator. *(2007-05-01)* -`D08.1 JIT Compiler Release`_ reports on our sucessfully including a +`D08.1 JIT Compiler Release`_ reports on our successfully including a JIT compiler for Python and the novel framework we used to automatically generate it in PyPy 1.0. *(2007-04-30)* @@ -76,7 +76,7 @@ `D02.1 Development Tools and Website`_ is a report -about the codespeak_ development environment and aditional tool support for the +about the codespeak_ development environment and additional tool support for the PyPy development process. 
*(2007-03-21)* `D03.1 Extension Compiler`_ is a report about diff --git a/pypy/doc/index.txt b/pypy/doc/index.txt --- a/pypy/doc/index.txt +++ b/pypy/doc/index.txt From noreply at buildbot.pypy.org Fri Sep 23 13:12:03 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:03 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Fixed delete_slice in EmptyListStrategy Message-ID: <20110923111203.D18B182211@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47448:d6a714086628 Date: 2011-03-01 18:36 +0100 http://bitbucket.org/pypy/pypy/changeset/d6a714086628/ Log: Fixed delete_slice in EmptyListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -205,7 +205,7 @@ raise IndexError def deleteslice(self, w_list, start, step, slicelength): - raise IndexError + pass def pop(self, w_list, index): raise IndexError From noreply at buildbot.pypy.org Fri Sep 23 13:12:05 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:05 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Added RangeListStrategy Message-ID: <20110923111205.0C4AC820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47449:a17bc9c78e16 Date: 2011-03-02 14:43 +0100 http://bitbucket.org/pypy/pypy/changeset/a17bc9c78e16/ Log: Added RangeListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -7,6 +7,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std import slicetype from pypy.interpreter import gateway, baseobjspace +from pypy.rlib.objectmodel import instantiate from pypy.rlib.listsort import TimSort from pypy.interpreter.argument import Signature @@ -21,6 +22,11 @@ assert 
wrapper._from_where == from_where return wrapper._content +def make_range_list(space, start, step, length): + storage = cast_to_void_star((start, step, length)) + strategy = RangeListStrategy(space) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + # don't know where to put this function, so it is global for now def get_strategy_from_list_objects(space, list_w): if list_w == []: @@ -59,6 +65,14 @@ w_self.strategy = get_strategy_from_list_objects(space, wrappeditems) w_self.strategy.init_from_list_w(w_self, wrappeditems) + @staticmethod + def from_storage_and_strategy(space, storage, strategy): + w_self = instantiate(W_ListObject) + w_self.space = space + w_self.strategy = strategy + w_self.storage = storage + return w_self + def __repr__(w_self): """ representation for debugging purposes """ return "%s(%s)" % (w_self.__class__.__name__, w_self.getitems()) @@ -228,6 +242,37 @@ def reverse(self, w_list): pass +class RangeListStrategy(ListStrategy): + + def wrap(self, intval): + return self.space.wrap(intval) + + def unwrap(self, w_int): + return self.space.int_w(w_int) + + def init_from_list_w(self, w_list, list_w): + raise NotImplementedError + + def cast_from_void_star(self, storage): + return cast_from_void_star(storage, "integer") + + def getitems(self, w_list): + l = self.cast_from_void_star(w_list.storage) + start = l[0] + step = l[1] + length = l[2] + + r = [None] * length + + i = start + n = 0 + while n < length: + r[n] = self.wrap(i) + i += step + n += 1 + + return r + class AbstractUnwrappedStrategy(ListStrategy): def wrap(self, unwrapped): @@ -559,6 +604,7 @@ def iter__List(space, w_list): from pypy.objspace.std import iterobject + import pdb; pdb.set_trace() return iterobject.W_FastListIterObject(w_list, w_list.getitems()) def add__List_List(space, w_list1, w_list2): From noreply at buildbot.pypy.org Fri Sep 23 13:12:06 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:06 +0200 (CEST) Subject: 
[pypy-commit] pypy list-strategies: Use RangeListStrategy instead of RangeListObject Message-ID: <20110923111206.3A009820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47450:4c5fbb6e58e6 Date: 2011-03-02 14:48 +0100 http://bitbucket.org/pypy/pypy/changeset/4c5fbb6e58e6/ Log: Use RangeListStrategy instead of RangeListObject diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -97,10 +97,12 @@ return space.newlist(res_w) -def range_withspecialized_implementation(space, start, step, howmany): +def range_withspecialized_implementation(space, start, step, length): assert space.config.objspace.std.withrangelist - from pypy.objspace.std.rangeobject import W_RangeListObject - return W_RangeListObject(start, step, howmany) + from pypy.objspace.std.listobject import make_range_list + return make_range_list(space, start, step, length) + #from pypy.objspace.std.rangeobject import W_RangeListObject + #return W_RangeListObject(start, step, length) bigint_one = rbigint.fromint(1) From noreply at buildbot.pypy.org Fri Sep 23 13:12:07 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:07 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Changed W_ListObject iterable to use getitem Message-ID: <20110923111207.6C49E820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47451:e31f975d8b0f Date: 2011-03-02 14:49 +0100 http://bitbucket.org/pypy/pypy/changeset/e31f975d8b0f/ Log: Changed W_ListObject iterable to use getitem diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -30,9 +30,9 @@ """Sequence iterator specialized for lists, accessing directly their RPython-level list of wrapped objects. 
""" - def __init__(w_self, w_seq, wrappeditems): + def __init__(w_self, w_seq): W_AbstractSeqIterObject.__init__(w_self, w_seq) - w_self.listitems = wrappeditems + w_self.w_seq = w_seq class W_FastTupleIterObject(W_AbstractSeqIterObject): """Sequence iterator specialized for tuples, accessing @@ -102,13 +102,13 @@ return w_seqiter def next__FastListIter(space, w_seqiter): - if w_seqiter.listitems is None: + if w_seqiter.w_seq is None: raise OperationError(space.w_StopIteration, space.w_None) index = w_seqiter.index try: - w_item = w_seqiter.listitems[index] + w_item = w_seqiter.w_seq.getitem(index) except IndexError: - w_seqiter.listitems = None + w_seqiter.w_seq = None w_seqiter.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -604,8 +604,7 @@ def iter__List(space, w_list): from pypy.objspace.std import iterobject - import pdb; pdb.set_trace() - return iterobject.W_FastListIterObject(w_list, w_list.getitems()) + return iterobject.W_FastListIterObject(w_list) def add__List_List(space, w_list1, w_list2): return W_ListObject(space, w_list1.getitems() + w_list2.getitems()) From noreply at buildbot.pypy.org Fri Sep 23 13:12:08 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:08 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented RangeListStrategy (not_forced tests still not working) Message-ID: <20110923111208.999D0820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47452:a2106499f6e3 Date: 2011-03-08 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/a2106499f6e3/ Log: Implemented RangeListStrategy (not_forced tests still not working) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ 
b/pypy/objspace/std/listobject.py @@ -23,7 +23,7 @@ return wrapper._content def make_range_list(space, start, step, length): - storage = cast_to_void_star((start, step, length)) + storage = cast_to_void_star((start, step, length), "integer") strategy = RangeListStrategy(space) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @@ -75,7 +75,7 @@ def __repr__(w_self): """ representation for debugging purposes """ - return "%s(%s)" % (w_self.__class__.__name__, w_self.getitems()) + return "%s(%s, %s)" % (w_self.__class__.__name__, w_self.strategy, w_self.storage._content) def unwrap(w_list, space): # for tests only! @@ -244,6 +244,12 @@ class RangeListStrategy(ListStrategy): + def switch_to_integer_strategy(self, w_list): + #XXX write storage directly to avoid wrapping and unwrapping + list_w = w_list.getitems() + w_list.strategy = IntegerListStrategy(self.space) + w_list.strategy.init_from_list_w(w_list, list_w) + def wrap(self, intval): return self.space.wrap(intval) @@ -256,12 +262,27 @@ def cast_from_void_star(self, storage): return cast_from_void_star(storage, "integer") + def length(self, w_list): + return self.cast_from_void_star(w_list.storage)[2] + + def getitem(self, w_list, i): + v = self.cast_from_void_star(w_list.storage) + start = v[0] + step = v[1] + length = v[2] + if i < 0: + i += length + if i < 0: + raise IndexError + elif i >= length: + raise IndexError + return self.wrap(start + i * step) + def getitems(self, w_list): l = self.cast_from_void_star(w_list.storage) start = l[0] step = l[1] length = l[2] - r = [None] * length i = start @@ -273,6 +294,61 @@ return r + def getslice(self, w_list, start, stop, step, length): + v = self.cast_from_void_star(w_list.storage) + old_start = v[0] + old_step = v[1] + old_length = v[2] + + new_start = self.unwrap(w_list.getitem(start)) + new_step = old_step * step + return make_range_list(self.space, new_start, new_step, length) + + def append(self, w_list, w_item): + #XXX maybe check later if 
w_item fits in range to keep RangeListStrategy + if is_W_IntObject(w_item): + self.switch_to_integer_strategy(w_list) + else: + w_list.switch_to_object_strategy(w_list) + w_list.append(w_item) + + def inplace_mul(self, w_list, times): + self.switch_to_integer_strategy(w_list) + w_list.inplace_mul(times) + + def deleteitem(self, w_list, index): + self.switch_to_integer_strategy(w_list) + w_list.deleteitem(index) + + def deleteslice(self, w_list, start, step, slicelength): + self.switch_to_integer_strategy(w_list) + w_list.deleteslice(start, step, slicelength) + + def pop(self, w_list, index): + self.switch_to_integer_strategy(w_list) + return w_list.pop(index) + + def setitem(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.setitem(index, w_item) + + def setslice(self, w_list, start, step, slicelength, sequence_w): + self.switch_to_integer_strategy(w_list) + w_list.setslice(start, step, slicelength) + + def insert(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.insert(index, w_item) + + def extend(self, w_list, items_w): + self.switch_to_integer_strategy(w_list) + w_list.extend(items_w) + + def reverse(self, w_list): + #XXX better: switch start, end, and negate step? 
+ self.switch_to_integer_strategy(w_list) + w_list.reverse() + class AbstractUnwrappedStrategy(ListStrategy): def wrap(self, unwrapped): diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -13,7 +13,7 @@ import __pypy__ def f(r): return (isinstance(r, list) and - "W_ListObject" not in __pypy__.internal_repr(r)) + "RangeListStrategy" in __pypy__.internal_repr(r)) return f """) cls.w_SORT_FORCES_LISTS = cls.space.wrap(False) From noreply at buildbot.pypy.org Fri Sep 23 13:12:09 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:09 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Added tests for RangeListStrategy Message-ID: <20110923111209.C869A820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47453:75956a940aea Date: 2011-03-08 15:06 +0100 http://bitbucket.org/pypy/pypy/changeset/75956a940aea/ Log: Added tests for RangeListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -309,7 +309,7 @@ if is_W_IntObject(w_item): self.switch_to_integer_strategy(w_list) else: - w_list.switch_to_object_strategy(w_list) + w_list.switch_to_object_strategy() w_list.append(w_item) def inplace_mul(self, w_list, times): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, StringListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, StringListStrategy, RangeListStrategy, make_range_list 
from pypy.objspace.std.test.test_listobject import TestW_ListObject class TestW_ListStrategies(TestW_ListObject): @@ -145,3 +145,15 @@ l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + def test_rangelist(self): + l = make_range_list(self.space, 1,3,7) + assert isinstance(l.strategy, RangeListStrategy) + v = l.pop(6) + assert self.space.eq_w(v, self.space.wrap(19)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 1,3,7) + assert isinstance(l.strategy, RangeListStrategy) + l.append(self.space.wrap("string")) + assert isinstance(l.strategy, ObjectListStrategy) + From noreply at buildbot.pypy.org Fri Sep 23 13:12:11 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:11 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Try to keep rangelist on append and pop Message-ID: <20110923111211.051D4820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47454:ed567fc8e950 Date: 2011-03-09 10:30 +0100 http://bitbucket.org/pypy/pypy/changeset/ed567fc8e950/ Log: Try to keep rangelist on append and pop diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -305,8 +305,15 @@ return make_range_list(self.space, new_start, new_step, length) def append(self, w_list, w_item): - #XXX maybe check later if w_item fits in range to keep RangeListStrategy if is_W_IntObject(w_item): + l = self.cast_from_void_star(w_list.storage) + step = l[1] + last_in_range = self.getitem(w_list, -1) + if self.unwrap(w_item) - step == self.unwrap(last_in_range): + new = cast_to_void_star((l[0],l[1],l[2]+1), "integer") + w_list.storage = new + return + self.switch_to_integer_strategy(w_list) else: w_list.switch_to_object_strategy() @@ -325,6 +332,23 @@ w_list.deleteslice(start, step, slicelength) 
def pop(self, w_list, index): + #XXX move this to list_pop_List_ANY + if index < 0: + index += self.length(w_list) + + l = self.cast_from_void_star(w_list.storage) + if index == 0: + r = self.getitem(w_list, 0) + new = cast_to_void_star((l[0]+l[1],l[1],l[2]-1), "integer") + w_list.storage = new + return r + + if index == self.length(w_list)-1: + r = self.getitem(w_list, -1) + new = cast_to_void_star((l[0],l[1],l[2]-1), "integer") + w_list.storage = new + return r + self.switch_to_integer_strategy(w_list) return w_list.pop(index) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -148,8 +148,8 @@ def test_rangelist(self): l = make_range_list(self.space, 1,3,7) assert isinstance(l.strategy, RangeListStrategy) - v = l.pop(6) - assert self.space.eq_w(v, self.space.wrap(19)) + v = l.pop(5) + assert self.space.eq_w(v, self.space.wrap(16)) assert isinstance(l.strategy, IntegerListStrategy) l = make_range_list(self.space, 1,3,7) @@ -157,3 +157,25 @@ l.append(self.space.wrap("string")) assert isinstance(l.strategy, ObjectListStrategy) + l = make_range_list(self.space, 1,1,5) + assert isinstance(l.strategy, RangeListStrategy) + l.append(self.space.wrap(19)) + assert isinstance(l.strategy, IntegerListStrategy) + + def test_keep_range(self): + # simple list + l = make_range_list(self.space, 1,1,5) + assert isinstance(l.strategy, RangeListStrategy) + x = l.pop(0) + assert self.space.eq_w(x, self.space.wrap(1)) + assert isinstance(l.strategy, RangeListStrategy) + l.pop(-1) + assert isinstance(l.strategy, RangeListStrategy) + l.append(self.space.wrap(5)) + assert isinstance(l.strategy, RangeListStrategy) + + # complex list + l = make_range_list(self.space, 1,3,5) + assert isinstance(l.strategy, RangeListStrategy) + l.append(self.space.wrap(16)) + assert isinstance(l.strategy, RangeListStrategy) From noreply at 
buildbot.pypy.org Fri Sep 23 13:12:12 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:12 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Switch to IntegerStrategy without wrapping; Reverse without switching Message-ID: <20110923111212.33851820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47455:2b05113ecfe1 Date: 2011-03-09 10:48 +0100 http://bitbucket.org/pypy/pypy/changeset/2b05113ecfe1/ Log: Switch to IntegerStrategy without wrapping; Reverse without switching diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -246,9 +246,9 @@ def switch_to_integer_strategy(self, w_list): #XXX write storage directly to avoid wrapping and unwrapping - list_w = w_list.getitems() + items = self.getitems(w_list, unwrapped=True) w_list.strategy = IntegerListStrategy(self.space) - w_list.strategy.init_from_list_w(w_list, list_w) + w_list.storage = w_list.strategy.cast_to_void_star(items) def wrap(self, intval): return self.space.wrap(intval) @@ -278,7 +278,7 @@ raise IndexError return self.wrap(start + i * step) - def getitems(self, w_list): + def getitems(self, w_list, unwrapped=False): l = self.cast_from_void_star(w_list.storage) start = l[0] step = l[1] @@ -288,7 +288,10 @@ i = start n = 0 while n < length: - r[n] = self.wrap(i) + if unwrapped: + r[n] = i + else: + r[n] = self.wrap(i) i += step n += 1 @@ -369,9 +372,12 @@ w_list.extend(items_w) def reverse(self, w_list): - #XXX better: switch start, end, and negate step? 
- self.switch_to_integer_strategy(w_list) - w_list.reverse() + v = self.cast_from_void_star(w_list.storage) + last = w_list.getitem(-1) #XXX wrapped + length = v[2] + skip = v[1] + new = cast_to_void_star((self.unwrap(last), -skip, length), "integer") + w_list.storage = new class AbstractUnwrappedStrategy(ListStrategy): From noreply at buildbot.pypy.org Fri Sep 23 13:12:13 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:13 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Switch RangeListStrategy to EmptyListStrategy if length is zero Message-ID: <20110923111213.62AF9820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47456:025d68e4eb4c Date: 2011-03-09 11:38 +0100 http://bitbucket.org/pypy/pypy/changeset/025d68e4eb4c/ Log: Switch RangeListStrategy to EmptyListStrategy if length is zero diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -23,8 +23,12 @@ return wrapper._content def make_range_list(space, start, step, length): - storage = cast_to_void_star((start, step, length), "integer") - strategy = RangeListStrategy(space) + if length <= 0: + storage = cast_to_void_star(None) + strategy = EmptyListStrategy(space) + else: + storage = cast_to_void_star((start, step, length), "integer") + strategy = RangeListStrategy(space) return W_ListObject.from_storage_and_strategy(space, storage, strategy) # don't know where to put this function, so it is global for now @@ -339,17 +343,20 @@ if index < 0: index += self.length(w_list) + #XXX merge these parts l = self.cast_from_void_star(w_list.storage) if index == 0: - r = self.getitem(w_list, 0) + r = self.getitem(w_list, index) new = cast_to_void_star((l[0]+l[1],l[1],l[2]-1), "integer") w_list.storage = new + w_list.check_empty_strategy() return r if index == self.length(w_list)-1: - r = self.getitem(w_list, -1) + r = self.getitem(w_list, 
index) new = cast_to_void_star((l[0],l[1],l[2]-1), "integer") w_list.storage = new + w_list.check_empty_strategy() return r self.switch_to_integer_strategy(w_list) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -179,3 +179,15 @@ assert isinstance(l.strategy, RangeListStrategy) l.append(self.space.wrap(16)) assert isinstance(l.strategy, RangeListStrategy) + + def test_empty_range(self): + l = make_range_list(self.space, 0, 0, 0) + assert isinstance(l.strategy, EmptyListStrategy) + + l = make_range_list(self.space, 1, 1, 10) + print l.getitems() + for i in l.getitems(): + assert isinstance(l.strategy, RangeListStrategy) + l.pop(-1) + + assert isinstance(l.strategy, EmptyListStrategy) diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -44,12 +44,9 @@ def test_empty_range(self): r = range(10, 10) - if not self.SORT_FORCES_LISTS: - r.sort(reverse=True) assert len(r) == 0 assert list(reversed(r)) == [] assert r[:] == [] - assert self.not_forced(r) def test_repr(self): r = range(5) @@ -73,23 +70,6 @@ r.reverse() assert r == [2, 1, 1] - def test_sort(self): - if self.SORT_FORCES_LISTS: - skip("sort() forces these lists") - r = range(10, -1, -1) - r.sort() - assert self.not_forced(r) - assert r == range(11) - r = range(11) - r.sort(reverse=True) - assert self.not_forced(r) - assert r == range(10, -1, -1) - r = range(100) - r[0] = 999 - assert not self.not_forced(r) - r.sort() - assert r == range(1, 100) + [999] - def test_pop(self): r = range(10) res = r.pop() From noreply at buildbot.pypy.org Fri Sep 23 13:12:14 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:14 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: 
Refactored casting Message-ID: <20110923111214.8F9C5820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47457:42c65af8f36f Date: 2011-03-09 14:00 +0100 http://bitbucket.org/pypy/pypy/changeset/42c65af8f36f/ Log: Refactored casting diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -24,11 +24,11 @@ def make_range_list(space, start, step, length): if length <= 0: - storage = cast_to_void_star(None) strategy = EmptyListStrategy(space) + storage = strategy.cast_to_void_star(None) else: - storage = cast_to_void_star((start, step, length), "integer") strategy = RangeListStrategy(space) + storage = strategy.cast_to_void_star((start, step, length)) return W_ListObject.from_storage_and_strategy(space, storage, strategy) # don't know where to put this function, so it is global for now @@ -199,7 +199,15 @@ def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 - w_list.storage = cast_to_void_star(None) + w_list.storage = self.cast_to_void_star(None) + + @staticmethod + def cast_from_void_star(storage): + return cast_from_void_star(storage, "empty") + + @staticmethod + def cast_to_void_star(obj): + return cast_to_void_star(obj, "empty") def length(self, w_list): return 0 @@ -249,7 +257,6 @@ class RangeListStrategy(ListStrategy): def switch_to_integer_strategy(self, w_list): - #XXX write storage directly to avoid wrapping and unwrapping items = self.getitems(w_list, unwrapped=True) w_list.strategy = IntegerListStrategy(self.space) w_list.storage = w_list.strategy.cast_to_void_star(items) @@ -263,8 +270,13 @@ def init_from_list_w(self, w_list, list_w): raise NotImplementedError - def cast_from_void_star(self, storage): - return cast_from_void_star(storage, "integer") + @staticmethod + def cast_from_void_star(storage): + return cast_from_void_star(storage, "range") + + @staticmethod + def cast_to_void_star(obj): + 
return cast_to_void_star(obj, "range") def length(self, w_list): return self.cast_from_void_star(w_list.storage)[2] @@ -317,7 +329,7 @@ step = l[1] last_in_range = self.getitem(w_list, -1) if self.unwrap(w_item) - step == self.unwrap(last_in_range): - new = cast_to_void_star((l[0],l[1],l[2]+1), "integer") + new = self.cast_to_void_star((l[0],l[1],l[2]+1)) w_list.storage = new return @@ -347,14 +359,14 @@ l = self.cast_from_void_star(w_list.storage) if index == 0: r = self.getitem(w_list, index) - new = cast_to_void_star((l[0]+l[1],l[1],l[2]-1), "integer") + new = self.cast_to_void_star((l[0]+l[1],l[1],l[2]-1)) w_list.storage = new w_list.check_empty_strategy() return r if index == self.length(w_list)-1: r = self.getitem(w_list, index) - new = cast_to_void_star((l[0],l[1],l[2]-1), "integer") + new = self.cast_to_void_star((l[0],l[1],l[2]-1)) w_list.storage = new w_list.check_empty_strategy() return r @@ -383,7 +395,7 @@ last = w_list.getitem(-1) #XXX wrapped length = v[2] skip = v[1] - new = cast_to_void_star((self.unwrap(last), -skip, length), "integer") + new = self.cast_to_void_star((self.unwrap(last), -skip, length)) w_list.storage = new class AbstractUnwrappedStrategy(ListStrategy): @@ -394,7 +406,12 @@ def unwrap(self, wrapped): raise NotImplementedError - def cast_from_void_star(self, storage): + @staticmethod + def cast_from_void_star(storage): + raise NotImplementedError("abstract base class") + + @staticmethod + def cast_to_void_star(obj): raise NotImplementedError("abstract base class") def is_correct_type(self, w_obj): @@ -590,9 +607,14 @@ def wrap(self, item): return item - def cast_from_void_star(self, storage): + @staticmethod + def cast_from_void_star(storage): return cast_from_void_star(storage, "object") + @staticmethod + def cast_to_void_star(obj): + return cast_to_void_star(obj, "object") + def is_correct_type(self, w_obj): return True @@ -600,7 +622,7 @@ return ObjectListStrategy is type(w_list.strategy) def init_from_list_w(self, w_list, 
list_w): - w_list.storage = cast_to_void_star(list_w, "object") + w_list.storage = self.cast_to_void_star(list_w) class IntegerListStrategy(AbstractUnwrappedStrategy): @@ -610,11 +632,13 @@ def unwrap(self, w_int): return self.space.int_w(w_int) - def cast_from_void_star(self, storage): + @staticmethod + def cast_from_void_star(storage): return cast_from_void_star(storage, "integer") - def cast_to_void_star(self, l): - return cast_to_void_star(l, "integer") + @staticmethod + def cast_to_void_star(obj): + return cast_to_void_star(obj, "integer") def is_correct_type(self, w_obj): return is_W_IntObject(w_obj) @@ -630,11 +654,13 @@ def unwrap(self, w_string): return self.space.str_w(w_string) - def cast_from_void_star(self, storage): + @staticmethod + def cast_from_void_star(storage): return cast_from_void_star(storage, "string") - def cast_to_void_star(self, l): - return cast_to_void_star(l, "string") + @staticmethod + def cast_to_void_star(obj): + return cast_to_void_star(obj, "string") def is_correct_type(self, w_obj): return is_W_StringObject(w_obj) From noreply at buildbot.pypy.org Fri Sep 23 13:12:15 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:15 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Refactored RangeListStrateg.pop() Message-ID: <20110923111215.BBF1A820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47458:6f8c358e4389 Date: 2011-03-09 14:12 +0100 http://bitbucket.org/pypy/pypy/changeset/6f8c358e4389/ Log: Refactored RangeListStrateg.pop() diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -357,16 +357,12 @@ #XXX merge these parts l = self.cast_from_void_star(w_list.storage) - if index == 0: + if index in [0, self.length(w_list)-1]: r = self.getitem(w_list, index) - new = self.cast_to_void_star((l[0]+l[1],l[1],l[2]-1)) - w_list.storage = new - 
w_list.check_empty_strategy() - return r - - if index == self.length(w_list)-1: - r = self.getitem(w_list, index) - new = self.cast_to_void_star((l[0],l[1],l[2]-1)) + if index == 0: + new = self.cast_to_void_star((l[0]+l[1],l[1],l[2]-1)) + else: + new = self.cast_to_void_star((l[0],l[1],l[2]-1)) w_list.storage = new w_list.check_empty_strategy() return r From noreply at buildbot.pypy.org Fri Sep 23 13:12:16 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:16 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Forget to delete comment Message-ID: <20110923111216.E740B820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47459:36b0dfe66616 Date: 2011-03-09 14:15 +0100 http://bitbucket.org/pypy/pypy/changeset/36b0dfe66616/ Log: Forget to delete comment diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -355,7 +355,6 @@ if index < 0: index += self.length(w_list) - #XXX merge these parts l = self.cast_from_void_star(w_list.storage) if index in [0, self.length(w_list)-1]: r = self.getitem(w_list, index) From noreply at buildbot.pypy.org Fri Sep 23 13:12:18 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:18 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Finally use (real) rerased Message-ID: <20110923111218.1FC56820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47460:aa3d3bbda4d5 Date: 2011-03-09 14:40 +0100 http://bitbucket.org/pypy/pypy/changeset/aa3d3bbda4d5/ Log: Finally use (real) rerased diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -9,19 +9,9 @@ from pypy.interpreter import gateway, baseobjspace from pypy.rlib.objectmodel import instantiate from pypy.rlib.listsort import TimSort 
+from pypy.rlib import rerased from pypy.interpreter.argument import Signature -class cast_to_void_star(object): - # this will later be replaced by something in rlib.rerased - def __init__(self, content, from_where=""): - self._content = content - self._from_where = from_where - -def cast_from_void_star(wrapper, from_where=""): - # this will later be replaced by something in rlib.rerased - assert wrapper._from_where == from_where - return wrapper._content - def make_range_list(space, start, step, length): if length <= 0: strategy = EmptyListStrategy(space) @@ -201,13 +191,9 @@ assert len(list_w) == 0 w_list.storage = self.cast_to_void_star(None) - @staticmethod - def cast_from_void_star(storage): - return cast_from_void_star(storage, "empty") - - @staticmethod - def cast_to_void_star(obj): - return cast_to_void_star(obj, "empty") + cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("empty") + cast_to_void_star = staticmethod(cast_to_void_star) + cast_from_void_star = staticmethod(cast_from_void_star) def length(self, w_list): return 0 @@ -270,13 +256,9 @@ def init_from_list_w(self, w_list, list_w): raise NotImplementedError - @staticmethod - def cast_from_void_star(storage): - return cast_from_void_star(storage, "range") - - @staticmethod - def cast_to_void_star(obj): - return cast_to_void_star(obj, "range") + cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("range") + cast_to_void_star = staticmethod(cast_to_void_star) + cast_from_void_star = staticmethod(cast_from_void_star) def length(self, w_list): return self.cast_from_void_star(w_list.storage)[2] @@ -602,13 +584,9 @@ def wrap(self, item): return item - @staticmethod - def cast_from_void_star(storage): - return cast_from_void_star(storage, "object") - - @staticmethod - def cast_to_void_star(obj): - return cast_to_void_star(obj, "object") + cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("range") + cast_to_void_star = staticmethod(cast_to_void_star) + 
cast_from_void_star = staticmethod(cast_from_void_star) def is_correct_type(self, w_obj): return True @@ -627,13 +605,9 @@ def unwrap(self, w_int): return self.space.int_w(w_int) - @staticmethod - def cast_from_void_star(storage): - return cast_from_void_star(storage, "integer") - - @staticmethod - def cast_to_void_star(obj): - return cast_to_void_star(obj, "integer") + cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("range") + cast_to_void_star = staticmethod(cast_to_void_star) + cast_from_void_star = staticmethod(cast_from_void_star) def is_correct_type(self, w_obj): return is_W_IntObject(w_obj) @@ -649,13 +623,9 @@ def unwrap(self, w_string): return self.space.str_w(w_string) - @staticmethod - def cast_from_void_star(storage): - return cast_from_void_star(storage, "string") - - @staticmethod - def cast_to_void_star(obj): - return cast_to_void_star(obj, "string") + cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("range") + cast_to_void_star = staticmethod(cast_to_void_star) + cast_from_void_star = staticmethod(cast_from_void_star) def is_correct_type(self, w_obj): return is_W_StringObject(w_obj) From noreply at buildbot.pypy.org Fri Sep 23 13:12:19 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:19 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Strategies are now singletons Message-ID: <20110923111219.4C156820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47461:69819f8b5be9 Date: 2011-03-09 15:06 +0100 http://bitbucket.org/pypy/pypy/changeset/69819f8b5be9/ Log: Strategies are now singletons diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -14,33 +14,33 @@ def make_range_list(space, start, step, length): if length <= 0: - strategy = EmptyListStrategy(space) + strategy = space.fromcache(EmptyListStrategy) storage = 
strategy.cast_to_void_star(None) else: - strategy = RangeListStrategy(space) + strategy = space.fromcache(RangeListStrategy)(space) storage = strategy.cast_to_void_star((start, step, length)) return W_ListObject.from_storage_and_strategy(space, storage, strategy) # don't know where to put this function, so it is global for now def get_strategy_from_list_objects(space, list_w): if list_w == []: - return EmptyListStrategy(space) + return space.fromcache(EmptyListStrategy) # check for ints for e in list_w: if not is_W_IntObject(e): break if e is list_w[-1]: - return IntegerListStrategy(space) + return space.fromcache(IntegerListStrategy) # check for ints for e in list_w: if not is_W_StringObject(e): break if e is list_w[-1]: - return StringListStrategy(space) + return space.fromcache(StringListStrategy) - return ObjectListStrategy(space) + return space.fromcache(ObjectListStrategy) def is_W_IntObject(w_object): from pypy.objspace.std.intobject import W_IntObject @@ -78,12 +78,12 @@ def switch_to_object_strategy(self): list_w = self.getitems() - self.strategy = ObjectListStrategy(self.space) + self.strategy = self.space.fromcache(ObjectListStrategy) self.strategy.init_from_list_w(self, list_w) def check_empty_strategy(self): if self.length() == 0: - self.strategy = EmptyListStrategy(self.space) + self.strategy = self.space.fromcache(EmptyListStrategy) self.strategy.init_from_list_w(self, []) # ___________________________________________________ @@ -244,7 +244,7 @@ def switch_to_integer_strategy(self, w_list): items = self.getitems(w_list, unwrapped=True) - w_list.strategy = IntegerListStrategy(self.space) + w_list.strategy = self.space.fromcache(IntegerListStrategy) w_list.storage = w_list.strategy.cast_to_void_star(items) def wrap(self, intval): From noreply at buildbot.pypy.org Fri Sep 23 13:12:20 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:20 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Check correct type with 
strategy instance instead of class Message-ID: <20110923111220.775A9820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47462:b9213520c508 Date: 2011-03-11 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/b9213520c508/ Log: Check correct type with strategy instance instead of class diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -17,7 +17,7 @@ strategy = space.fromcache(EmptyListStrategy) storage = strategy.cast_to_void_star(None) else: - strategy = space.fromcache(RangeListStrategy)(space) + strategy = space.fromcache(RangeListStrategy) storage = strategy.cast_to_void_star((start, step, length)) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @@ -474,7 +474,7 @@ assert slicelength >= 0 items = self.cast_from_void_star(w_list.storage) - if (type(self) is not ObjectListStrategy and + if (self is not self.space.fromcache(ObjectListStrategy) and not self.list_is_correct_type(W_ListObject(self.space, sequence_w)) and len(sequence_w) != 0): w_list.switch_to_object_strategy() @@ -584,7 +584,7 @@ def wrap(self, item): return item - cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("range") + cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("object") cast_to_void_star = staticmethod(cast_to_void_star) cast_from_void_star = staticmethod(cast_from_void_star) @@ -592,7 +592,7 @@ return True def list_is_correct_type(self, w_list): - return ObjectListStrategy is type(w_list.strategy) + return w_list.strategy is self.space.fromcache(ObjectListStrategy) def init_from_list_w(self, w_list, list_w): w_list.storage = self.cast_to_void_star(list_w) @@ -605,7 +605,7 @@ def unwrap(self, w_int): return self.space.int_w(w_int) - cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("range") + cast_to_void_star, cast_from_void_star = 
rerased.new_erasing_pair("integer") cast_to_void_star = staticmethod(cast_to_void_star) cast_from_void_star = staticmethod(cast_from_void_star) @@ -613,7 +613,7 @@ return is_W_IntObject(w_obj) def list_is_correct_type(self, w_list): - return IntegerListStrategy is type(w_list.strategy) + return w_list.strategy is self.space.fromcache(IntegerListStrategy) class StringListStrategy(AbstractUnwrappedStrategy): @@ -623,7 +623,7 @@ def unwrap(self, w_string): return self.space.str_w(w_string) - cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("range") + cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("string") cast_to_void_star = staticmethod(cast_to_void_star) cast_from_void_star = staticmethod(cast_from_void_star) @@ -631,7 +631,7 @@ return is_W_StringObject(w_obj) def list_is_correct_type(self, w_list): - return StringListStrategy is type(w_list.strategy) + return w_list.strategy is self.space.fromcache(StringListStrategy) # _______________________________________________________ From noreply at buildbot.pypy.org Fri Sep 23 13:12:21 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:21 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Cast sequence to W_ListObject (translation fix) Message-ID: <20110923111221.A3F53820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47463:eb7765a263ed Date: 2011-03-15 14:47 +0100 http://bitbucket.org/pypy/pypy/changeset/eb7765a263ed/ Log: Cast sequence to W_ListObject (translation fix) diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -102,14 +102,16 @@ return w_seqiter def next__FastListIter(space, w_seqiter): - if w_seqiter.w_seq is None: + from pypy.objspace.std.listobject import W_ListObject + w_seq = w_seqiter.w_seq + if w_seq is None: raise OperationError(space.w_StopIteration, space.w_None) + assert 
isinstance(w_seq, W_ListObject) index = w_seqiter.index try: - w_item = w_seqiter.w_seq.getitem(index) + w_item = w_seq.getitem(index) except IndexError: w_seqiter.w_seq = None - w_seqiter.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) w_seqiter.index = index + 1 return w_item From noreply at buildbot.pypy.org Fri Sep 23 13:12:22 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:22 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Each strategy must implement (not inherit) it's own length method (translation fix) Message-ID: <20110923111222.D118B820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47464:40905329581f Date: 2011-03-15 14:51 +0100 http://bitbucket.org/pypy/pypy/changeset/40905329581f/ Log: Each strategy must implement (not inherit) it's own length method (translation fix) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -375,7 +375,8 @@ new = self.cast_to_void_star((self.unwrap(last), -skip, length)) w_list.storage = new -class AbstractUnwrappedStrategy(ListStrategy): +class AbstractUnwrappedStrategy(object): + _mixin_ = True def wrap(self, unwrapped): raise NotImplementedError @@ -577,7 +578,7 @@ def reverse(self, w_list): self.cast_from_void_star(w_list.storage).reverse() -class ObjectListStrategy(AbstractUnwrappedStrategy): +class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): def unwrap(self, w_obj): return w_obj @@ -597,7 +598,7 @@ def init_from_list_w(self, w_list, list_w): w_list.storage = self.cast_to_void_star(list_w) -class IntegerListStrategy(AbstractUnwrappedStrategy): +class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): def wrap(self, intval): return self.space.wrap(intval) @@ -615,7 +616,7 @@ def list_is_correct_type(self, w_list): return w_list.strategy is 
self.space.fromcache(IntegerListStrategy) -class StringListStrategy(AbstractUnwrappedStrategy): +class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): def wrap(self, stringval): return self.space.wrap(stringval) From noreply at buildbot.pypy.org Fri Sep 23 13:12:24 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:24 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Use unresizable list here (translation fix) Message-ID: <20110923111224.08B5E820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47465:eb82a1290057 Date: 2011-03-15 16:25 +0100 http://bitbucket.org/pypy/pypy/changeset/eb82a1290057/ Log: Use unresizable list here (translation fix) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -402,7 +402,7 @@ if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_ListObject): - t = w_obj.getitems() + t = w_obj.getitems()[:] else: if unroll: return make_sure_not_resized(ObjSpace.unpackiterable_unroll( From noreply at buildbot.pypy.org Fri Sep 23 13:12:25 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:25 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Stop must not be negative (translation fix); Wrapping/Unwrapping not neccessary here Message-ID: <20110923111225.395BE820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47466:a88228a2c43d Date: 2011-03-15 16:27 +0100 http://bitbucket.org/pypy/pypy/changeset/a88228a2c43d/ Log: Stop must not be negative (translation fix); Wrapping/Unwrapping not neccessary here diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -78,8 +78,8 @@ def switch_to_object_strategy(self): list_w = self.getitems() - self.strategy = 
self.space.fromcache(ObjectListStrategy) - self.strategy.init_from_list_w(self, list_w) + strategy = self.strategy = self.space.fromcache(ObjectListStrategy) + strategy.init_from_list_w(self, list_w) def check_empty_strategy(self): if self.length() == 0: @@ -416,10 +416,11 @@ def getslice(self, w_list, start, stop, step, length): if step == 1: - # XXX ineffecient cause items are wrapped and unwrapped again - # later: W_ListObject constructor for unwrapped items - l = w_list.getitems() - return W_ListObject(self.space, l[start:stop]) + l = self.cast_from_void_star(w_list.storage) + assert stop >= 0 + sublist = l[start:stop] + storage = self.cast_to_void_star(sublist) + return W_ListObject.from_storage_and_strategy(self.space, storage, self) else: subitems_w = [None] * length for i in range(length): @@ -598,6 +599,8 @@ def init_from_list_w(self, w_list, list_w): w_list.storage = self.cast_to_void_star(list_w) + # XXX implement getitems without copying here + class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): def wrap(self, intval): From noreply at buildbot.pypy.org Fri Sep 23 13:12:26 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:26 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Unvisited path: Added test and fixed error Message-ID: <20110923111226.6623E820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47467:8efc606bcbbe Date: 2011-03-15 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/8efc606bcbbe/ Log: Unvisited path: Added test and fixed error diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -502,7 +502,7 @@ else: assert delta==0 # start<0 is only possible with slicelength==0 elif len2 != slicelength: # No resize for extended slices - raise operationerrfmt(space.w_ValueError, "attempt to " + raise operationerrfmt(self.space.w_ValueError, "attempt 
to " "assign sequence of size %d to extended slice of size %d", len2, slicelength) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -638,6 +638,9 @@ assert l == [0, 'b', 2] assert l is l0 + l = [1,2,3] + raises(ValueError, "l[0:2:2] = [1,2,3,4]") + def test_recursive_repr(self): l = [] assert repr(l) == '[]' From noreply at buildbot.pypy.org Fri Sep 23 13:12:27 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:27 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Two more translation fixes Message-ID: <20110923111227.92319820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47468:174a022afec3 Date: 2011-03-16 09:47 +0100 http://bitbucket.org/pypy/pypy/changeset/174a022afec3/ Log: Two more translation fixes diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -244,8 +244,8 @@ def switch_to_integer_strategy(self, w_list): items = self.getitems(w_list, unwrapped=True) - w_list.strategy = self.space.fromcache(IntegerListStrategy) - w_list.storage = w_list.strategy.cast_to_void_star(items) + strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) + w_list.storage = strategy.cast_to_void_star(items) def wrap(self, intval): return self.space.wrap(intval) @@ -417,6 +417,7 @@ def getslice(self, w_list, start, stop, step, length): if step == 1: l = self.cast_from_void_star(w_list.storage) + assert start >= 0 assert stop >= 0 sublist = l[start:stop] storage = self.cast_to_void_star(sublist) From noreply at buildbot.pypy.org Fri Sep 23 13:12:28 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:28 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Spezialized function for getitems 
(wrapped/unwrapped); Fixed list initialisation with (translation error) Message-ID: <20110923111228.BF253820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47469:05f252f29f4e Date: 2011-03-16 11:14 +0100 http://bitbucket.org/pypy/pypy/changeset/05f252f29f4e/ Log: Spezialized function for getitems (wrapped/unwrapped); Fixed list initialisation with (translation error) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -7,7 +7,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std import slicetype from pypy.interpreter import gateway, baseobjspace -from pypy.rlib.objectmodel import instantiate +from pypy.rlib.objectmodel import instantiate, specialize from pypy.rlib.listsort import TimSort from pypy.rlib import rerased from pypy.interpreter.argument import Signature @@ -243,7 +243,7 @@ class RangeListStrategy(ListStrategy): def switch_to_integer_strategy(self, w_list): - items = self.getitems(w_list, unwrapped=True) + items = self._getitem_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) w_list.storage = strategy.cast_to_void_star(items) @@ -276,7 +276,11 @@ raise IndexError return self.wrap(start + i * step) - def getitems(self, w_list, unwrapped=False): + def getitems(self, w_list): + return self._getitem_range(w_list, True) + + @specialize.arg(2) + def _getitem_range(self, w_list, wrap_items): l = self.cast_from_void_star(w_list.storage) start = l[0] step = l[1] @@ -286,10 +290,10 @@ i = start n = 0 while n < length: - if unwrapped: + if wrap_items: + r[n] = self.wrap(i) + else: r[n] = i - else: - r[n] = self.wrap(i) i += step n += 1 @@ -492,7 +496,7 @@ delta = -delta newsize = oldsize + delta # XXX support this in rlist! 
- items += [None] * delta + items += [self._none_value] * delta lim = start+len2 i = newsize - 1 while i >= lim: @@ -581,6 +585,8 @@ self.cast_from_void_star(w_list.storage).reverse() class ObjectListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = None + def unwrap(self, w_obj): return w_obj @@ -603,6 +609,7 @@ # XXX implement getitems without copying here class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = 0 def wrap(self, intval): return self.space.wrap(intval) @@ -621,6 +628,7 @@ return w_list.strategy is self.space.fromcache(IntegerListStrategy) class StringListStrategy(AbstractUnwrappedStrategy, ListStrategy): + _none_value = None def wrap(self, stringval): return self.space.wrap(stringval) From noreply at buildbot.pypy.org Fri Sep 23 13:12:29 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:29 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Another list init fix Message-ID: <20110923111229.EA61D820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47470:1914a506afec Date: 2011-03-16 11:24 +0100 http://bitbucket.org/pypy/pypy/changeset/1914a506afec/ Log: Another list init fix diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -285,8 +285,10 @@ start = l[0] step = l[1] length = l[2] - r = [None] * length - + if wrap_items: + r = [None] * length + else: + r = [0] * length i = start n = 0 while n < length: From noreply at buildbot.pypy.org Fri Sep 23 13:12:31 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:31 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Fixed untested path in RangeListStrategy.setslice Message-ID: <20110923111231.234BE820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47471:e7955881eadb Date: 2011-03-16 11:46 +0100 
http://bitbucket.org/pypy/pypy/changeset/e7955881eadb/ Log: Fixed untested path in RangeListStrategy.setslice diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -363,7 +363,7 @@ def setslice(self, w_list, start, step, slicelength, sequence_w): self.switch_to_integer_strategy(w_list) - w_list.setslice(start, step, slicelength) + w_list.setslice(start, step, slicelength, sequence_w) def insert(self, w_list, index, w_item): self.switch_to_integer_strategy(w_list) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -191,3 +191,9 @@ l.pop(-1) assert isinstance(l.strategy, EmptyListStrategy) + + def test_range_setslice(self): + l = make_range_list(self.space, 1, 3, 5) + assert isinstance(l.strategy, RangeListStrategy) + l.setslice(0, 1, 3, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:12:32 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:32 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Deactivate old rangelist Message-ID: <20110923111232.4F275820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47472:3000f02317b9 Date: 2011-03-16 12:56 +0100 http://bitbucket.org/pypy/pypy/changeset/3000f02317b9/ Log: Deactivate old rangelist diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -350,7 +350,7 @@ config.objspace.opcodes.suggest(CALL_LIKELY_BUILTIN=True) if level in ['2', '3', 'jit']: config.objspace.opcodes.suggest(CALL_METHOD=True) - config.objspace.std.suggest(withrangelist=True) + 
config.objspace.std.suggest(withrangelist=False) config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(builtinshortcut=True) @@ -369,7 +369,7 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=True) + config.objspace.std.suggest(withrangelist=False) config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(withmapdict=True) config.objspace.std.suggest(withstrslice=True) From noreply at buildbot.pypy.org Fri Sep 23 13:12:33 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:33 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Replaced some more wrappeditems Message-ID: <20110923111233.7AE07820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47473:f367e25b4ed2 Date: 2011-03-16 13:06 +0100 http://bitbucket.org/pypy/pypy/changeset/f367e25b4ed2/ Log: Replaced some more wrappeditems diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -32,7 +32,7 @@ Py_DecRef(space, w_item) if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list assignment index out of range")) @@ -47,7 +47,7 @@ IndexError exception.""" if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.wrappeditems + wrappeditems = w_list.getitems() if index < 0 or index >= len(wrappeditems): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) @@ -74,7 +74,7 @@ """Macro form of PyList_Size() without error checking. 
""" assert isinstance(w_list, W_ListObject) - return len(w_list.wrappeditems) + return len(w_list.getitems()) @cpython_api([PyObject], Py_ssize_t, error=-1) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -57,7 +57,7 @@ PySequence_Fast(), o is not NULL, and that i is within bounds. """ if isinstance(w_obj, listobject.W_ListObject): - w_res = w_obj.wrappeditems[index] + w_res = w_obj.getitem(index) else: assert isinstance(w_obj, tupleobject.W_TupleObject) w_res = w_obj.wrappeditems[index] @@ -71,7 +71,7 @@ PySequence_Fast_GET_SIZE() is faster because it can assume o is a list or tuple.""" if isinstance(w_obj, listobject.W_ListObject): - return len(w_obj.wrappeditems) + return w_obj.length() assert isinstance(w_obj, tupleobject.W_TupleObject) return len(w_obj.wrappeditems) diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -300,7 +300,7 @@ register(TYPE_TUPLE, unmarshal_Tuple) def marshal_w__List(space, w_list, m): - items = w_list.wrappeditems[:] + items = w_list.getitems() m.put_tuple_w(TYPE_LIST, items) def unmarshal_List(space, u, tc): From noreply at buildbot.pypy.org Fri Sep 23 13:12:34 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:34 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Need to make a fixed list here Message-ID: <20110923111234.A633B820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47474:05c02d1ce657 Date: 2011-03-16 13:26 +0100 http://bitbucket.org/pypy/pypy/changeset/05c02d1ce657/ Log: Need to make a fixed list here diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -300,7 +300,7 @@ register(TYPE_TUPLE, unmarshal_Tuple) def 
marshal_w__List(space, w_list, m): - items = w_list.getitems() + items = w_list.getitems()[:] m.put_tuple_w(TYPE_LIST, items) def unmarshal_List(space, u, tc): From noreply at buildbot.pypy.org Fri Sep 23 13:12:40 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:40 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Merge default Message-ID: <20110923111240.881FA820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47475:f7da3f99894b Date: 2011-03-16 14:48 +0100 http://bitbucket.org/pypy/pypy/changeset/f7da3f99894b/ Log: Merge default diff too long, truncating to 10000 out of 39738 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,11 @@ include/*.h lib_pypy/ctypes_config_cache/_[^_]*_*.py pypy/_cache +pypy/doc/*.html +pypy/doc/config/*.html +pypy/doc/discussion/*.html +pypy/module/test_lib_pypy/ctypes_tests/*.o +pypy/translator/c/src/dtoa.o pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ \ No newline at end of file diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -13,6 +13,7 @@ ^pypy/module/cpyext/test/.+\.o$ ^pypy/module/cpyext/test/.+\.obj$ ^pypy/module/cpyext/test/.+\.manifest$ +^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ ^pypy/doc/.+\.html$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ @@ -61,3 +62,5 @@ ^pypy/doc/image/stackless_informal\.png$ ^pypy/doc/image/parsing_example.+\.png$ ^compiled +^.git/ +^release/ \ No newline at end of file diff --git a/.hgsubstate b/.hgsubstate --- a/.hgsubstate +++ b/.hgsubstate @@ -1,3 +1,3 @@ 80037 greenlet 80348 lib_pypy/pyrepl -80037 testrunner +80409 testrunner diff --git a/README b/README --- a/README +++ b/README @@ -1,6 +1,6 @@ -====================================== -PyPy: Python in Python implementation -====================================== +===================================== +PyPy: Python in Python Implementation 
+===================================== Welcome to PyPy! @@ -13,18 +13,12 @@ http://pypy.org/ -We invite you to head over to our detailed getting-started document: - - pypy/doc/getting-started.html or - pypy/doc/getting-started.txt - (local if you got a source tarball or svn checkout) +The getting-started document will help guide you: http://codespeak.net/pypy/dist/pypy/doc/getting-started.html -which gives you many good starting and entry points into playing with -PyPy. It will also point you to our documentation section which is -generated from information in the pypy/doc directory. - -Enjoy and send us feedback! +It will also point you to the rest of the documentation which is generated +from files in the pypy/doc directory within the source repositories. Enjoy +and send us feedback! the pypy-dev team diff --git a/_pytest/__init__.py b/_pytest/__init__.py new file mode 100644 --- /dev/null +++ b/_pytest/__init__.py @@ -0,0 +1,1 @@ +# diff --git a/_pytest/assertion.py b/_pytest/assertion.py new file mode 100644 --- /dev/null +++ b/_pytest/assertion.py @@ -0,0 +1,179 @@ +""" +support for presented detailed information in failing assertions. +""" +import py +import sys +from _pytest.monkeypatch import monkeypatch + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group._addoption('--no-assert', action="store_true", default=False, + dest="noassert", + help="disable python assert expression reinterpretation."), + +def pytest_configure(config): + # The _reprcompare attribute on the py.code module is used by + # py._code._assertionnew to detect this plugin was loaded and in + # turn call the hooks defined here as part of the + # DebugInterpreter. 
+ config._monkeypatch = m = monkeypatch() + warn_about_missing_assertion() + if not config.getvalue("noassert") and not config.getvalue("nomagic"): + def callbinrepr(op, left, right): + hook_result = config.hook.pytest_assertrepr_compare( + config=config, op=op, left=left, right=right) + for new_expl in hook_result: + if new_expl: + return '\n~'.join(new_expl) + m.setattr(py.builtin.builtins, + 'AssertionError', py.code._AssertionError) + m.setattr(py.code, '_reprcompare', callbinrepr) + +def pytest_unconfigure(config): + config._monkeypatch.undo() + +def warn_about_missing_assertion(): + try: + assert False + except AssertionError: + pass + else: + sys.stderr.write("WARNING: failing tests may report as passing because " + "assertions are turned off! (are you using python -O?)\n") + +# Provide basestring in python3 +try: + basestring = basestring +except NameError: + basestring = str + + +def pytest_assertrepr_compare(op, left, right): + """return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op + left_repr = py.io.saferepr(left, maxsize=int(width/2)) + right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) + summary = '%s %s %s' % (left_repr, op, right_repr) + + issequence = lambda x: isinstance(x, (list, tuple)) + istext = lambda x: isinstance(x, basestring) + isdict = lambda x: isinstance(x, dict) + isset = lambda x: isinstance(x, set) + + explanation = None + try: + if op == '==': + if istext(left) and istext(right): + explanation = _diff_text(left, right) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right) + elif isdict(left) and isdict(right): + explanation = _diff_text(py.std.pprint.pformat(left), + py.std.pprint.pformat(right)) + elif op == 'not in': + if istext(left) and istext(right): + explanation = _notin_text(left, right) + except 
py.builtin._sysex: + raise + except: + excinfo = py.code.ExceptionInfo() + explanation = ['(pytest_assertion plugin: representation of ' + 'details failed. Probably an object has a faulty __repr__.)', + str(excinfo) + ] + + + if not explanation: + return None + + # Don't include pageloads of data, should be configurable + if len(''.join(explanation)) > 80*8: + explanation = ['Detailed information too verbose, truncated'] + + return [summary] + explanation + + +def _diff_text(left, right): + """Return the explanation for the diff between text + + This will skip leading and trailing characters which are + identical to keep the diff minimal. + """ + explanation = [] + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = ['Skipping %s identical ' + 'leading characters in diff' % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += ['Skipping %s identical ' + 'trailing characters in diff' % i] + left = left[:-i] + right = right[:-i] + explanation += [line.strip('\n') + for line in py.std.difflib.ndiff(left.splitlines(), + right.splitlines())] + return explanation + + +def _compare_eq_sequence(left, right): + explanation = [] + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + explanation += ['At index %s diff: %r != %r' % + (i, left[i], right[i])] + break + if len(left) > len(right): + explanation += ['Left contains more items, ' + 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + elif len(left) < len(right): + explanation += ['Right contains more items, ' + 'first extra item: %s' % py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) + + +def _compare_eq_set(left, right): + 
explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append('Extra items in the left set:') + for item in diff_left: + explanation.append(py.io.saferepr(item)) + if diff_right: + explanation.append('Extra items in the right set:') + for item in diff_right: + explanation.append(py.io.saferepr(item)) + return explanation + + +def _notin_text(term, text): + index = text.find(term) + head = text[:index] + tail = text[index+len(term):] + correct_text = head + tail + diff = _diff_text(correct_text, text) + newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + for line in diff: + if line.startswith('Skipping'): + continue + if line.startswith('- '): + continue + if line.startswith('+ '): + newdiff.append(' ' + line[2:]) + else: + newdiff.append(line) + return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py new file mode 100644 --- /dev/null +++ b/_pytest/capture.py @@ -0,0 +1,226 @@ +""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. 
""" + +import pytest, py +import os + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption('--capture', action="store", default=None, + metavar="method", type="choice", choices=['fd', 'sys', 'no'], + help="per-test capturing method: one of fd (default)|sys|no.") + group._addoption('-s', action="store_const", const="no", dest="capture", + help="shortcut for --capture=no.") + +def addouterr(rep, outerr): + repr = getattr(rep, 'longrepr', None) + if not hasattr(repr, 'addsection'): + return + for secname, content in zip(["out", "err"], outerr): + if content: + repr.addsection("Captured std%s" % secname, content.rstrip()) + +def pytest_unconfigure(config): + # registered in config.py during early conftest.py loading + capman = config.pluginmanager.getplugin('capturemanager') + while capman._method2capture: + name, cap = capman._method2capture.popitem() + # XXX logging module may wants to close it itself on process exit + # otherwise we could do finalization here and call "reset()". 
+ cap.suspend() + +class NoCapture: + def startall(self): + pass + def resume(self): + pass + def reset(self): + pass + def suspend(self): + return "", "" + +class CaptureManager: + def __init__(self): + self._method2capture = {} + + def _maketempfile(self): + f = py.std.tempfile.TemporaryFile() + newf = py.io.dupfile(f, encoding="UTF-8") + f.close() + return newf + + def _makestringio(self): + return py.io.TextIO() + + def _getcapture(self, method): + if method == "fd": + return py.io.StdCaptureFD(now=False, + out=self._maketempfile(), err=self._maketempfile() + ) + elif method == "sys": + return py.io.StdCapture(now=False, + out=self._makestringio(), err=self._makestringio() + ) + elif method == "no": + return NoCapture() + else: + raise ValueError("unknown capturing method: %r" % method) + + def _getmethod_preoptionparse(self, args): + if '-s' in args or "--capture=no" in args: + return "no" + elif hasattr(os, 'dup') and '--capture=sys' not in args: + return "fd" + else: + return "sys" + + def _getmethod(self, config, fspath): + if config.option.capture: + method = config.option.capture + else: + try: + method = config._conftest.rget("option_capture", path=fspath) + except KeyError: + method = "fd" + if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + method = "sys" + return method + + def resumecapture_item(self, item): + method = self._getmethod(item.config, item.fspath) + if not hasattr(item, 'outerr'): + item.outerr = ('', '') # we accumulate outerr on the item + return self.resumecapture(method) + + def resumecapture(self, method): + if hasattr(self, '_capturing'): + raise ValueError("cannot resume, already capturing with %r" % + (self._capturing,)) + cap = self._method2capture.get(method) + self._capturing = method + if cap is None: + self._method2capture[method] = cap = self._getcapture(method) + cap.startall() + else: + cap.resume() + + def suspendcapture(self, item=None): + self.deactivate_funcargs() + if hasattr(self, '_capturing'): + method = self._capturing + cap = self._method2capture.get(method) + if cap is not None: + outerr = cap.suspend() + del self._capturing + if item: + outerr = (item.outerr[0] + outerr[0], + item.outerr[1] + outerr[1]) + return outerr + if hasattr(item, 'outerr'): + return item.outerr + return "", "" + + def activate_funcargs(self, pyfuncitem): + if not hasattr(pyfuncitem, 'funcargs'): + return + assert not hasattr(self, '_capturing_funcargs') + self._capturing_funcargs = capturing_funcargs = [] + for name, capfuncarg in pyfuncitem.funcargs.items(): + if name in ('capsys', 'capfd'): + capturing_funcargs.append(capfuncarg) + capfuncarg._start() + + def deactivate_funcargs(self): + capturing_funcargs = getattr(self, '_capturing_funcargs', None) + if capturing_funcargs is not None: + while capturing_funcargs: + capfuncarg = capturing_funcargs.pop() + capfuncarg._finalize() + del self._capturing_funcargs + + def pytest_make_collect_report(self, __multicall__, collector): + method = self._getmethod(collector.config, collector.fspath) + try: + self.resumecapture(method) + except ValueError: + return # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + try: + rep = __multicall__.execute() + finally: + outerr = 
self.suspendcapture() + addouterr(rep, outerr) + return rep + + @pytest.mark.tryfirst + def pytest_runtest_setup(self, item): + self.resumecapture_item(item) + + @pytest.mark.tryfirst + def pytest_runtest_call(self, item): + self.resumecapture_item(item) + self.activate_funcargs(item) + + @pytest.mark.tryfirst + def pytest_runtest_teardown(self, item): + self.resumecapture_item(item) + + def pytest__teardown_final(self, __multicall__, session): + method = self._getmethod(session.config, None) + self.resumecapture(method) + try: + rep = __multicall__.execute() + finally: + outerr = self.suspendcapture() + if rep: + addouterr(rep, outerr) + return rep + + def pytest_keyboard_interrupt(self, excinfo): + if hasattr(self, '_capturing'): + self.suspendcapture() + + @pytest.mark.tryfirst + def pytest_runtest_makereport(self, __multicall__, item, call): + self.deactivate_funcargs() + rep = __multicall__.execute() + outerr = self.suspendcapture(item) + if not rep.passed: + addouterr(rep, outerr) + if not rep.passed or rep.when == "teardown": + outerr = ('', '') + item.outerr = outerr + return rep + +def pytest_funcarg__capsys(request): + """enables capturing of writes to sys.stdout/sys.stderr and makes + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` tuple. + """ + return CaptureFuncarg(py.io.StdCapture) + +def pytest_funcarg__capfd(request): + """enables capturing of writes to file descriptors 1 and 2 and makes + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` tuple. 
+ """ + if not hasattr(os, 'dup'): + py.test.skip("capfd funcarg needs os.dup") + return CaptureFuncarg(py.io.StdCaptureFD) + +class CaptureFuncarg: + def __init__(self, captureclass): + self.capture = captureclass(now=False) + + def _start(self): + self.capture.startall() + + def _finalize(self): + if hasattr(self, 'capture'): + self.capture.reset() + del self.capture + + def readouterr(self): + return self.capture.readouterr() + + def close(self): + self._finalize() diff --git a/_pytest/config.py b/_pytest/config.py new file mode 100644 --- /dev/null +++ b/_pytest/config.py @@ -0,0 +1,434 @@ +""" command line options, ini-file and conftest.py processing. """ + +import py +import sys, os +from _pytest.core import PluginManager +import pytest + +def pytest_cmdline_parse(pluginmanager, args): + config = Config(pluginmanager) + config.parse(args) + if config.option.debug: + config.trace.root.setwriter(sys.stderr.write) + return config + +class Parser: + """ Parser for command line arguments. """ + + def __init__(self, usage=None, processopt=None): + self._anonymous = OptionGroup("custom options", parser=self) + self._groups = [] + self._processopt = processopt + self._usage = usage + self._inidict = {} + self._ininames = [] + self.hints = [] + + def processoption(self, option): + if self._processopt: + if option.dest: + self._processopt(option) + + def addnote(self, note): + self._notes.append(note) + + def getgroup(self, name, description="", after=None): + """ get (or create) a named option Group. + + :name: unique name of the option group. + :description: long description for --help output. + :after: name of other group, used for ordering --help output. 
+ """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i+1, group) + return group + + def addoption(self, *opts, **attrs): + """ add an optparse-style option. """ + self._anonymous.addoption(*opts, **attrs) + + def parse(self, args): + self.optparser = optparser = MyOptionParser(self) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + optgroup = py.std.optparse.OptionGroup(optparser, desc) + optgroup.add_options(group.options) + optparser.add_option_group(optgroup) + return self.optparser.parse_args([str(x) for x in args]) + + def parse_setoption(self, args, option): + parsedoption, args = self.parse(args) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return args + + def addini(self, name, help, type=None, default=None): + """ add an ini-file option with the given name and description. """ + assert type in (None, "pathlist", "args", "linelist") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + +class OptionGroup: + def __init__(self, name, description="", parser=None): + self.name = name + self.description = description + self.options = [] + self.parser = parser + + def addoption(self, *optnames, **attrs): + """ add an option to this group. 
""" + option = py.std.optparse.Option(*optnames, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *optnames, **attrs): + option = py.std.optparse.Option(*optnames, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option, shortupper=False): + if not shortupper: + for opt in option._short_opts: + if opt[0] == '-' and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(py.std.optparse.OptionParser): + def __init__(self, parser): + self._parser = parser + py.std.optparse.OptionParser.__init__(self, usage=parser._usage, + add_help_option=False) + def format_epilog(self, formatter): + hints = self._parser.hints + if hints: + s = "\n".join(["hint: " + x for x in hints]) + "\n" + s = "\n" + s + "\n" + return s + return "" + +class Conftest(object): + """ the single place for accessing values and interacting + towards conftest modules from py.test objects. + """ + def __init__(self, onimport=None, confcutdir=None): + self._path2confmods = {} + self._onimport = onimport + self._conftestpath2mod = {} + self._confcutdir = confcutdir + + def setinitial(self, args): + """ try to find a first anchor path for looking up global values + from conftests. This function is usually called _before_ + argument parsing. conftest files may add command line options + and we thus have no completely safe way of determining + which parts of the arguments are actually related to options + and which are file system paths. We just try here to get + bootstrapped ... 
+ """ + current = py.path.local() + opt = '--confcutdir' + for i in range(len(args)): + opt1 = str(args[i]) + if opt1.startswith(opt): + if opt1 == opt: + if len(args) > i: + p = current.join(args[i+1], abs=True) + elif opt1.startswith(opt + "="): + p = current.join(opt1[len(opt)+1:], abs=1) + self._confcutdir = p + break + for arg in args + [current]: + if hasattr(arg, 'startswith') and arg.startswith("--"): + continue + anchor = current.join(arg, abs=1) + if anchor.check(): # we found some file object + self._path2confmods[None] = self.getconftestmodules(anchor) + # let's also consider test* dirs + if anchor.check(dir=1): + for x in anchor.listdir("test*"): + if x.check(dir=1): + self.getconftestmodules(x) + break + else: + assert 0, "no root of filesystem?" + + def getconftestmodules(self, path): + """ return a list of imported conftest modules for the given path. """ + try: + clist = self._path2confmods[path] + except KeyError: + if path is None: + raise ValueError("missing default confest.") + dp = path.dirpath() + clist = [] + if dp != path: + cutdir = self._confcutdir + if cutdir and path != cutdir and not path.relto(cutdir): + pass + else: + conftestpath = path.join("conftest.py") + if conftestpath.check(file=1): + clist.append(self.importconftest(conftestpath)) + clist[:0] = self.getconftestmodules(dp) + self._path2confmods[path] = clist + # be defensive: avoid changes from caller side to + # affect us by always returning a copy of the actual list + return clist[:] + + def rget(self, name, path=None): + mod, value = self.rget_with_confmod(name, path) + return value + + def rget_with_confmod(self, name, path=None): + modules = self.getconftestmodules(path) + modules.reverse() + for mod in modules: + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def importconftest(self, conftestpath): + assert conftestpath.check(), conftestpath + try: + return self._conftestpath2mod[conftestpath] + except KeyError: + 
pkgpath = conftestpath.pypkgpath() + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.purebasename) + self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport() + dirpath = conftestpath.dirpath() + if dirpath in self._path2confmods: + for path, mods in self._path2confmods.items(): + if path and path.relto(dirpath) or path == dirpath: + assert mod not in mods + mods.append(mod) + self._postimport(mod) + return mod + + def _postimport(self, mod): + if self._onimport: + self._onimport(mod) + return mod + +def _ensure_removed_sysmodule(modname): + try: + del sys.modules[modname] + except KeyError: + pass + +class CmdOptions(object): + """ holds cmdline options as attributes.""" + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + def __repr__(self): + return "" %(self.__dict__,) + +class Config(object): + """ access to configuration values, pluginmanager and plugin hooks. """ + def __init__(self, pluginmanager=None): + #: command line option values, usually added via parser.addoption(...) + #: or parser.getgroup(...).addoption(...) 
calls + self.option = CmdOptions() + self._parser = Parser( + usage="usage: %prog [options] [file_or_dir] [file_or_dir] [...]", + processopt=self._processopt, + ) + #: a pluginmanager instance + self.pluginmanager = pluginmanager or PluginManager(load=True) + self.trace = self.pluginmanager.trace.root.get("config") + self._conftest = Conftest(onimport=self._onimportconftest) + self.hook = self.pluginmanager.hook + self._inicache = {} + + def _onimportconftest(self, conftestmodule): + self.trace("loaded conftestmodule %r" %(conftestmodule,)) + self.pluginmanager.consider_conftest(conftestmodule) + + def _processopt(self, opt): + if hasattr(opt, 'default') and opt.dest: + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + def _getmatchingplugins(self, fspath): + allconftests = self._conftest._conftestpath2mod.values() + plugins = [x for x in self.pluginmanager.getplugins() + if x not in allconftests] + plugins += self._conftest.getconftestmodules(fspath) + return plugins + + def _setinitialconftest(self, args): + # capture output during conftest init (#issue93) + from _pytest.capture import CaptureManager + capman = CaptureManager() + self.pluginmanager.register(capman, 'capturemanager') + # will be unregistered in capture.py's unconfigure() + capman.resumecapture(capman._getmethod_preoptionparse(args)) + try: + try: + self._conftest.setinitial(args) + finally: + out, err = capman.suspendcapture() # logging might have got it + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + + def _initini(self, args): + self.inicfg = getcfg(args, ["pytest.ini", "tox.ini", "setup.cfg"]) + self._parser.addini('addopts', 'extra command line options', 'args') + self._parser.addini('minversion', 'minimally required pytest version') + + def _preparse(self, args, addopts=True): + self._initini(args) + if addopts: + args[:] = self.getini("addopts") + args + self._checkversion() + self.pluginmanager.consider_preparse(args) + 
self.pluginmanager.consider_setuptools_entrypoints() + self.pluginmanager.consider_env() + self._setinitialconftest(args) + self.pluginmanager.do_addoption(self._parser) + if addopts: + self.hook.pytest_cmdline_preparse(config=self, args=args) + + def _checkversion(self): + minver = self.inicfg.get('minversion', None) + if minver: + ver = minver.split(".") + myver = pytest.__version__.split(".") + if myver < ver: + raise pytest.UsageError( + "%s:%d: requires pytest-%s, actual pytest-%s'" %( + self.inicfg.config.path, self.inicfg.lineof('minversion'), + minver, pytest.__version__)) + + def parse(self, args): + # parse given cmdline arguments into this config object. + # Note that this can only be called once per testing process. + assert not hasattr(self, 'args'), ( + "can only parse cmdline args at most once per Config object") + self._preparse(args) + self._parser.hints.extend(self.pluginmanager._hints) + args = self._parser.parse_setoption(args, self.option) + if not args: + args.append(py.std.os.getcwd()) + self.args = args + + def getini(self, name): + """ return configuration value from an ini file. If the + specified name hasn't been registered through a prior ``parse.addini`` + call (usually from a plugin), a ValueError is raised. 
""" + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + def _getini(self, name): + try: + description, type, default = self._parser._inidict[name] + except KeyError: + raise ValueError("unknown configuration value: %r" %(name,)) + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return '' + return [] + if type == "pathlist": + dp = py.path.local(self.inicfg.config.path).dirpath() + l = [] + for relpath in py.std.shlex.split(value): + l.append(dp.join(relpath, abs=True)) + return l + elif type == "args": + return py.std.shlex.split(value) + elif type == "linelist": + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + else: + assert type is None + return value + + def _getconftest_pathlist(self, name, path=None): + try: + mod, relroots = self._conftest.rget_with_confmod(name, path) + except KeyError: + return None + modpath = py.path.local(mod.__file__).dirpath() + l = [] + for relroot in relroots: + if not isinstance(relroot, py.path.local): + relroot = relroot.replace("/", py.path.local.sep) + relroot = modpath.join(relroot, abs=True) + l.append(relroot) + return l + + def _getconftest(self, name, path=None, check=False): + if check: + self._checkconftest(name) + return self._conftest.rget(name, path) + + def getvalue(self, name, path=None): + """ return ``name`` value looked set from command line options. + + (deprecated) if we can't find the option also lookup + the name in a matching conftest file. + """ + try: + return getattr(self.option, name) + except AttributeError: + return self._getconftest(name, path, check=False) + + def getvalueorskip(self, name, path=None): + """ (deprecated) return getvalue(name) or call + py.test.skip if no value exists. 
""" + __tracebackhide__ = True + try: + val = self.getvalue(name, path) + if val is None: + raise KeyError(name) + return val + except KeyError: + py.test.skip("no %r value found" %(name,)) + + +def getcfg(args, inibasenames): + args = [x for x in args if str(x)[0] != "-"] + if not args: + args = [py.path.local()] + for arg in args: + arg = py.path.local(arg) + for base in arg.parts(reverse=True): + for inibasename in inibasenames: + p = base.join(inibasename) + if p.check(): + iniconfig = py.iniconfig.IniConfig(p) + if 'pytest' in iniconfig.sections: + return iniconfig['pytest'] + return {} + +def findupwards(current, basename): + current = py.path.local(current) + while 1: + p = current.join(basename) + if p.check(): + return p + p = current.dirpath() + if p == current: + return + current = p + diff --git a/_pytest/core.py b/_pytest/core.py new file mode 100644 --- /dev/null +++ b/_pytest/core.py @@ -0,0 +1,457 @@ +""" +pytest PluginManager, basic initialization and tracing. +(c) Holger Krekel 2004-2010 +""" +import sys, os +import inspect +import py +from _pytest import hookspec # the extension point definitions + +assert py.__version__.split(".")[:2] >= ['1', '4'], ("installation problem: " + "%s is too old, remove or upgrade 'py'" % (py.__version__)) + +default_plugins = ( + "config mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +class TagTracer: + def __init__(self, prefix="[pytest] "): + self._tag2proc = {} + self.writer = None + self.indent = 0 + self.prefix = prefix + + def get(self, name): + return TagTracerSub(self, (name,)) + + def processmessage(self, tags, args): + if self.writer is not None: + if args: + indent = " " * self.indent + content = " ".join(map(str, args)) + self.writer("%s%s%s\n" %(self.prefix, indent, content)) + try: + self._tag2proc[tags](tags, args) + except KeyError: + pass + + def setwriter(self, 
writer): + self.writer = writer + + def setprocessor(self, tags, processor): + if isinstance(tags, str): + tags = tuple(tags.split(":")) + else: + assert isinstance(tags, tuple) + self._tag2proc[tags] = processor + +class TagTracerSub: + def __init__(self, root, tags): + self.root = root + self.tags = tags + def __call__(self, *args): + self.root.processmessage(self.tags, args) + def setmyprocessor(self, processor): + self.root.setprocessor(self.tags, processor) + def get(self, name): + return self.__class__(self.root, self.tags + (name,)) + +class PluginManager(object): + def __init__(self, load=False): + self._name2plugin = {} + self._listattrcache = {} + self._plugins = [] + self._hints = [] + self.trace = TagTracer().get("pluginmanage") + self._plugin_distinfo = [] + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.hook = HookRelay([hookspec], pm=self) + self.register(self) + if load: + for spec in default_plugins: + self.import_plugin(spec) + + def register(self, plugin, name=None, prepend=False): + assert not self.isregistered(plugin), plugin + name = name or getattr(plugin, '__name__', str(id(plugin))) + if name in self._name2plugin: + return False + #self.trace("registering", name, plugin) + self._name2plugin[name] = plugin + self.call_plugin(plugin, "pytest_addhooks", {'pluginmanager': self}) + self.hook.pytest_plugin_registered(manager=self, plugin=plugin) + if not prepend: + self._plugins.append(plugin) + else: + self._plugins.insert(0, plugin) + return True + + def unregister(self, plugin=None, name=None): + if plugin is None: + plugin = self.getplugin(name=name) + self._plugins.remove(plugin) + self.hook.pytest_plugin_unregistered(plugin=plugin) + for name, value in list(self._name2plugin.items()): + if value == plugin: + del self._name2plugin[name] + + def isregistered(self, 
plugin, name=None): + if self.getplugin(name) is not None: + return True + for val in self._name2plugin.values(): + if plugin == val: + return True + + def addhooks(self, spec): + self.hook._addhooks(spec, prefix="pytest_") + + def getplugins(self): + return list(self._plugins) + + def skipifmissing(self, name): + if not self.hasplugin(name): + py.test.skip("plugin %r is missing" % name) + + def hasplugin(self, name): + return bool(self.getplugin(name)) + + def getplugin(self, name): + if name is None: + return None + try: + return self._name2plugin[name] + except KeyError: + return self._name2plugin.get("_pytest." + name, None) + + # API for bootstrapping + # + def _envlist(self, varname): + val = py.std.os.environ.get(varname, None) + if val is not None: + return val.split(',') + return () + + def consider_env(self): + for spec in self._envlist("PYTEST_PLUGINS"): + self.import_plugin(spec) + + def consider_setuptools_entrypoints(self): + try: + from pkg_resources import iter_entry_points, DistributionNotFound + except ImportError: + return # XXX issue a warning + for ep in iter_entry_points('pytest11'): + name = ep.name + if name.startswith("pytest_"): + name = name[7:] + if ep.name in self._name2plugin or name in self._name2plugin: + continue + try: + plugin = ep.load() + except DistributionNotFound: + continue + self._plugin_distinfo.append((ep.dist, plugin)) + self.register(plugin, name=name) + + def consider_preparse(self, args): + for opt1,opt2 in zip(args, args[1:]): + if opt1 == "-p": + if opt2.startswith("no:"): + name = opt2[3:] + if self.getplugin(name) is not None: + self.unregister(None, name=name) + self._name2plugin[name] = -1 + else: + if self.getplugin(opt2) is None: + self.import_plugin(opt2) + + def consider_conftest(self, conftestmodule): + if self.register(conftestmodule, name=conftestmodule.__file__): + self.consider_module(conftestmodule) + + def consider_module(self, mod): + attr = getattr(mod, "pytest_plugins", ()) + if attr: + if not 
isinstance(attr, (list, tuple)): + attr = (attr,) + for spec in attr: + self.import_plugin(spec) + + def import_plugin(self, modname): + assert isinstance(modname, str) + if self.getplugin(modname) is not None: + return + try: + #self.trace("importing", modname) + mod = importplugin(modname) + except KeyboardInterrupt: + raise + except ImportError: + if modname.startswith("pytest_"): + return self.import_plugin(modname[7:]) + raise + except: + e = py.std.sys.exc_info()[1] + if not hasattr(py.test, 'skip'): + raise + elif not isinstance(e, py.test.skip.Exception): + raise + self._hints.append("skipped plugin %r: %s" %((modname, e.msg))) + else: + self.register(mod, modname) + self.consider_module(mod) + + def pytest_plugin_registered(self, plugin): + import pytest + dic = self.call_plugin(plugin, "pytest_namespace", {}) or {} + if dic: + self._setns(pytest, dic) + if hasattr(self, '_config'): + self.call_plugin(plugin, "pytest_addoption", + {'parser': self._config._parser}) + self.call_plugin(plugin, "pytest_configure", + {'config': self._config}) + + def _setns(self, obj, dic): + import pytest + for name, value in dic.items(): + if isinstance(value, dict): + mod = getattr(obj, name, None) + if mod is None: + modname = "pytest.%s" % name + mod = py.std.types.ModuleType(modname) + sys.modules[modname] = mod + mod.__all__ = [] + setattr(obj, name, mod) + obj.__all__.append(name) + self._setns(mod, value) + else: + setattr(obj, name, value) + obj.__all__.append(name) + #if obj != pytest: + # pytest.__all__.append(name) + setattr(pytest, name, value) + + def pytest_terminal_summary(self, terminalreporter): + tw = terminalreporter._tw + if terminalreporter.config.option.traceconfig: + for hint in self._hints: + tw.line("hint: %s" % hint) + + def do_addoption(self, parser): + mname = "pytest_addoption" + methods = reversed(self.listattr(mname)) + MultiCall(methods, {'parser': parser}).execute() + + def do_configure(self, config): + assert not hasattr(self, '_config') + 
self._config = config + config.hook.pytest_configure(config=self._config) + + def do_unconfigure(self, config): + config = self._config + del self._config + config.hook.pytest_unconfigure(config=config) + config.pluginmanager.unregister(self) + + def notify_exception(self, excinfo): + excrepr = excinfo.getrepr(funcargs=True, showlocals=True) + res = self.hook.pytest_internalerror(excrepr=excrepr) + if not py.builtin.any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.flush() + + def listattr(self, attrname, plugins=None): + if plugins is None: + plugins = self._plugins + key = (attrname,) + tuple(plugins) + try: + return list(self._listattrcache[key]) + except KeyError: + pass + l = [] + last = [] + for plugin in plugins: + try: + meth = getattr(plugin, attrname) + if hasattr(meth, 'tryfirst'): + last.append(meth) + elif hasattr(meth, 'trylast'): + l.insert(0, meth) + else: + l.append(meth) + except AttributeError: + continue + l.extend(last) + self._listattrcache[key] = list(l) + return l + + def call_plugin(self, plugin, methname, kwargs): + return MultiCall(methods=self.listattr(methname, plugins=[plugin]), + kwargs=kwargs, firstresult=True).execute() + + +def importplugin(importspec): + name = importspec + try: + mod = "_pytest." + name + return __import__(mod, None, None, '__doc__') + except ImportError: + #e = py.std.sys.exc_info()[1] + #if str(e).find(name) == -1: + # raise + pass # + return __import__(importspec, None, None, '__doc__') + +class MultiCall: + """ execute a call into multiple python functions/methods. 
""" + def __init__(self, methods, kwargs, firstresult=False): + self.methods = list(methods) + self.kwargs = kwargs + self.results = [] + self.firstresult = firstresult + + def __repr__(self): + status = "%d results, %d meths" % (len(self.results), len(self.methods)) + return "" %(status, self.kwargs) + + def execute(self): + while self.methods: + method = self.methods.pop() + kwargs = self.getkwargs(method) + res = method(**kwargs) + if res is not None: + self.results.append(res) + if self.firstresult: + return res + if not self.firstresult: + return self.results + + def getkwargs(self, method): + kwargs = {} + for argname in varnames(method): + try: + kwargs[argname] = self.kwargs[argname] + except KeyError: + if argname == "__multicall__": + kwargs[argname] = self + return kwargs + +def varnames(func): + try: + return func._varnames + except AttributeError: + pass + if not inspect.isfunction(func) and not inspect.ismethod(func): + func = getattr(func, '__call__', func) + ismethod = inspect.ismethod(func) + rawcode = py.code.getrawcode(func) + try: + x = rawcode.co_varnames[ismethod:rawcode.co_argcount] + except AttributeError: + x = () + py.builtin._getfuncdict(func)['_varnames'] = x + return x + +class HookRelay: + def __init__(self, hookspecs, pm, prefix="pytest_"): + if not isinstance(hookspecs, list): + hookspecs = [hookspecs] + self._hookspecs = [] + self._pm = pm + self.trace = pm.trace.root.get("hook") + for hookspec in hookspecs: + self._addhooks(hookspec, prefix) + + def _addhooks(self, hookspecs, prefix): + self._hookspecs.append(hookspecs) + added = False + for name, method in vars(hookspecs).items(): + if name.startswith(prefix): + firstresult = getattr(method, 'firstresult', False) + hc = HookCaller(self, name, firstresult=firstresult) + setattr(self, name, hc) + added = True + #print ("setting new hook", name) + if not added: + raise ValueError("did not find new %r hooks in %r" %( + prefix, hookspecs,)) + + +class HookCaller: + def __init__(self, 
hookrelay, name, firstresult): + self.hookrelay = hookrelay + self.name = name + self.firstresult = firstresult + self.trace = self.hookrelay.trace + + def __repr__(self): + return "" %(self.name,) + + def __call__(self, **kwargs): + methods = self.hookrelay._pm.listattr(self.name) + return self._docall(methods, kwargs) + + def pcall(self, plugins, **kwargs): + methods = self.hookrelay._pm.listattr(self.name, plugins=plugins) + return self._docall(methods, kwargs) + + def _docall(self, methods, kwargs): + self.trace(self.name, kwargs) + self.trace.root.indent += 1 + mc = MultiCall(methods, kwargs, firstresult=self.firstresult) + try: + res = mc.execute() + if res: + self.trace("finish", self.name, "-->", res) + finally: + self.trace.root.indent -= 1 + return res + +_preinit = [] + +def _preloadplugins(): + _preinit.append(PluginManager(load=True)) + +def main(args=None, plugins=None): + """ returned exit code integer, after an in-process testing run + with the given command line arguments, preloading an optional list + of passed in plugin objects. 
""" + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + if _preinit: + _pluginmanager = _preinit.pop(0) + else: # subsequent calls to main will create a fresh instance + _pluginmanager = PluginManager(load=True) + hook = _pluginmanager.hook + try: + if plugins: + for plugin in plugins: + _pluginmanager.register(plugin) + config = hook.pytest_cmdline_parse( + pluginmanager=_pluginmanager, args=args) + exitstatus = hook.pytest_cmdline_main(config=config) + except UsageError: + e = sys.exc_info()[1] + sys.stderr.write("ERROR: %s\n" %(e.args[0],)) + exitstatus = 3 + return exitstatus + +class UsageError(Exception): + """ error in py.test usage or invocation""" + diff --git a/_pytest/doctest.py b/_pytest/doctest.py new file mode 100644 --- /dev/null +++ b/_pytest/doctest.py @@ -0,0 +1,87 @@ +""" discover and run doctests in modules and test files.""" + +import pytest, py +from py._code.code import TerminalRepr, ReprFileLocation + +def pytest_addoption(parser): + group = parser.getgroup("collect") + group.addoption("--doctest-modules", + action="store_true", default=False, + help="run doctests in all .py modules", + dest="doctestmodules") + group.addoption("--doctest-glob", + action="store", default="test*.txt", metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob") + +def pytest_collect_file(path, parent): + config = parent.config + if path.ext == ".py": + if config.option.doctestmodules: + return DoctestModule(path, parent) + elif (path.ext in ('.txt', '.rst') and parent.session.isinitpath(path)) or \ + path.check(fnmatch=config.getvalue("doctestglob")): + return DoctestTextfile(path, parent) + +class ReprFailDoctest(TerminalRepr): + def __init__(self, reprlocation, lines): + self.reprlocation = 
reprlocation + self.lines = lines + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + self.reprlocation.toterminal(tw) + +class DoctestItem(pytest.Item): + def repr_failure(self, excinfo): + doctest = py.std.doctest + if excinfo.errisinstance((doctest.DocTestFailure, + doctest.UnexpectedException)): + doctestfailure = excinfo.value + example = doctestfailure.example + test = doctestfailure.test + filename = test.filename + lineno = test.lineno + example.lineno + 1 + message = excinfo.type.__name__ + reprlocation = ReprFileLocation(filename, lineno, message) + checker = py.std.doctest.OutputChecker() + REPORT_UDIFF = py.std.doctest.REPORT_UDIFF + filelines = py.path.local(filename).readlines(cr=0) + i = max(test.lineno, max(0, lineno - 10)) # XXX? + lines = [] + for line in filelines[i:lineno]: + lines.append("%03d %s" % (i+1, line)) + i += 1 + if excinfo.errisinstance(doctest.DocTestFailure): + lines += checker.output_difference(example, + doctestfailure.got, REPORT_UDIFF).split("\n") + else: + inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % + repr(inner_excinfo.value)] + + return ReprFailDoctest(reprlocation, lines) + else: + return super(DoctestItem, self).repr_failure(excinfo) + + def reportinfo(self): + return self.fspath, None, "[doctest]" + +class DoctestTextfile(DoctestItem, pytest.File): + def runtest(self): + doctest = py.std.doctest + failed, tot = doctest.testfile( + str(self.fspath), module_relative=False, + optionflags=doctest.ELLIPSIS, + raise_on_error=True, verbose=0) + +class DoctestModule(DoctestItem, pytest.File): + def runtest(self): + doctest = py.std.doctest + if self.fspath.basename == "conftest.py": + module = self.config._conftest.importconftest(self.fspath) + else: + module = self.fspath.pyimport() + failed, tot = doctest.testmod( + module, raise_on_error=True, verbose=0, + optionflags=doctest.ELLIPSIS) diff --git a/_pytest/genscript.py b/_pytest/genscript.py new 
file mode 100755 --- /dev/null +++ b/_pytest/genscript.py @@ -0,0 +1,69 @@ +""" generate a single-file self-contained version of py.test """ +import py + +def find_toplevel(name): + for syspath in py.std.sys.path: + base = py.path.local(syspath) + lib = base/name + if lib.check(dir=1): + return lib + mod = base.join("%s.py" % name) + if mod.check(file=1): + return mod + raise LookupError(name) + +def pkgname(toplevel, rootpath, path): + parts = path.parts()[len(rootpath.parts()):] + return '.'.join([toplevel] + [x.purebasename for x in parts]) + +def pkg_to_mapping(name): + toplevel = find_toplevel(name) + name2src = {} + if toplevel.check(file=1): # module + name2src[toplevel.purebasename] = toplevel.read() + else: # package + for pyfile in toplevel.visit('*.py'): + pkg = pkgname(name, toplevel, pyfile) + name2src[pkg] = pyfile.read() + return name2src + +def compress_mapping(mapping): + data = py.std.pickle.dumps(mapping, 2) + data = py.std.zlib.compress(data, 9) + data = py.std.base64.encodestring(data) + data = data.decode('ascii') + return data + + +def compress_packages(names): + mapping = {} + for name in names: + mapping.update(pkg_to_mapping(name)) + return compress_mapping(mapping) + +def generate_script(entry, packages): + data = compress_packages(packages) + tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py') + exe = tmpl.read() + exe = exe.replace('@SOURCES@', data) + exe = exe.replace('@ENTRY@', entry) + return exe + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption("--genscript", action="store", default=None, + dest="genscript", metavar="path", + help="create standalone py.test script at given target path.") + +def pytest_cmdline_main(config): + genscript = config.getvalue("genscript") + if genscript: + script = generate_script( + 'import py; raise SystemExit(py.test.cmdline.main())', + ['py', '_pytest', 'pytest'], + ) + + genscript = py.path.local(genscript) + genscript.write(script) + 
return 0 diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py new file mode 100644 --- /dev/null +++ b/_pytest/helpconfig.py @@ -0,0 +1,177 @@ +""" version info, help messages, tracing configuration. """ +import py +import pytest +import inspect, sys +from _pytest.core import varnames + +def pytest_addoption(parser): + group = parser.getgroup('debugconfig') + group.addoption('--version', action="store_true", + help="display pytest lib version and import information.") + group._addoption("-h", "--help", action="store_true", dest="help", + help="show help message and configuration info") + group._addoption('-p', action="append", dest="plugins", default = [], + metavar="name", + help="early-load given plugin (multi-allowed).") + group.addoption('--traceconfig', + action="store_true", dest="traceconfig", default=False, + help="trace considerations of conftest.py files."), + group._addoption('--nomagic', + action="store_true", dest="nomagic", default=False, + help="don't reinterpret asserts, no traceback cutting. 
") + group.addoption('--debug', + action="store_true", dest="debug", default=False, + help="generate and show internal debugging information.") + + +def pytest_cmdline_main(config): + if config.option.version: + p = py.path.local(pytest.__file__) + sys.stderr.write("This is py.test version %s, imported from %s\n" % + (pytest.__version__, p)) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stderr.write(line + "\n") + return 0 + elif config.option.help: + config.pluginmanager.do_configure(config) + showhelp(config) + return 0 + +def showhelp(config): + tw = py.io.TerminalWriter() + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line() + #tw.sep( "=", "config file settings") + tw.line("[pytest] ini-options in the next " + "pytest.ini|tox.ini|setup.cfg file:") + tw.line() + + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + spec = "%s (%s)" % (name, type) + line = " %-24s %s" %(spec, help) + tw.line(line[:tw.fullwidth]) + + tw.line() ; tw.line() + #tw.sep("=") + return + + tw.line("conftest.py options:") + tw.line() + conftestitems = sorted(config._parser._conftestdict.items()) + for name, help in conftest_options + conftestitems: + line = " %-15s %s" %(name, help) + tw.line(line[:tw.fullwidth]) + tw.line() + #tw.sep( "=") + +conftest_options = [ + ('pytest_plugins', 'list of plugin names to load'), +] + +def getpluginversioninfo(config): + lines = [] + plugininfo = config.pluginmanager._plugin_distinfo + if plugininfo: + lines.append("setuptools registered plugins:") + for dist, plugin in plugininfo: + loc = getattr(plugin, '__file__', repr(plugin)) + content = "%s-%s at %s" % (dist.project_name, dist.version, loc) + lines.append(" " + content) + return lines + +def pytest_report_header(config): + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append("using: pytest-%s pylib-%s" % + 
(pytest.__version__,py.__version__)) + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + plugins = [] + items = config.pluginmanager._name2plugin.items() + for name, plugin in items: + if hasattr(plugin, '__file__'): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(" %-20s: %s" %(name, r)) + return lines + + +# ===================================================== +# validate plugin syntax and hooks +# ===================================================== + +def pytest_plugin_registered(manager, plugin): + methods = collectattr(plugin) + hooks = {} + for hookspec in manager.hook._hookspecs: + hooks.update(collectattr(hookspec)) + + stringio = py.io.TextIO() + def Print(*args): + if args: + stringio.write(" ".join(map(str, args))) + stringio.write("\n") + + fail = False + while methods: + name, method = methods.popitem() + #print "checking", name + if isgenerichook(name): + continue + if name not in hooks: + if not getattr(method, 'optionalhook', False): + Print("found unknown hook:", name) + fail = True + else: + #print "checking", method + method_args = list(varnames(method)) + if '__multicall__' in method_args: + method_args.remove('__multicall__') + hook = hooks[name] + hookargs = varnames(hook) + for arg in method_args: + if arg not in hookargs: + Print("argument %r not available" %(arg, )) + Print("actual definition: %s" %(formatdef(method))) + Print("available hook arguments: %s" % + ", ".join(hookargs)) + fail = True + break + #if not fail: + # print "matching hook:", formatdef(method) + if fail: + name = getattr(plugin, '__name__', plugin) + raise PluginValidationError("%s:\n%s" % (name, stringio.getvalue())) + +class PluginValidationError(Exception): + """ plugin failed validation. 
""" + +def isgenerichook(name): + return name == "pytest_plugins" or \ + name.startswith("pytest_funcarg__") + +def collectattr(obj): + methods = {} + for apiname in dir(obj): + if apiname.startswith("pytest_"): + methods[apiname] = getattr(obj, apiname) + return methods + +def formatdef(func): + return "%s%s" % ( + func.__name__, + inspect.formatargspec(*inspect.getargspec(func)) + ) + diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py new file mode 100644 --- /dev/null +++ b/_pytest/hookspec.py @@ -0,0 +1,222 @@ +""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ + +# ------------------------------------------------------------------------- +# Initialization +# ------------------------------------------------------------------------- + +def pytest_addhooks(pluginmanager): + """called at plugin load time to allow adding new hooks via a call to + pluginmanager.registerhooks(module).""" + + +def pytest_namespace(): + """return dict of name->object to be made globally available in + the py.test/pytest namespace. This hook is called before command + line options are parsed. + """ + +def pytest_cmdline_parse(pluginmanager, args): + """return initialized config object, parsing the specified args. """ +pytest_cmdline_parse.firstresult = True + +def pytest_cmdline_preparse(config, args): + """modify command line arguments before option parsing. """ + +def pytest_addoption(parser): + """add optparse-style options and ini-style config values via calls + to ``parser.addoption`` and ``parser.addini(...)``. + """ + +def pytest_cmdline_main(config): + """ called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. """ +pytest_cmdline_main.firstresult = True + +def pytest_configure(config): + """ called after command line options have been parsed. + and all plugins and initial conftest files been loaded. 
+ """ + +def pytest_unconfigure(config): + """ called before test process is exited. """ + +def pytest_runtestloop(session): + """ called for performing the main runtest loop + (after collection finished). """ +pytest_runtestloop.firstresult = True + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + +def pytest_collection(session): + """ perform the collection protocol for the given session. """ +pytest_collection.firstresult = True + +def pytest_collection_modifyitems(session, config, items): + """ called after collection has been performed, may filter or re-order + the items in-place.""" + +def pytest_collection_finish(session): + """ called after collection has been performed and modified. """ + +def pytest_ignore_collect(path, config): + """ return True to prevent considering this path for collection. + This hook is consulted for all files and directories prior to calling + more specific hooks. + """ +pytest_ignore_collect.firstresult = True + +def pytest_collect_directory(path, parent): + """ called before traversing a directory for collection files. """ +pytest_collect_directory.firstresult = True + +def pytest_collect_file(path, parent): + """ return collection Node or None for the given path. Any new node + needs to have the specified ``parent`` as a parent.""" + +# logging hooks for collection +def pytest_collectstart(collector): + """ collector starts collecting. """ + +def pytest_itemcollected(item): + """ we just collected a test item. """ + +def pytest_collectreport(report): + """ collector finished collecting. """ + +def pytest_deselected(items): + """ called for test items deselected by keyword. """ + +def pytest_make_collect_report(collector): + """ perform ``collector.collect()`` and return a CollectReport. 
""" +pytest_make_collect_report.firstresult = True + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + +def pytest_pycollect_makemodule(path, parent): + """ return a Module collector or None for the given path. + This hook will be called for each matching test module path. + The pytest_collect_file hook needs to be used if you want to + create test modules for files that do not match as a test module. + """ +pytest_pycollect_makemodule.firstresult = True + +def pytest_pycollect_makeitem(collector, name, obj): + """ return custom item/collector for a python object in a module, or None. """ +pytest_pycollect_makeitem.firstresult = True + +def pytest_pyfunc_call(pyfuncitem): + """ call underlying test function. """ +pytest_pyfunc_call.firstresult = True + +def pytest_generate_tests(metafunc): + """ generate (multiple) parametrized calls to a test function.""" + +# ------------------------------------------------------------------------- +# generic runtest related hooks +# ------------------------------------------------------------------------- +def pytest_itemstart(item, node=None): + """ (deprecated, use pytest_runtest_logstart). """ + +def pytest_runtest_protocol(item): + """ implements the standard runtest_setup/call/teardown protocol including + capturing exceptions and calling reporting hooks on the results accordingly. + + :return boolean: True if no further hook implementations should be invoked. + """ +pytest_runtest_protocol.firstresult = True + +def pytest_runtest_logstart(nodeid, location): + """ signal the start of a test run. """ + +def pytest_runtest_setup(item): + """ called before ``pytest_runtest_call(item)``. """ + +def pytest_runtest_call(item): + """ called to execute the test ``item``. """ + +def pytest_runtest_teardown(item): + """ called after ``pytest_runtest_call``. 
""" + +def pytest_runtest_makereport(item, call): + """ return a :py:class:`_pytest.runner.TestReport` object + for the given :py:class:`pytest.Item` and + :py:class:`_pytest.runner.CallInfo`. + """ +pytest_runtest_makereport.firstresult = True + +def pytest_runtest_logreport(report): + """ process item test report. """ + +# special handling for final teardown - somewhat internal for now +def pytest__teardown_final(session): + """ called before test session finishes. """ +pytest__teardown_final.firstresult = True + +def pytest__teardown_final_logerror(report, session): + """ called if runtest_teardown_final failed. """ + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + +def pytest_sessionstart(session): + """ before session.main() is called. """ + +def pytest_sessionfinish(session, exitstatus): + """ whole test run finishes. """ + + +# ------------------------------------------------------------------------- +# hooks for customising the assert methods +# ------------------------------------------------------------------------- + +def pytest_assertrepr_compare(config, op, left, right): + """return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented sligthly, the intention is for the first line to be a summary. 
+ """ + +# ------------------------------------------------------------------------- +# hooks for influencing reporting (invoked from _pytest_terminal) +# ------------------------------------------------------------------------- + +def pytest_report_header(config): + """ return a string to be displayed as header info for terminal reporting.""" + +def pytest_report_teststatus(report): + """ return result-category, shortletter and verbose word for reporting.""" +pytest_report_teststatus.firstresult = True + +def pytest_terminal_summary(terminalreporter): + """ add additional section in terminal summary reporting. """ + +# ------------------------------------------------------------------------- +# doctest hooks +# ------------------------------------------------------------------------- + +def pytest_doctest_prepare_content(content): + """ return processed content for a given doctest""" +pytest_doctest_prepare_content.firstresult = True + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + +def pytest_plugin_registered(plugin, manager): + """ a new py lib plugin got registered. """ + +def pytest_plugin_unregistered(plugin): + """ a py lib plugin got unregistered. """ + +def pytest_internalerror(excrepr): + """ called for internal errors. """ + +def pytest_keyboard_interrupt(excinfo): + """ called for keyboard interrupt. """ diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py new file mode 100644 --- /dev/null +++ b/_pytest/junitxml.py @@ -0,0 +1,173 @@ +""" report test results in JUnit-XML format, for use with Hudson and build integration servers. + +Based on initial code from Ross Lawley. 
+""" + +import py +import os +import time + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group.addoption('--junitxml', action="store", dest="xmlpath", + metavar="path", default=None, + help="create junit-xml style report file at given path.") + group.addoption('--junitprefix', action="store", dest="junitprefix", + metavar="str", default=None, + help="prepend prefix to classnames in junit-xml output") + +def pytest_configure(config): + xmlpath = config.option.xmlpath + if xmlpath: + config._xml = LogXML(xmlpath, config.option.junitprefix) + config.pluginmanager.register(config._xml) + +def pytest_unconfigure(config): + xml = getattr(config, '_xml', None) + if xml: + del config._xml + config.pluginmanager.unregister(xml) + +class LogXML(object): + def __init__(self, logfile, prefix): + self.logfile = logfile + self.prefix = prefix + self.test_logs = [] + self.passed = self.skipped = 0 + self.failed = self.errors = 0 + self._durations = {} + + def _opentestcase(self, report): + names = report.nodeid.split("::") + names[0] = names[0].replace("/", '.') + names = tuple(names) + d = {'time': self._durations.pop(names, "0")} + names = [x.replace(".py", "") for x in names if x != "()"] + classnames = names[:-1] + if self.prefix: + classnames.insert(0, self.prefix) + d['classname'] = ".".join(classnames) + d['name'] = py.xml.escape(names[-1]) + attrs = ['%s="%s"' % item for item in sorted(d.items())] + self.test_logs.append("\n" % " ".join(attrs)) + + def _closetestcase(self): + self.test_logs.append("") + + def appendlog(self, fmt, *args): + args = tuple([py.xml.escape(arg) for arg in args]) + self.test_logs.append(fmt % args) + + def append_pass(self, report): + self.passed += 1 + self._opentestcase(report) + self._closetestcase() + + def append_failure(self, report): + self._opentestcase(report) + #msg = str(report.longrepr.reprtraceback.extraline) + if "xfail" in report.keywords: + self.appendlog( + '') + self.skipped += 1 + else: + 
self.appendlog('%s', + report.longrepr) + self.failed += 1 + self._closetestcase() + + def append_collect_failure(self, report): + self._opentestcase(report) + #msg = str(report.longrepr.reprtraceback.extraline) + self.appendlog('%s', + report.longrepr) + self._closetestcase() + self.errors += 1 + + def append_collect_skipped(self, report): + self._opentestcase(report) + #msg = str(report.longrepr.reprtraceback.extraline) + self.appendlog('%s', + report.longrepr) + self._closetestcase() + self.skipped += 1 + + def append_error(self, report): + self._opentestcase(report) + self.appendlog('%s', + report.longrepr) + self._closetestcase() + self.errors += 1 + + def append_skipped(self, report): + self._opentestcase(report) + if "xfail" in report.keywords: + self.appendlog( + '%s', + report.keywords['xfail']) + else: + self.appendlog("") + self._closetestcase() + self.skipped += 1 + + def pytest_runtest_logreport(self, report): + if report.passed: + self.append_pass(report) + elif report.failed: + if report.when != "call": + self.append_error(report) + else: + self.append_failure(report) + elif report.skipped: + self.append_skipped(report) + + def pytest_runtest_call(self, item, __multicall__): + names = tuple(item.listnames()) + start = time.time() + try: + return __multicall__.execute() + finally: + self._durations[names] = time.time() - start + + def pytest_collectreport(self, report): + if not report.passed: + if report.failed: + self.append_collect_failure(report) + else: + self.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr): + self.errors += 1 + data = py.xml.escape(excrepr) + self.test_logs.append( + '\n' + ' ' + '%s' % data) + + def pytest_sessionstart(self, session): + self.suite_start_time = time.time() + + def pytest_sessionfinish(self, session, exitstatus, __multicall__): + if py.std.sys.version_info[0] < 3: + logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8') + else: + logfile = open(self.logfile, 'w', 
encoding='utf-8') + + suite_stop_time = time.time() + suite_time_delta = suite_stop_time - self.suite_start_time + numtests = self.passed + self.failed + logfile.write('') + logfile.write('') + logfile.writelines(self.test_logs) + logfile.write('') + logfile.close() + + def pytest_terminal_summary(self, terminalreporter): + terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) diff --git a/_pytest/main.py b/_pytest/main.py new file mode 100644 --- /dev/null +++ b/_pytest/main.py @@ -0,0 +1,534 @@ +""" core implementation of testing process: init, session, runtest loop. """ + +import py +import pytest, _pytest +import os, sys +tracebackcutdir = py.path.local(_pytest.__file__).dirpath() + +# exitcodes for the command line +EXIT_OK = 0 +EXIT_TESTSFAILED = 1 +EXIT_INTERRUPTED = 2 +EXIT_INTERNALERROR = 3 + +def pytest_addoption(parser): + parser.addini("norecursedirs", "directory patterns to avoid for recursion", + type="args", default=('.*', 'CVS', '_darcs', '{arch}')) + #parser.addini("dirpatterns", + # "patterns specifying possible locations of test files", + # type="linelist", default=["**/test_*.txt", + # "**/test_*.py", "**/*_test.py"] + #) + group = parser.getgroup("general", "running and selection options") + group._addoption('-x', '--exitfirst', action="store_true", default=False, + dest="exitfirst", + help="exit instantly on first error or failed test."), + group._addoption('--maxfail', metavar="num", + action="store", type="int", dest="maxfail", default=0, + help="exit after first num failures or errors.") + + group = parser.getgroup("collect", "collection") + group.addoption('--collectonly', + action="store_true", dest="collectonly", + help="only collect tests, don't execute them."), + group.addoption('--pyargs', action="store_true", + help="try to interpret all arguments as python packages.") + group.addoption("--ignore", action="append", metavar="path", + help="ignore path during collection (multi-allowed).") + 
group.addoption('--confcutdir', dest="confcutdir", default=None, + metavar="dir", + help="only load conftest.py's relative to specified dir.") + + group = parser.getgroup("debugconfig", + "test session debugging and configuration") + group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", + help="base temporary directory for this test run.") + + +def pytest_namespace(): + return dict(collect=dict(Item=Item, Collector=Collector, File=File)) + +def pytest_configure(config): + py.test.config = config # compatibiltiy + if config.option.exitfirst: + config.option.maxfail = 1 + +def pytest_cmdline_main(config): + """ default command line protocol for initialization, session, + running tests and reporting. """ + session = Session(config) + session.exitstatus = EXIT_OK + try: + config.pluginmanager.do_configure(config) + config.hook.pytest_sessionstart(session=session) + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + except pytest.UsageError: + raise + except KeyboardInterrupt: + excinfo = py.code.ExceptionInfo() + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = EXIT_INTERRUPTED + except: + excinfo = py.code.ExceptionInfo() + config.pluginmanager.notify_exception(excinfo) + session.exitstatus = EXIT_INTERNALERROR + if excinfo.errisinstance(SystemExit): + sys.stderr.write("mainloop: caught Spurious SystemExit!\n") + if not session.exitstatus and session._testsfailed: + session.exitstatus = EXIT_TESTSFAILED + config.hook.pytest_sessionfinish(session=session, + exitstatus=session.exitstatus) + config.pluginmanager.do_unconfigure(config) + return session.exitstatus + +def pytest_collection(session): + session.perform_collect() + hook = session.config.hook + hook.pytest_collection_modifyitems(session=session, + config=session.config, items=session.items) + hook.pytest_collection_finish(session=session) + return True + +def pytest_runtestloop(session): + if 
session.config.option.collectonly: + return True + for item in session.session.items: + item.config.hook.pytest_runtest_protocol(item=item) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + +def pytest_ignore_collect(path, config): + p = path.dirpath() + ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) + ignore_paths = ignore_paths or [] + excludeopt = config.getvalue("ignore") + if excludeopt: + ignore_paths.extend([py.path.local(x) for x in excludeopt]) + return path in ignore_paths + +class HookProxy: + def __init__(self, fspath, config): + self.fspath = fspath + self.config = config + def __getattr__(self, name): + hookmethod = getattr(self.config.hook, name) + def call_matching_hooks(**kwargs): + plugins = self.config._getmatchingplugins(self.fspath) + return hookmethod.pcall(plugins, **kwargs) + return call_matching_hooks + +def compatproperty(name): + def fget(self): + return getattr(pytest, name) + return property(fget, None, None, + "deprecated attribute %r, use pytest.%s" % (name,name)) + +class Node(object): + """ base class for all Nodes in the collection tree. + Collector subclasses have children, Items are terminal nodes.""" + + def __init__(self, name, parent=None, config=None, session=None): + #: a unique name with the scope of the parent + self.name = name + + #: the parent collector node. 
+ self.parent = parent + + #: the test config object + self.config = config or parent.config + + #: the collection this node is part of + self.session = session or parent.session + + #: filesystem path where this node was collected from + self.fspath = getattr(parent, 'fspath', None) + self.ihook = self.session.gethookproxy(self.fspath) + self.keywords = {self.name: True} + + Module = compatproperty("Module") + Class = compatproperty("Class") + Instance = compatproperty("Instance") + Function = compatproperty("Function") + File = compatproperty("File") + Item = compatproperty("Item") + + def _getcustomclass(self, name): + cls = getattr(self, name) + if cls != getattr(pytest, name): + py.log._apiwarn("2.0", "use of node.%s is deprecated, " + "use pytest_pycollect_makeitem(...) to create custom " + "collection nodes" % name) + return cls + + def __repr__(self): + return "<%s %r>" %(self.__class__.__name__, getattr(self, 'name', None)) + + # methods for ordering nodes + @property + def nodeid(self): + try: + return self._nodeid + except AttributeError: + self._nodeid = x = self._makeid() + return x + + def _makeid(self): + return self.parent.nodeid + "::" + self.name + + def __eq__(self, other): + if not isinstance(other, Node): + return False + return self.__class__ == other.__class__ and \ + self.name == other.name and self.parent == other.parent + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.name, self.parent)) + + def setup(self): + pass + + def teardown(self): + pass + + def _memoizedcall(self, attrname, function): + exattrname = "_ex_" + attrname + failure = getattr(self, exattrname, None) + if failure is not None: + py.builtin._reraise(failure[0], failure[1], failure[2]) + if hasattr(self, attrname): + return getattr(self, attrname) + try: + res = function() + except py.builtin._sysex: + raise + except: + failure = py.std.sys.exc_info() + setattr(self, exattrname, failure) + raise + setattr(self, attrname, 
res) + return res + + def listchain(self): + """ return list of all parent collectors up to self, + starting from root of collection tree. """ + l = [self] + while 1: + x = l[0] + if x.parent is not None: # and x.parent.parent is not None: + l.insert(0, x.parent) + else: + return l + + def listnames(self): + return [x.name for x in self.listchain()] + + def getplugins(self): + return self.config._getmatchingplugins(self.fspath) + + def getparent(self, cls): + current = self + while current and not isinstance(current, cls): + current = current.parent + return current + + def _prunetraceback(self, excinfo): + pass + + def _repr_failure_py(self, excinfo, style=None): + if self.config.option.fulltrace: + style="long" + else: + self._prunetraceback(excinfo) + # XXX should excinfo.getrepr record all data and toterminal() + # process it? + if style is None: + if self.config.option.tbstyle == "short": + style = "short" + else: + style = "long" + return excinfo.getrepr(funcargs=True, + showlocals=self.config.option.showlocals, + style=style) + + repr_failure = _repr_failure_py + +class Collector(Node): + """ Collector instances create children through collect() + and thus iteratively build a tree. + """ + class CollectError(Exception): + """ an error during collection, contains a custom message. """ + + def collect(self): + """ returns a list of children (items and collectors) + for this collection node. + """ + raise NotImplementedError("abstract") + + def repr_failure(self, excinfo): + """ represent a collection failure. """ + if excinfo.errisinstance(self.CollectError): + exc = excinfo.value + return str(exc.args[0]) + return self._repr_failure_py(excinfo, style="short") + + def _memocollect(self): + """ internal helper method to cache results of calling collect(). 
""" + return self._memoizedcall('_collected', lambda: list(self.collect())) + + def _prunetraceback(self, excinfo): + if hasattr(self, 'fspath'): + path = self.fspath + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.fspath) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + +class FSCollector(Collector): + def __init__(self, fspath, parent=None, config=None, session=None): + fspath = py.path.local(fspath) # xxx only for test_resultlog.py? + name = fspath.basename + if parent is not None: + rel = fspath.relto(parent.fspath) + if rel: + name = rel + name = name.replace(os.sep, "/") + super(FSCollector, self).__init__(name, parent, config, session) + self.fspath = fspath + + def _makeid(self): + if self == self.session: + return "." + relpath = self.session.fspath.bestrelpath(self.fspath) + if os.sep != "/": + relpath = relpath.replace(os.sep, "/") + return relpath + +class File(FSCollector): + """ base class for collecting tests from a file. """ + +class Item(Node): + """ a basic test invocation item. Note that for a single function + there might be multiple test invocation items. + """ + def reportinfo(self): + return self.fspath, None, "" + + @property + def location(self): + try: + return self._location + except AttributeError: + location = self.reportinfo() + # bestrelpath is a quite slow function + cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) + try: + fspath = cache[location[0]] + except KeyError: + fspath = self.session.fspath.bestrelpath(location[0]) + cache[location[0]] = fspath + location = (fspath, location[1], str(location[2])) + self._location = location + return location + +class NoMatch(Exception): + """ raised if matching cannot locate a matching names. """ + +class Session(FSCollector): + class Interrupted(KeyboardInterrupt): + """ signals an interrupted test run. 
""" + __module__ = 'builtins' # for py3 + + def __init__(self, config): + super(Session, self).__init__(py.path.local(), parent=None, + config=config, session=self) + assert self.config.pluginmanager.register(self, name="session", prepend=True) + self._testsfailed = 0 + self.shouldstop = False + self.trace = config.trace.root.get("collection") + self._norecursepatterns = config.getini("norecursedirs") + + def pytest_collectstart(self): + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + def pytest_runtest_logreport(self, report): + if report.failed and 'xfail' not in getattr(report, 'keywords', []): + self._testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self._testsfailed >= maxfail: + self.shouldstop = "stopping after %d failures" % ( + self._testsfailed) + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path): + return path in self._initialpaths + + def gethookproxy(self, fspath): + return HookProxy(fspath, self.config) + + def perform_collect(self, args=None, genitems=True): + if args is None: + args = self.config.args + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + self._notfound = [] + self._initialpaths = set() + self._initialparts = [] + for arg in args: + parts = self._parsearg(arg) + self._initialparts.append(parts) + self._initialpaths.add(parts[0]) + self.ihook.pytest_collectstart(collector=self) + rep = self.ihook.pytest_make_collect_report(collector=self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + for arg, exc in self._notfound: + line = "(no name %r in any of %r)" % (arg, exc.args[0]) + raise pytest.UsageError("not found: %s\n%s" %(arg, line)) + if not genitems: + return rep.result + else: + self.items = items = [] + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + return items + + def collect(self): + for parts in self._initialparts: + arg = "::".join(map(str, 
parts)) + self.trace("processing argument", arg) + self.trace.root.indent += 1 + try: + for x in self._collect(arg): + yield x + except NoMatch: + # we are inside a make_report hook so + # we cannot directly pass through the exception + self._notfound.append((arg, sys.exc_info()[1])) + self.trace.root.indent -= 1 + break + self.trace.root.indent -= 1 + + def _collect(self, arg): + names = self._parsearg(arg) + path = names.pop(0) + if path.check(dir=1): + assert not names, "invalid arg %r" %(arg,) + for path in path.visit(fil=lambda x: x.check(file=1), + rec=self._recurse, bf=True, sort=True): + for x in self._collectfile(path): + yield x + else: + assert path.check(file=1) + for x in self.matchnodes(self._collectfile(path), names): + yield x + + def _collectfile(self, path): + ihook = self.gethookproxy(path) + if not self.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + return ihook.pytest_collect_file(path=path, parent=self) + + def _recurse(self, path): + ihook = self.gethookproxy(path.dirpath()) + if ihook.pytest_ignore_collect(path=path, config=self.config): + return + for pat in self._norecursepatterns: + if path.check(fnmatch=pat): + return False + ihook = self.gethookproxy(path) + ihook.pytest_collect_directory(path=path, parent=self) + return True + + def _tryconvertpyarg(self, x): + try: + mod = __import__(x, None, None, ['__doc__']) + except (ValueError, ImportError): + return x + p = py.path.local(mod.__file__) + if p.purebasename == "__init__": + p = p.dirpath() + else: + p = p.new(basename=p.purebasename+".py") + return str(p) + + def _parsearg(self, arg): + """ return (fspath, names) tuple after checking the file exists. 
""" + arg = str(arg) + if self.config.option.pyargs: + arg = self._tryconvertpyarg(arg) + parts = str(arg).split("::") + relpath = parts[0].replace("/", os.sep) + path = self.fspath.join(relpath, abs=True) + if not path.check(): + if self.config.option.pyargs: + msg = "file or package not found: " + else: + msg = "file not found: " + raise pytest.UsageError(msg + arg) + parts[0] = path + return parts + + def matchnodes(self, matching, names): + self.trace("matchnodes", matching, names) + self.trace.root.indent += 1 + nodes = self._matchnodes(matching, names) + num = len(nodes) + self.trace("matchnodes finished -> ", num, "nodes") + self.trace.root.indent -= 1 + if num == 0: + raise NoMatch(matching, names[:1]) + return nodes + + def _matchnodes(self, matching, names): + if not matching or not names: + return matching + name = names[0] + assert name + nextnames = names[1:] + resultnodes = [] + for node in matching: + if isinstance(node, pytest.Item): + if not names: + resultnodes.append(node) + continue + assert isinstance(node, pytest.Collector) + node.ihook.pytest_collectstart(collector=node) + rep = node.ihook.pytest_make_collect_report(collector=node) + if rep.passed: + has_matched = False + for x in rep.result: + if x.name == name: + resultnodes.extend(self.matchnodes([x], nextnames)) + has_matched = True + # XXX accept IDs that don't have "()" for class instances + if not has_matched and len(rep.result) == 1 and x.name == "()": + nextnames.insert(0, name) + resultnodes.extend(self.matchnodes([x], nextnames)) + node.ihook.pytest_collectreport(report=rep) + return resultnodes + + def genitems(self, node): + self.trace("genitems", node) + if isinstance(node, pytest.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, pytest.Collector) + node.ihook.pytest_collectstart(collector=node) + rep = node.ihook.pytest_make_collect_report(collector=node) + if rep.passed: + for subnode in rep.result: + for x in 
self.genitems(subnode): + yield x + node.ihook.pytest_collectreport(report=rep) diff --git a/_pytest/mark.py b/_pytest/mark.py new file mode 100644 --- /dev/null +++ b/_pytest/mark.py @@ -0,0 +1,176 @@ +""" generic mechanism for marking and selecting python functions. """ +import pytest, py + +def pytest_namespace(): + return {'mark': MarkGenerator()} + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption('-k', + action="store", dest="keyword", default='', metavar="KEYWORDEXPR", + help="only run tests which match given keyword expression. " + "An expression consists of space-separated terms. " + "Each term must match. Precede a term with '-' to negate. " + "Terminate expression with ':' to make the first match match " + "all subsequent tests (usually file-order). ") + +def pytest_collection_modifyitems(items, config): + keywordexpr = config.option.keyword + if not keywordexpr: + return + selectuntil = False + if keywordexpr[-1] == ":": + selectuntil = True + keywordexpr = keywordexpr[:-1] + + remaining = [] + deselected = [] + for colitem in items: + if keywordexpr and skipbykeyword(colitem, keywordexpr): + deselected.append(colitem) + else: + remaining.append(colitem) + if selectuntil: + keywordexpr = None + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + +def skipbykeyword(colitem, keywordexpr): + """ return True if they given keyword expression means to + skip this collector/item. 
+ """ + if not keywordexpr: + return + + itemkeywords = getkeywords(colitem) + for key in filter(None, keywordexpr.split()): + eor = key[:1] == '-' + if eor: + key = key[1:] + if not (eor ^ matchonekeyword(key, itemkeywords)): + return True + +def getkeywords(node): + keywords = {} + while node is not None: + keywords.update(node.keywords) + node = node.parent + return keywords + + +def matchonekeyword(key, itemkeywords): + for elem in key.split("."): + for kw in itemkeywords: + if elem in kw: + break + else: + return False + return True + +class MarkGenerator: + """ Factory for :class:`MarkDecorator` objects - exposed as + a ``py.test.mark`` singleton instance. Example:: + + import py + @py.test.mark.slowtest + def test_function(): + pass + + will set a 'slowtest' :class:`MarkInfo` object + on the ``test_function`` object. """ + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError(name) + return MarkDecorator(name) + +class MarkDecorator: + """ A decorator for test functions and test classes. When applied + it will create :class:`MarkInfo` objects which may be + :ref:`retrieved by hooks as item keywords `. + MarkDecorator instances are often created like this:: + + mark1 = py.test.mark.NAME # simple MarkDecorator + mark2 = py.test.mark.NAME(name1=value) # parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + """ + def __init__(self, name, args=None, kwargs=None): + self.markname = name + self.args = args or () + self.kwargs = kwargs or {} + + def __repr__(self): + d = self.__dict__.copy() + name = d.pop('markname') + return "" %(name, d) + + def __call__(self, *args, **kwargs): + """ if passed a single callable argument: decorate it with mark info. + otherwise add *args/**kwargs in-place to mark information. 
""" + if args: + func = args[0] + if len(args) == 1 and hasattr(func, '__call__') or \ + hasattr(func, '__bases__'): + if hasattr(func, '__bases__'): + if hasattr(func, 'pytestmark'): + l = func.pytestmark + if not isinstance(l, list): + func.pytestmark = [l, self] + else: + l.append(self) + else: + func.pytestmark = [self] + else: + holder = getattr(func, self.markname, None) + if holder is None: + holder = MarkInfo(self.markname, self.args, self.kwargs) + setattr(func, self.markname, holder) + else: + holder.kwargs.update(self.kwargs) + holder.args += self.args + return func + kw = self.kwargs.copy() + kw.update(kwargs) + args = self.args + args + return self.__class__(self.markname, args=args, kwargs=kw) + +class MarkInfo: + """ Marking object created by :class:`MarkDecorator` instances. """ + def __init__(self, name, args, kwargs): + #: name of attribute + self.name = name + #: positional argument list, empty if none specified + self.args = args + #: keyword argument dictionary, empty if nothing specified + self.kwargs = kwargs + + def __repr__(self): + return "" % ( + self._name, self.args, self.kwargs) + +def pytest_itemcollected(item): + if not isinstance(item, pytest.Function): + return + try: + func = item.obj.__func__ + except AttributeError: + func = getattr(item.obj, 'im_func', item.obj) + pyclasses = (pytest.Class, pytest.Module) + for node in item.listchain(): + if isinstance(node, pyclasses): + marker = getattr(node.obj, 'pytestmark', None) + if marker is not None: + if isinstance(marker, list): + for mark in marker: + mark(func) + else: + marker(func) + node = node.parent + item.keywords.update(py.builtin._getfuncdict(func)) diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py new file mode 100644 --- /dev/null +++ b/_pytest/monkeypatch.py @@ -0,0 +1,103 @@ +""" monkeypatching and mocking functionality. 
""" + +import os, sys + +def pytest_funcarg__monkeypatch(request): + """The returned ``monkeypatch`` funcarg provides these + helper methods to modify objects, dictionaries or os.environ:: + + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, value, raising=True) + monkeypatch.syspath_prepend(path) + + All modifications will be undone after the requesting + test function has finished. The ``raising`` + parameter determines if a KeyError or AttributeError + will be raised if the set/deletion operation has no target. + """ + mpatch = monkeypatch() + request.addfinalizer(mpatch.undo) + return mpatch + +notset = object() + +class monkeypatch: + """ object keeping a record of setattr/item/env/syspath changes. """ + def __init__(self): + self._setattr = [] + self._setitem = [] + + def setattr(self, obj, name, value, raising=True): + """ set attribute ``name`` on ``obj`` to ``value``, by default + raise AttributeEror if the attribute did not exist. """ + oldval = getattr(obj, name, notset) + if raising and oldval is notset: + raise AttributeError("%r has no attribute %r" %(obj, name)) + self._setattr.insert(0, (obj, name, oldval)) + setattr(obj, name, value) + + def delattr(self, obj, name, raising=True): + """ delete attribute ``name`` from ``obj``, by default raise + AttributeError it the attribute did not previously exist. """ + if not hasattr(obj, name): + if raising: + raise AttributeError(name) + else: + self._setattr.insert(0, (obj, name, getattr(obj, name, notset))) + delattr(obj, name) + + def setitem(self, dic, name, value): + """ set dictionary entry ``name`` to value. 
""" + self._setitem.insert(0, (dic, name, dic.get(name, notset))) + dic[name] = value + + def delitem(self, dic, name, raising=True): + """ delete ``name`` from dict, raise KeyError if it doesn't exist.""" + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.insert(0, (dic, name, dic.get(name, notset))) + del dic[name] + + def setenv(self, name, value, prepend=None): + """ set environment variable ``name`` to ``value``. if ``prepend`` + is a character, read the current environment variable value + and prepend the ``value`` adjoined with the ``prepend`` character.""" + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name, raising=True): + """ delete ``name`` from environment, raise KeyError it not exists.""" + self.delitem(os.environ, name, raising=raising) + + def syspath_prepend(self, path): + """ prepend ``path`` to ``sys.path`` list of import locations. """ + if not hasattr(self, '_savesyspath'): + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + def undo(self): + """ undo previous changes. This call consumes the + undo stack. Calling it a second time has no effect unless + you do more monkeypatching after the undo call.""" + for obj, name, value in self._setattr: + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, name, value in self._setitem: + if value is notset: + del dictionary[name] + else: + dictionary[name] = value + self._setitem[:] = [] + if hasattr(self, '_savesyspath'): + sys.path[:] = self._savesyspath diff --git a/_pytest/nose.py b/_pytest/nose.py new file mode 100644 --- /dev/null +++ b/_pytest/nose.py @@ -0,0 +1,47 @@ +""" run test suites written for nose. 
""" + +import pytest, py +import inspect +import sys + +def pytest_runtest_makereport(__multicall__, item, call): + SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None) + if SkipTest: + if call.excinfo and call.excinfo.errisinstance(SkipTest): + # let's substitute the excinfo with a py.test.skip one + call2 = call.__class__(lambda: py.test.skip(str(call.excinfo.value)), call.when) + call.excinfo = call2.excinfo + + +def pytest_runtest_setup(item): + if isinstance(item, (pytest.Function)): + if isinstance(item.parent, pytest.Generator): + gen = item.parent + if not hasattr(gen, '_nosegensetup'): + call_optional(gen.obj, 'setup') + if isinstance(gen.parent, pytest.Instance): + call_optional(gen.parent.obj, 'setup') + gen._nosegensetup = True + if not call_optional(item.obj, 'setup'): + # call module level setup if there is no object level one + call_optional(item.parent.obj, 'setup') + +def pytest_runtest_teardown(item): + if isinstance(item, pytest.Function): + if not call_optional(item.obj, 'teardown'): + call_optional(item.parent.obj, 'teardown') + #if hasattr(item.parent, '_nosegensetup'): + # #call_optional(item._nosegensetup, 'teardown') + # del item.parent._nosegensetup + +def pytest_make_collect_report(collector): + if isinstance(collector, pytest.Generator): + call_optional(collector.obj, 'setup') + +def call_optional(obj, name): + method = getattr(obj, name, None) + if method: + # If there's any problems allow the exception to raise rather than + # silently ignoring them + method() + return True diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py new file mode 100644 --- /dev/null +++ b/_pytest/pastebin.py @@ -0,0 +1,63 @@ +""" submit failure or test session information to a pastebin service. 
""" +import py, sys + +class url: + base = "http://paste.pocoo.org" + xmlrpc = base + "/xmlrpc/" + show = base + "/show/" + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group._addoption('--pastebin', metavar="mode", + action='store', dest="pastebin", default=None, + type="choice", choices=['failed', 'all'], + help="send failed|all info to Pocoo pastebin service.") + +def pytest_configure(__multicall__, config): + import tempfile + __multicall__.execute() + if config.option.pastebin == "all": + config._pastebinfile = tempfile.TemporaryFile('w+') + tr = config.pluginmanager.getplugin('terminalreporter') + oldwrite = tr._tw.write + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + config._pastebinfile.write(str(s)) + tr._tw.write = tee_write + +def pytest_unconfigure(config): + if hasattr(config, '_pastebinfile'): + config._pastebinfile.seek(0) + sessionlog = config._pastebinfile.read() + config._pastebinfile.close() + del config._pastebinfile + proxyid = getproxy().newPaste("python", sessionlog) + pastebinurl = "%s%s" % (url.show, proxyid) + sys.stderr.write("pastebin session-log: %s\n" % pastebinurl) + tr = config.pluginmanager.getplugin('terminalreporter') + del tr._tw.__dict__['write'] + +def getproxy(): + return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes + +def pytest_terminal_summary(terminalreporter): + if terminalreporter.config.option.pastebin != "failed": + return + tr = terminalreporter + if 'failed' in tr.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + if tr.config.option.debug: + terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,)) + serverproxy = getproxy() + for rep in terminalreporter.stats.get('failed'): + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = tr._getfailureheadline(rep) + tw = py.io.TerminalWriter(stringio=True) + rep.toterminal(tw) + s = tw.stringio.getvalue() + assert len(s) + proxyid = 
serverproxy.newPaste("python", s) + pastebinurl = "%s%s" % (url.show, proxyid) + tr.write_line("%s --> %s" %(msg, pastebinurl)) diff --git a/_pytest/pdb.py b/_pytest/pdb.py new file mode 100644 --- /dev/null +++ b/_pytest/pdb.py @@ -0,0 +1,79 @@ +""" interactive debugging with PDB, the Python Debugger. """ + +import pytest, py +import sys + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption('--pdb', + action="store_true", dest="usepdb", default=False, + help="start the interactive Python debugger on errors.") + +def pytest_namespace(): + return {'set_trace': pytestPDB().set_trace} + +def pytest_configure(config): + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') + +class pytestPDB: + """ Pseudo PDB that defers to the real pdb. """ + item = None + + def set_trace(self): + """ invoke PDB set_trace debugging, dropping any IO capturing. """ + frame = sys._getframe().f_back + item = getattr(self, 'item', None) + if item is not None: + capman = item.config.pluginmanager.getplugin("capturemanager") + out, err = capman.suspendcapture() + if hasattr(item, 'outerr'): + item.outerr = (item.outerr[0] + out, item.outerr[1] + err) + tw = py.io.TerminalWriter() + tw.line() + tw.sep(">", "PDB set_trace (IO-capturing turned off)") + py.std.pdb.Pdb().set_trace(frame) + +def pdbitem(item): + pytestPDB.item = item +pytest_runtest_setup = pytest_runtest_call = pytest_runtest_teardown = pdbitem + +def pytest_runtest_makereport(): + pytestPDB.item = None + +class PdbInvoke: + @pytest.mark.tryfirst + def pytest_runtest_makereport(self, item, call, __multicall__): + rep = __multicall__.execute() + if not call.excinfo or \ + call.excinfo.errisinstance(pytest.skip.Exception) or \ + call.excinfo.errisinstance(py.std.bdb.BdbQuit): + return rep + if "xfail" in rep.keywords: + return rep + # we assume that the above execute() suspended capturing + # XXX we re-use the TerminalReporter's terminalwriter + # because this 
seems to avoid some encoding related troubles + # for not completely clear reasons. + tw = item.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + post_mortem(call.excinfo._excinfo[2]) + rep._pdbshown = True + return rep + +def post_mortem(t): + pdb = py.std.pdb + class Pdb(pdb.Pdb): + def get_stack(self, f, t): + stack, i = pdb.Pdb.get_stack(self, f, t) + if f is None: + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i-=1 + return stack, i + p = Pdb() + p.reset() + p.interaction(None, t) diff --git a/_pytest/pytester.py b/_pytest/pytester.py new file mode 100644 --- /dev/null +++ b/_pytest/pytester.py @@ -0,0 +1,674 @@ +""" (disabled by default) support for testing py.test and py.test plugins. """ + +import py, pytest +import sys, os +import re +import inspect +import time +from fnmatch import fnmatch +from _pytest.main import Session +from py.builtin import print_ +from _pytest.core import HookRelay + +def pytest_addoption(parser): + group = parser.getgroup("pylib") + group.addoption('--no-tools-on-path', + action="store_true", dest="notoolsonpath", default=False, + help=("discover tools on PATH instead of going through py.cmdline.") + ) + +def pytest_configure(config): + # This might be called multiple times. Only take the first. 
+ global _pytest_fullpath + import pytest + try: + _pytest_fullpath + except NameError: + _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) + +def pytest_funcarg___pytest(request): + return PytestArg(request) + +class PytestArg: + def __init__(self, request): + self.request = request + + def gethookrecorder(self, hook): + hookrecorder = HookRecorder(hook._pm) + hookrecorder.start_recording(hook._hookspecs) + self.request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + +class ParsedCall: + def __init__(self, name, locals): + assert '_name' not in locals + self.__dict__.update(locals) + self.__dict__.pop('self') + self._name = name + + def __repr__(self): + d = self.__dict__.copy() + del d['_name'] + return "" %(self._name, d) + +class HookRecorder: + def __init__(self, pluginmanager): + self._pluginmanager = pluginmanager + self.calls = [] + self._recorders = {} + + def start_recording(self, hookspecs): + if not isinstance(hookspecs, (list, tuple)): + hookspecs = [hookspecs] + for hookspec in hookspecs: + assert hookspec not in self._recorders + class RecordCalls: + _recorder = self + for name, method in vars(hookspec).items(): + if name[0] != "_": + setattr(RecordCalls, name, self._makecallparser(method)) + recorder = RecordCalls() + self._recorders[hookspec] = recorder + self._pluginmanager.register(recorder) + self.hook = HookRelay(hookspecs, pm=self._pluginmanager, + prefix="pytest_") + + def finish_recording(self): + for recorder in self._recorders.values(): + self._pluginmanager.unregister(recorder) + self._recorders.clear() + + def _makecallparser(self, method): + name = method.__name__ + args, varargs, varkw, default = py.std.inspect.getargspec(method) + if not args or args[0] != "self": + args.insert(0, 'self') + fspec = py.std.inspect.formatargspec(args, varargs, varkw, default) + # we use exec because we want to have early type + # errors on wrong input arguments, using + # *args/**kwargs delays this and gives errors + # 
elsewhere + exec (py.code.compile(""" + def %(name)s%(fspec)s: + self._recorder.calls.append( + ParsedCall(%(name)r, locals())) + """ % locals())) + return locals()[name] + + def getcalls(self, names): + if isinstance(names, str): + names = names.split() + for name in names: + for cls in self._recorders: + if name in vars(cls): + break + else: + raise ValueError("callname %r not found in %r" %( + name, self._recorders.keys())) + l = [] + for call in self.calls: + if call._name in names: + l.append(call) + return l + + def contains(self, entries): + __tracebackhide__ = True + from py.builtin import print_ + i = 0 + entries = list(entries) + backlocals = py.std.sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print_("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print_("CHECKERMATCH", repr(check), "->", call) + else: + print_("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print_("NONAMEMATCH", name, "with", call) + else: + py.test.fail("could not find %r check %r" % (name, check)) + + def popcall(self, name): + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = ["could not find call %r, in:" % (name,)] + lines.extend([" %s" % str(x) for x in self.calls]) + py.test.fail("\n".join(lines)) + + def getcall(self, name): + l = self.getcalls(name) + assert len(l) == 1, (name, l) + return l[0] + + +def pytest_funcarg__linecomp(request): + return LineComp() + +def pytest_funcarg__LineMatcher(request): + return LineMatcher + +def pytest_funcarg__testdir(request): + tmptestdir = TmpTestdir(request) + return tmptestdir + +rex_outcome = re.compile("(\d+) (\w+)") +class RunResult: + def __init__(self, ret, outlines, errlines, duration): + self.ret = ret + self.outlines = outlines + self.errlines = errlines + self.stdout = LineMatcher(outlines) + 
self.stderr = LineMatcher(errlines) + self.duration = duration + + def parseoutcomes(self): + for line in reversed(self.outlines): + if 'seconds' in line: + outcomes = rex_outcome.findall(line) + if outcomes: + d = {} + for num, cat in outcomes: + d[cat] = int(num) + return d + +class TmpTestdir: + def __init__(self, request): + self.request = request + self.Config = request.config.__class__ + self._pytest = request.getfuncargvalue("_pytest") + # XXX remove duplication with tmpdir plugin + basetmp = request.config._tmpdirhandler.ensuretemp("testdir") + name = request.function.__name__ + for i in range(100): + try: + tmpdir = basetmp.mkdir(name + str(i)) + except py.error.EEXIST: + continue + break + # we need to create another subdir + # because Directory.collect() currently loads + # conftest.py from sibling directories + self.tmpdir = tmpdir.mkdir(name) + self.plugins = [] + self._syspathremove = [] + self.chdir() # always chdir + self.request.addfinalizer(self.finalize) + + def __repr__(self): + return "" % (self.tmpdir,) + + def finalize(self): + for p in self._syspathremove: + py.std.sys.path.remove(p) + if hasattr(self, '_olddir'): + self._olddir.chdir() + # delete modules that have been loaded from tmpdir + for name, mod in list(sys.modules.items()): + if mod: + fn = getattr(mod, '__file__', None) + if fn and fn.startswith(str(self.tmpdir)): + del sys.modules[name] + + def getreportrecorder(self, obj): + if hasattr(obj, 'config'): + obj = obj.config + if hasattr(obj, 'hook'): + obj = obj.hook + assert hasattr(obj, '_hookspecs'), obj + reprec = ReportRecorder(obj) + reprec.hookrecorder = self._pytest.gethookrecorder(obj) + reprec.hook = reprec.hookrecorder.hook + return reprec + + def chdir(self): + old = self.tmpdir.chdir() + if not hasattr(self, '_olddir'): + self._olddir = old + + def _makefile(self, ext, args, kwargs): + items = list(kwargs.items()) + if args: + source = "\n".join(map(str, args)) + "\n" + basename = self.request.function.__name__ + 
items.insert(0, (basename, source)) + ret = None + for name, value in items: + p = self.tmpdir.join(name).new(ext=ext) + source = str(py.code.Source(value)).lstrip() + p.write(source.encode("utf-8"), "wb") + if ret is None: + ret = p + return ret + + + def makefile(self, ext, *args, **kwargs): + return self._makefile(ext, args, kwargs) + + def makeini(self, source): + return self.makefile('cfg', setup=source) + + def makeconftest(self, source): + return self.makepyfile(conftest=source) + + def makeini(self, source): + return self.makefile('.ini', tox=source) + + def getinicfg(self, source): + p = self.makeini(source) + return py.iniconfig.IniConfig(p)['pytest'] + + def makepyfile(self, *args, **kwargs): + return self._makefile('.py', args, kwargs) + + def maketxtfile(self, *args, **kwargs): + return self._makefile('.txt', args, kwargs) + + def syspathinsert(self, path=None): + if path is None: + path = self.tmpdir + py.std.sys.path.insert(0, str(path)) + self._syspathremove.append(str(path)) + + def mkdir(self, name): + return self.tmpdir.mkdir(name) + + def mkpydir(self, name): + p = self.mkdir(name) + p.ensure("__init__.py") + return p + + Session = Session + def getnode(self, config, arg): + session = Session(config) + assert '::' not in str(arg) + p = py.path.local(arg) + x = session.fspath.bestrelpath(p) + return session.perform_collect([x], genitems=False)[0] + + def getpathnode(self, path): + config = self.parseconfig(path) + session = Session(config) + x = session.fspath.bestrelpath(path) + return session.perform_collect([x], genitems=False)[0] + + def genitems(self, colitems): + session = colitems[0].session + result = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def inline_genitems(self, *args): + #config = self.parseconfig(*args) + config = self.parseconfigure(*args) + rec = self.getreportrecorder(config) + session = Session(config) + session.perform_collect() + return session.items, rec + + def 
runitem(self, source): + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = py.builtin._getimself(self.request.function) + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source, *cmdlineargs): + p = self.makepyfile(source) + l = list(cmdlineargs) + [p] + return self.inline_run(*l) + + def inline_runsource1(self, *args): + args = list(args) + source = args.pop() + p = self.makepyfile(source) + l = list(args) + [p] + reprec = self.inline_run(*l) + reports = reprec.getreports("pytest_runtest_logreport") + assert len(reports) == 1, reports + return reports[0] + + def inline_run(self, *args): + args = ("-s", ) + args # otherwise FD leakage + config = self.parseconfig(*args) + reprec = self.getreportrecorder(config) + #config.pluginmanager.do_configure(config) + config.hook.pytest_cmdline_main(config=config) + #config.pluginmanager.do_unconfigure(config) + return reprec + + def config_preparse(self): + config = self.Config() + for plugin in self.plugins: + if isinstance(plugin, str): + config.pluginmanager.import_plugin(plugin) + else: + if isinstance(plugin, dict): + plugin = PseudoPlugin(plugin) + if not config.pluginmanager.isregistered(plugin): + config.pluginmanager.register(plugin) + return config + + def parseconfig(self, *args): + if not args: + args = (self.tmpdir,) + config = self.config_preparse() + args = list(args) + for x in args: + if str(x).startswith('--basetemp'): + break + else: + args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) + config.parse(args) + return config + + def reparseconfig(self, args=None): + """ this is used from tests that want to re-invoke parse(). 
""" + if not args: + args = [self.tmpdir] + oldconfig = getattr(py.test, 'config', None) + try: + c = py.test.config = self.Config() + c.basetemp = py.path.local.make_numbered_dir(prefix="reparse", + keep=0, rootdir=self.tmpdir, lock_timeout=None) + c.parse(args) + return c + finally: + py.test.config = oldconfig + + def parseconfigure(self, *args): + config = self.parseconfig(*args) + config.pluginmanager.do_configure(config) + self.request.addfinalizer(lambda: + config.pluginmanager.do_unconfigure(config)) + return config + + def getitem(self, source, funcname="test_func"): + for item in self.getitems(source): + if item.name == funcname: + return item + assert 0, "%r item not found in module:\n%s" %(funcname, source) + + def getitems(self, source): + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol(self, source, configargs=(), withinit=False): + kw = {self.request.function.__name__: py.code.Source(source).strip()} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__ = "#") + self.config = config = self.parseconfigure(path, *configargs) + node = self.getnode(config, path) + #config.pluginmanager.do_unconfigure(config) + return node + + def collect_by_name(self, modcol, name): + for colitem in modcol._memocollect(): + if colitem.name == name: + return colitem + + def popen(self, cmdargs, stdout, stderr, **kw): + env = os.environ.copy() + env['PYTHONPATH'] = os.pathsep.join(filter(None, [ + str(os.getcwd()), env.get('PYTHONPATH', '')])) + kw['env'] = env + #print "env", env + return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + + def pytestmain(self, *args, **kwargs): + ret = pytest.main(*args, **kwargs) + if ret == 2: + raise KeyboardInterrupt() + def run(self, *cmdargs): + return self._run(*cmdargs) + + def _run(self, *cmdargs): + cmdargs = [str(x) for x in cmdargs] + p1 = self.tmpdir.join("stdout") + p2 = self.tmpdir.join("stderr") + print_("running", cmdargs, "curdir=", 
py.path.local()) + f1 = p1.open("wb") + f2 = p2.open("wb") + now = time.time() + popen = self.popen(cmdargs, stdout=f1, stderr=f2, + close_fds=(sys.platform != "win32")) + ret = popen.wait() + f1.close() + f2.close() + out = p1.read("rb") + out = getdecoded(out).splitlines() + err = p2.read("rb") + err = getdecoded(err).splitlines() + def dump_lines(lines, fp): + try: + for line in lines: + py.builtin.print_(line, file=fp) + except UnicodeEncodeError: + print("couldn't print to %s because of encoding" % (fp,)) + dump_lines(out, sys.stdout) + dump_lines(err, sys.stderr) + return RunResult(ret, out, err, time.time()-now) + + def runpybin(self, scriptname, *args): + fullargs = self._getpybinargs(scriptname) + args + return self.run(*fullargs) + + def _getpybinargs(self, scriptname): + if not self.request.config.getvalue("notoolsonpath"): + # XXX we rely on script refering to the correct environment + # we cannot use "(py.std.sys.executable,script)" + # becaue on windows the script is e.g. a py.test.exe + return (py.std.sys.executable, _pytest_fullpath,) + else: + py.test.skip("cannot run %r with --no-tools-on-path" % scriptname) + + def runpython(self, script, prepend=True): + if prepend: + s = self._getsysprepend() + if s: + script.write(s + "\n" + script.read()) + return self.run(sys.executable, script) + + def _getsysprepend(self): + if self.request.config.getvalue("notoolsonpath"): + s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath()) + else: + s = "" + return s + + def runpython_c(self, command): + command = self._getsysprepend() + command + return self.run(py.std.sys.executable, "-c", command) + + def runpytest(self, *args): + p = py.path.local.make_numbered_dir(prefix="runpytest-", + keep=None, rootdir=self.tmpdir) + args = ('--basetemp=%s' % p, ) + args + #for x in args: + # if '--confcutdir' in str(x): + # break + #else: + # pass + # args = ('--confcutdir=.',) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + 
args = ('-p', plugins[0]) + args + return self.runpybin("py.test", *args) + + def spawn_pytest(self, string, expect_timeout=10.0): + if self.request.config.getvalue("notoolsonpath"): + py.test.skip("--no-tools-on-path prevents running pexpect-spawn tests") + basetemp = self.tmpdir.mkdir("pexpect") + invoke = " ".join(map(str, self._getpybinargs("py.test"))) + cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd, expect_timeout=10.0): + pexpect = py.test.importorskip("pexpect", "2.4") + if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine(): + pytest.skip("pypy-64 bit not supported") + logfile = self.tmpdir.join("spawn.out") + child = pexpect.spawn(cmd, logfile=logfile.open("w")) + child.timeout = expect_timeout + return child + +def getdecoded(out): + try: + return out.decode("utf-8") + except UnicodeDecodeError: + return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( + py.io.saferepr(out),) + +class PseudoPlugin: + def __init__(self, vars): + self.__dict__.update(vars) + +class ReportRecorder(object): + def __init__(self, hook): + self.hook = hook + self.pluginmanager = hook._pm + self.pluginmanager.register(self) + + def getcall(self, name): + return self.hookrecorder.getcall(name) + + def popcall(self, name): + return self.hookrecorder.popcall(name) + + def getcalls(self, names): + """ return list of ParsedCall instances matching the given eventname. 
""" + return self.hookrecorder.getcalls(names) + + # functionality for test reports + + def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): + return [x.report for x in self.getcalls(names)] + + def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None): + """ return a testreport whose dotted import path matches """ + l = [] + for rep in self.getreports(names=names): + if when and getattr(rep, 'when', None) != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + l.append(rep) + if not l: + raise ValueError("could not find test report matching %r: no test reports at all!" % + (inamepart,)) + if len(l) > 1: + raise ValueError("found more than one testreport matching %r: %s" %( + inamepart, l)) + return l[0] + + def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'): + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self): + return self.getfailures('pytest_collectreport') + + def listoutcomes(self): + passed = [] + skipped = [] + failed = [] + for rep in self.getreports("pytest_runtest_logreport"): + if rep.passed: + if rep.when == "call": + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + elif rep.failed: + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self): + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed=0, skipped=0, failed=0): + realpassed, realskipped, realfailed = self.listoutcomes() + assert passed == len(realpassed) + assert skipped == len(realskipped) + assert failed == len(realfailed) + + def clear(self): + self.hookrecorder.calls[:] = [] + + def unregister(self): + self.pluginmanager.unregister(self) + self.hookrecorder.finish_recording() + +class LineComp: + def __init__(self): + self.stringio = py.io.TextIO() + + def assert_contains_lines(self, lines2): + """ assert that lines2 are contained (linearly) in lines1. 
+ return a list of extralines found. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + return LineMatcher(lines1).fnmatch_lines(lines2) + +class LineMatcher: + def __init__(self, lines): + self.lines = lines + + def str(self): + return "\n".join(self.lines) + + def _getlines(self, lines2): + if isinstance(lines2, str): + lines2 = py.code.Source(lines2) + if isinstance(lines2, py.code.Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2): + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or fnmatch(x, line): + print_("matched: ", repr(line)) + break + else: + raise ValueError("line %r not found in output" % line) + + def fnmatch_lines(self, lines2): + def show(arg1, arg2): + py.builtin.print_(arg1, arg2, file=py.std.sys.stderr) + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + nextline = None + extralines = [] + __tracebackhide__ = True + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + show("exact match:", repr(line)) + break + elif fnmatch(nextline, line): + show("fnmatch:", repr(line)) + show(" with:", repr(nextline)) + break + else: + if not nomatchprinted: + show("nomatch:", repr(line)) + nomatchprinted = True + show(" and:", repr(nextline)) + extralines.append(nextline) + else: + py.test.fail("remains unmatched: %r, see stderr" % (line,)) diff --git a/_pytest/python.py b/_pytest/python.py new file mode 100644 --- /dev/null +++ b/_pytest/python.py @@ -0,0 +1,870 @@ +""" Python test discovery, setup and run of test functions. 
""" +import py +import inspect +import sys +import pytest +from py._code.code import TerminalRepr + +import _pytest +cutdir = py.path.local(_pytest.__file__).dirpath() + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption('--funcargs', + action="store_true", dest="showfuncargs", default=False, + help="show available function arguments, sorted by plugin") + parser.addini("python_files", type="args", + default=('test_*.py', '*_test.py'), + help="glob-style file patterns for Python test module discovery") + parser.addini("python_classes", type="args", default=("Test",), + help="prefixes for Python test class discovery") + parser.addini("python_functions", type="args", default=("test",), + help="prefixes for Python test function and method discovery") + +def pytest_cmdline_main(config): + if config.option.showfuncargs: + showfuncargs(config) + return 0 + + at pytest.mark.trylast +def pytest_namespace(): + raises.Exception = pytest.fail.Exception + return { + 'raises' : raises, + 'collect': { + 'Module': Module, 'Class': Class, 'Instance': Instance, + 'Function': Function, 'Generator': Generator, + '_fillfuncargs': fillfuncargs} + } + +def pytest_funcarg__pytestconfig(request): + """ the pytest config object with access to command line opts.""" + return request.config + +def pytest_pyfunc_call(__multicall__, pyfuncitem): + if not __multicall__.execute(): + testfunction = pyfuncitem.obj + if pyfuncitem._isyieldedfunction(): + testfunction(*pyfuncitem._args) + else: + funcargs = pyfuncitem.funcargs + testfunction(**funcargs) + +def pytest_collect_file(path, parent): + ext = path.ext + pb = path.purebasename + if ext == ".py": + if not parent.session.isinitpath(path): + for pat in parent.config.getini('python_files'): + if path.fnmatch(pat): + break + else: + return + return parent.ihook.pytest_pycollect_makemodule( + path=path, parent=parent) + +def pytest_pycollect_makemodule(path, parent): + return Module(path, parent) + +def 
pytest_pycollect_makeitem(__multicall__, collector, name, obj): + res = __multicall__.execute() + if res is not None: + return res + if collector._istestclasscandidate(name, obj): + #if hasattr(collector.obj, 'unittest'): + # return # we assume it's a mixin class for a TestCase derived one + Class = collector._getcustomclass("Class") + return Class(name, parent=collector) + elif collector.funcnamefilter(name) and hasattr(obj, '__call__'): + if is_generator(obj): + return Generator(name, parent=collector) + else: + return collector._genfunctions(name, obj) + +def is_generator(func): + try: + return py.code.getrawcode(func).co_flags & 32 # generator function + except AttributeError: # builtin functions have no bytecode + # assume them to not be generators + return False + +class PyobjMixin(object): + def obj(): + def fget(self): + try: + return self._obj + except AttributeError: + self._obj = obj = self._getobj() + return obj + def fset(self, value): + self._obj = value + return property(fget, fset, None, "underlying python object") + obj = obj() + + def _getobj(self): + return getattr(self.parent.obj, self.name) + + def getmodpath(self, stopatmodule=True, includemodule=False): + """ return python path relative to the containing module. """ + chain = self.listchain() + chain.reverse() + parts = [] + for node in chain: + if isinstance(node, Instance): + continue + name = node.name + if isinstance(node, Module): + assert name.endswith(".py") + name = name[:-3] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + s = ".".join(parts) + return s.replace(".[", "[") + + def _getfslineno(self): + try: + return self._fslineno + except AttributeError: + pass + obj = self.obj + # xxx let decorators etc specify a sane ordering + if hasattr(obj, 'place_as'): + obj = obj.place_as + + self._fslineno = py.code.getfslineno(obj) + return self._fslineno + + def reportinfo(self): + # XXX caching? 
+ obj = self.obj + if hasattr(obj, 'compat_co_firstlineno'): + # nose compatibility + fspath = sys.modules[obj.__module__].__file__ + if fspath.endswith(".pyc"): + fspath = fspath[:-1] + #assert 0 + #fn = inspect.getsourcefile(obj) or inspect.getfile(obj) + lineno = obj.compat_co_firstlineno + modpath = obj.__module__ + else: + fspath, lineno = self._getfslineno() + modpath = self.getmodpath() + return fspath, lineno, modpath + +class PyCollectorMixin(PyobjMixin, pytest.Collector): + + def funcnamefilter(self, name): + for prefix in self.config.getini("python_functions"): + if name.startswith(prefix): + return True + + def classnamefilter(self, name): + for prefix in self.config.getini("python_classes"): + if name.startswith(prefix): + return True + + def collect(self): + # NB. we avoid random getattrs and peek in the __dict__ instead + # (XXX originally introduced from a PyPy need, still true?) + dicts = [getattr(self.obj, '__dict__', {})] + for basecls in inspect.getmro(self.obj.__class__): + dicts.append(basecls.__dict__) + seen = {} + l = [] + for dic in dicts: + for name, obj in dic.items(): + if name in seen: + continue + seen[name] = True + if name[0] != "_": + res = self.makeitem(name, obj) + if res is None: + continue + if not isinstance(res, list): + res = [res] + l.extend(res) + l.sort(key=lambda item: item.reportinfo()[:2]) + return l + + def makeitem(self, name, obj): + return self.ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj) + + def _istestclasscandidate(self, name, obj): + if self.classnamefilter(name) and \ + inspect.isclass(obj): + if hasinit(obj): + # XXX WARN + return False + return True + + def _genfunctions(self, name, funcobj): + module = self.getparent(Module).obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + metafunc = Metafunc(funcobj, config=self.config, + cls=cls, module=module) + gentesthook = self.config.hook.pytest_generate_tests + extra = [module] + if cls is not None: + 
extra.append(cls()) + plugins = self.getplugins() + extra + gentesthook.pcall(plugins, metafunc=metafunc) + Function = self._getcustomclass("Function") + if not metafunc._calls: + return Function(name, parent=self) + l = [] + for callspec in metafunc._calls: + subname = "%s[%s]" %(name, callspec.id) + function = Function(name=subname, parent=self, + callspec=callspec, callobj=funcobj, keywords={callspec.id:True}) + l.append(function) + return l + + +class Module(pytest.File, PyCollectorMixin): + def _getobj(self): + return self._memoizedcall('_obj', self._importtestmodule) + + def _importtestmodule(self): + # we assume we are only called once per module + try: + mod = self.fspath.pyimport(ensuresyspath=True) + except SyntaxError: + excinfo = py.code.ExceptionInfo() + raise self.CollectError(excinfo.getrepr(style="short")) + except self.fspath.ImportMismatchError: + e = sys.exc_info()[1] + raise self.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: use a unique basename for your test file modules" + % e.args + ) + #print "imported test module", mod + self.config.pluginmanager.consider_module(mod) + return mod + + def setup(self): + if hasattr(self.obj, 'setup_module'): + #XXX: nose compat hack, move to nose plugin + # if it takes a positional arg, its probably a pytest style one + # so we pass the current module object + if inspect.getargspec(self.obj.setup_module)[0]: + self.obj.setup_module(self.obj) + else: + self.obj.setup_module() + + def teardown(self): + if hasattr(self.obj, 'teardown_module'): + #XXX: nose compat hack, move to nose plugin + # if it takes a positional arg, its probably a py.test style one + # so we pass the current module object + if inspect.getargspec(self.obj.teardown_module)[0]: + self.obj.teardown_module(self.obj) + else: + self.obj.teardown_module() + +class Class(PyCollectorMixin, 
pytest.Collector): + + def collect(self): + return [self._getcustomclass("Instance")(name="()", parent=self)] + + def setup(self): + setup_class = getattr(self.obj, 'setup_class', None) + if setup_class is not None: + setup_class = getattr(setup_class, 'im_func', setup_class) + setup_class(self.obj) + + def teardown(self): + teardown_class = getattr(self.obj, 'teardown_class', None) + if teardown_class is not None: + teardown_class = getattr(teardown_class, 'im_func', teardown_class) + teardown_class(self.obj) + +class Instance(PyCollectorMixin, pytest.Collector): + def _getobj(self): + return self.parent.obj() + + def newinstance(self): + self.obj = self._getobj() + return self.obj + +class FunctionMixin(PyobjMixin): + """ mixin for the code common to Function and Generator. + """ + def setup(self): + """ perform setup for this test function. """ + if hasattr(self, '_preservedparent'): + obj = self._preservedparent + elif isinstance(self.parent, Instance): + obj = self.parent.newinstance() + self.obj = self._getobj() + else: + obj = self.parent.obj + if inspect.ismethod(self.obj): + name = 'setup_method' + else: + name = 'setup_function' + setup_func_or_method = getattr(obj, name, None) + if setup_func_or_method is not None: + setup_func_or_method(self.obj) + + def teardown(self): + """ perform teardown for this test function. 
""" + if inspect.ismethod(self.obj): + name = 'teardown_method' + else: + name = 'teardown_function' + obj = self.parent.obj + teardown_func_or_meth = getattr(obj, name, None) + if teardown_func_or_meth is not None: + teardown_func_or_meth(self.obj) + + def _prunetraceback(self, excinfo): + if hasattr(self, '_obj') and not self.config.option.fulltrace: + code = py.code.Code(self.obj) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=cutdir) + excinfo.traceback = ntraceback.filter() + + def _repr_failure_py(self, excinfo, style="long"): + if excinfo.errisinstance(FuncargRequest.LookupError): + fspath, lineno, msg = self.reportinfo() + lines, _ = inspect.getsourcelines(self.obj) + for i, line in enumerate(lines): + if line.strip().startswith('def'): + return FuncargLookupErrorRepr(fspath, lineno, + lines[:i+1], str(excinfo.value)) + if excinfo.errisinstance(pytest.fail.Exception): + if not excinfo.value.pytrace: + return str(excinfo.value) + return super(FunctionMixin, self)._repr_failure_py(excinfo, + style=style) + + def repr_failure(self, excinfo, outerr=None): + assert outerr is None, "XXX outerr usage is deprecated" + return self._repr_failure_py(excinfo, + style=self.config.option.tbstyle) + +class FuncargLookupErrorRepr(TerminalRepr): + def __init__(self, filename, firstlineno, deflines, errorstring): + self.deflines = deflines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + + def toterminal(self, tw): + tw.line() + for line in self.deflines: + tw.line(" " + line.strip()) + for line in self.errorstring.split("\n"): + tw.line(" " + line.strip(), red=True) + tw.line() + tw.line("%s:%d" % (self.filename, self.firstlineno+1)) + +class Generator(FunctionMixin, PyCollectorMixin, 
pytest.Collector): + def collect(self): + # test generators are seen as collectors but they also + # invoke setup/teardown on popular request + # (induced by the common "test_*" naming shared with normal tests) + self.config._setupstate.prepare(self) + # see FunctionMixin.setup and test_setupstate_is_preserved_134 + self._preservedparent = self.parent.obj + l = [] + seen = {} + for i, x in enumerate(self.obj()): + name, call, args = self.getcallargs(x) + if not py.builtin.callable(call): + raise TypeError("%r yielded non callable test %r" %(self.obj, call,)) + if name is None: + name = "[%d]" % i + else: + name = "['%s']" % name + if name in seen: + raise ValueError("%r generated tests with non-unique name %r" %(self, name)) + seen[name] = True + l.append(self.Function(name, self, args=args, callobj=call)) + return l + + def getcallargs(self, obj): + if not isinstance(obj, (tuple, list)): + obj = (obj,) + # explict naming + if isinstance(obj[0], py.builtin._basestring): + name = obj[0] + obj = obj[1:] + else: + name = None + call, args = obj[0], obj[1:] + return name, call, args + + +# +# Test Items +# +_dummy = object() +class Function(FunctionMixin, pytest.Item): + """ a Function Item is responsible for setting up + and executing a Python callable test object. 
+ """ + _genid = None + def __init__(self, name, parent=None, args=None, config=None, + callspec=None, callobj=_dummy, keywords=None, session=None): + super(Function, self).__init__(name, parent, + config=config, session=session) + self._args = args + if self._isyieldedfunction(): + assert not callspec, ( + "yielded functions (deprecated) cannot have funcargs") + else: + if callspec is not None: + self.funcargs = callspec.funcargs or {} + self._genid = callspec.id + if hasattr(callspec, "param"): + self._requestparam = callspec.param + else: + self.funcargs = {} + if callobj is not _dummy: + self._obj = callobj + self.function = getattr(self.obj, 'im_func', self.obj) + self.keywords.update(py.builtin._getfuncdict(self.obj) or {}) + if keywords: + self.keywords.update(keywords) + + def _getobj(self): + name = self.name + i = name.find("[") # parametrization + if i != -1: + name = name[:i] + return getattr(self.parent.obj, name) + + def _isyieldedfunction(self): + return self._args is not None + + def runtest(self): + """ execute the underlying test function. 
""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self): + super(Function, self).setup() + if hasattr(self, 'funcargs'): + fillfuncargs(self) + + def __eq__(self, other): + try: + return (self.name == other.name and + self._args == other._args and + self.parent == other.parent and + self.obj == other.obj and + getattr(self, '_genid', None) == + getattr(other, '_genid', None) + ) + except AttributeError: + pass + return False + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.parent, self.name)) + +def hasinit(obj): + init = getattr(obj, '__init__', None) + if init: + if init != object.__init__: + return True + + +def getfuncargnames(function, startindex=None): + # XXX merge with main.py's varnames + argnames = py.std.inspect.getargs(py.code.getrawcode(function))[0] + if startindex is None: + startindex = py.std.inspect.ismethod(function) and 1 or 0 + defaults = getattr(function, 'func_defaults', + getattr(function, '__defaults__', None)) or () + numdefaults = len(defaults) + if numdefaults: + return argnames[startindex:-numdefaults] + return argnames[startindex:] + +def fillfuncargs(function): + """ fill missing funcargs. 
""" + request = FuncargRequest(pyfuncitem=function) + request._fillfuncargs() + +_notexists = object() +class CallSpec: + def __init__(self, funcargs, id, param): + self.funcargs = funcargs + self.id = id + if param is not _notexists: + self.param = param + def __repr__(self): + return "" %( + self.id, getattr(self, 'param', '?'), self.funcargs) + +class Metafunc: + def __init__(self, function, config=None, cls=None, module=None): + self.config = config + self.module = module + self.function = function + self.funcargnames = getfuncargnames(function, + startindex=int(cls is not None)) + self.cls = cls + self.module = module + self._calls = [] + self._ids = py.builtin.set() + + def addcall(self, funcargs=None, id=_notexists, param=_notexists): + """ add a new call to the underlying test function during the + collection phase of a test run. Note that request.addcall() is + called during the test collection phase prior and independently + to actual test execution. Therefore you should perform setup + of resources in a funcarg factory which can be instrumented + with the ``param``. + + :arg funcargs: argument keyword dictionary used when invoking + the test function. + + :arg id: used for reporting and identification purposes. If you + don't supply an `id` the length of the currently + list of calls to the test function will be used. + + :arg param: will be exposed to a later funcarg factory invocation + through the ``request.param`` attribute. It allows to + defer test fixture setup activities to when an actual + test is run. + """ + assert funcargs is None or isinstance(funcargs, dict) + if funcargs is not None: + for name in funcargs: + if name not in self.funcargnames: + pytest.fail("funcarg %r not used in this function." 
% name) + if id is None: + raise ValueError("id=None not allowed") + if id is _notexists: + id = len(self._calls) + id = str(id) + if id in self._ids: + raise ValueError("duplicate id %r" % id) + self._ids.add(id) + self._calls.append(CallSpec(funcargs, id, param)) + +class FuncargRequest: + """ A request for function arguments from a test function. + + Note that there is an optional ``param`` attribute in case + there was an invocation to metafunc.addcall(param=...). + If no such call was done in a ``pytest_generate_tests`` + hook, the attribute will not be present. + """ + _argprefix = "pytest_funcarg__" + _argname = None + + class LookupError(LookupError): + """ error on performing funcarg request. """ + + def __init__(self, pyfuncitem): + self._pyfuncitem = pyfuncitem + if hasattr(pyfuncitem, '_requestparam'): + self.param = pyfuncitem._requestparam + extra = [obj for obj in (self.module, self.instance) if obj] + self._plugins = pyfuncitem.getplugins() + extra + self._funcargs = self._pyfuncitem.funcargs.copy() + self._name2factory = {} + self._currentarg = None + + @property + def function(self): + """ function object of the test invocation. """ + return self._pyfuncitem.obj + + @property + def keywords(self): + """ keywords of the test function item. + + .. versionadded:: 2.0 + """ + return self._pyfuncitem.keywords + + @property + def module(self): + """ module where the test function was collected. """ + return self._pyfuncitem.getparent(pytest.Module).obj + + @property + def cls(self): + """ class (can be None) where the test function was collected. """ + clscol = self._pyfuncitem.getparent(pytest.Class) + if clscol: + return clscol.obj + @property + def instance(self): + """ instance (can be None) on which test function was collected. """ + return py.builtin._getimself(self.function) + + @property + def config(self): + """ the pytest config object associated with this request. 
""" + return self._pyfuncitem.config + + @property + def fspath(self): + """ the file system path of the test module which collected this test. """ + return self._pyfuncitem.fspath + + def _fillfuncargs(self): + argnames = getfuncargnames(self.function) + if argnames: + assert not getattr(self._pyfuncitem, '_args', None), ( + "yielded functions cannot have funcargs") + for argname in argnames: + if argname not in self._pyfuncitem.funcargs: + self._pyfuncitem.funcargs[argname] = self.getfuncargvalue(argname) + + + def applymarker(self, marker): + """ apply a marker to a single test function invocation. + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object + created by a call to ``py.test.mark.NAME(...)``. + """ + if not isinstance(marker, py.test.mark.XYZ.__class__): + raise ValueError("%r is not a py.test.mark.* object") + self._pyfuncitem.keywords[marker.markname] = marker + + def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): + """ return a testing resource managed by ``setup`` & + ``teardown`` calls. ``scope`` and ``extrakey`` determine when the + ``teardown`` function will be called so that subsequent calls to + ``setup`` would recreate the resource. + + :arg teardown: function receiving a previously setup resource. + :arg setup: a no-argument function creating a resource. + :arg scope: a string value out of ``function``, ``class``, ``module`` + or ``session`` indicating the caching lifecycle of the resource. + :arg extrakey: added to internal caching key of (funcargname, scope). + """ + if not hasattr(self.config, '_setupcache'): + self.config._setupcache = {} # XXX weakref? 
+ cachekey = (self._currentarg, self._getscopeitem(scope), extrakey) + cache = self.config._setupcache + try: + val = cache[cachekey] + except KeyError: + val = setup() + cache[cachekey] = val + if teardown is not None: + def finalizer(): + del cache[cachekey] + teardown(val) + self._addfinalizer(finalizer, scope=scope) + return val + + def getfuncargvalue(self, argname): + """ Retrieve a function argument by name for this test + function invocation. This allows one function argument factory + to call another function argument factory. If there are two + funcarg factories for the same test function argument the first + factory may use ``getfuncargvalue`` to call the second one and + do something additional with the resource. + """ + try: + return self._funcargs[argname] + except KeyError: + pass + if argname not in self._name2factory: + self._name2factory[argname] = self.config.pluginmanager.listattr( + plugins=self._plugins, + attrname=self._argprefix + str(argname) + ) + #else: we are called recursively + if not self._name2factory[argname]: + self._raiselookupfailed(argname) + funcargfactory = self._name2factory[argname].pop() + oldarg = self._currentarg + self._currentarg = argname + try: + self._funcargs[argname] = res = funcargfactory(request=self) + finally: + self._currentarg = oldarg + return res + + def _getscopeitem(self, scope): + if scope == "function": + return self._pyfuncitem + elif scope == "session": + return None + elif scope == "class": + x = self._pyfuncitem.getparent(pytest.Class) + if x is not None: + return x + scope = "module" + if scope == "module": + return self._pyfuncitem.getparent(pytest.Module) + raise ValueError("unknown finalization scope %r" %(scope,)) + + def addfinalizer(self, finalizer): + """add finalizer function to be called after test function + finished execution. 
""" + self._addfinalizer(finalizer, scope="function") + + def _addfinalizer(self, finalizer, scope): + colitem = self._getscopeitem(scope) + self.config._setupstate.addfinalizer( + finalizer=finalizer, colitem=colitem) + + def __repr__(self): + return "" %(self._pyfuncitem) + + def _raiselookupfailed(self, argname): + available = [] + for plugin in self._plugins: + for name in vars(plugin): + if name.startswith(self._argprefix): + name = name[len(self._argprefix):] + if name not in available: + available.append(name) + fspath, lineno, msg = self._pyfuncitem.reportinfo() + msg = "LookupError: no factory found for function argument %r" % (argname,) + msg += "\n available funcargs: %s" %(", ".join(available),) + msg += "\n use 'py.test --funcargs [testpath]' for help on them." + raise self.LookupError(msg) + +def showfuncargs(config): + from _pytest.main import Session + session = Session(config) + session.perform_collect() + if session.items: + plugins = session.items[0].getplugins() + else: + plugins = session.getplugins() + curdir = py.path.local() + tw = py.io.TerminalWriter() + verbose = config.getvalue("verbose") + for plugin in plugins: + available = [] + for name, factory in vars(plugin).items(): + if name.startswith(FuncargRequest._argprefix): + name = name[len(FuncargRequest._argprefix):] + if name not in available: + available.append([name, factory]) + if available: + pluginname = plugin.__name__ + for name, factory in available: + loc = getlocation(factory, curdir) + if verbose: + funcargspec = "%s -- %s" %(name, loc,) + else: + funcargspec = name + tw.line(funcargspec, green=True) + doc = factory.__doc__ or "" + if doc: + for line in doc.split("\n"): + tw.line(" " + line.strip()) + else: + tw.line(" %s: no docstring available" %(loc,), + red=True) + +def getlocation(function, curdir): + import inspect + fn = py.path.local(inspect.getfile(function)) + lineno = py.builtin._getcode(function).co_firstlineno + if fn.relto(curdir): + fn = fn.relto(curdir) + 
return "%s:%d" %(fn, lineno+1) + +# builtin pytest.raises helper + +def raises(ExpectedException, *args, **kwargs): + """ assert that a code block/function call raises @ExpectedException + and raise a failure exception otherwise. + + If using Python 2.5 or above, you may use this function as a + context manager:: + + >>> with raises(ZeroDivisionError): + ... 1/0 + + Or you can specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + A third possibility is to use a string which which will + be executed:: + + >>> raises(ZeroDivisionError, "f(0)") + + """ + __tracebackhide__ = True + + if not args: + return RaisesContext(ExpectedException) + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + #print "raises frame scope: %r" % frame.f_locals + try: + code = py.code.Source(code).compile() + py.builtin.exec_(code, frame.f_globals, loc) + # XXX didn'T mean f_globals == f_locals something special? + # this is destroyed here ... 
+ except ExpectedException: + return py.code.ExceptionInfo() + else: + func = args[0] + try: + func(*args[1:], **kwargs) + except ExpectedException: + return py.code.ExceptionInfo() + k = ", ".join(["%s=%r" % x for x in kwargs.items()]) + if k: + k = ', ' + k + expr = '%s(%r%s)' %(getattr(func, '__name__', func), args, k) + pytest.fail("DID NOT RAISE") + +class RaisesContext(object): + def __init__(self, ExpectedException): + self.ExpectedException = ExpectedException + self.excinfo = None + + def __enter__(self): + self.excinfo = object.__new__(py.code.ExceptionInfo) + return self.excinfo + + def __exit__(self, *tp): + __tracebackhide__ = True + if tp[0] is None: + pytest.fail("DID NOT RAISE") + self.excinfo.__init__(tp) + return issubclass(self.excinfo.type, self.ExpectedException) + diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py new file mode 100644 --- /dev/null +++ b/_pytest/recwarn.py @@ -0,0 +1,99 @@ +""" recording warnings during test function execution. """ + +import py +import sys, os + +def pytest_funcarg__recwarn(request): + """Return a WarningsRecorder instance that provides these methods: + + * ``pop(category=None)``: return last warning matching the category. + * ``clear()``: clear list of warnings + + See http://docs.python.org/library/warnings.html for information + on warning categories. + """ + if sys.version_info >= (2,7): + import warnings + oldfilters = warnings.filters[:] + warnings.simplefilter('default') + def reset_filters(): + warnings.filters[:] = oldfilters + request.addfinalizer(reset_filters) + wrec = WarningsRecorder() + request.addfinalizer(wrec.finalize) + return wrec + +def pytest_namespace(): + return {'deprecated_call': deprecated_call} + +def deprecated_call(func, *args, **kwargs): + """ assert that calling ``func(*args, **kwargs)`` + triggers a DeprecationWarning. 
+ """ + warningmodule = py.std.warnings + l = [] + oldwarn_explicit = getattr(warningmodule, 'warn_explicit') + def warn_explicit(*args, **kwargs): + l.append(args) + oldwarn_explicit(*args, **kwargs) + oldwarn = getattr(warningmodule, 'warn') + def warn(*args, **kwargs): + l.append(args) + oldwarn(*args, **kwargs) + + warningmodule.warn_explicit = warn_explicit + warningmodule.warn = warn + try: + ret = func(*args, **kwargs) + finally: + warningmodule.warn_explicit = warn_explicit + warningmodule.warn = warn + if not l: + #print warningmodule + __tracebackhide__ = True + raise AssertionError("%r did not produce DeprecationWarning" %(func,)) + return ret + + +class RecordedWarning: + def __init__(self, message, category, filename, lineno, line): + self.message = message + self.category = category + self.filename = filename + self.lineno = lineno + self.line = line + +class WarningsRecorder: + def __init__(self): + warningmodule = py.std.warnings + self.list = [] + def showwarning(message, category, filename, lineno, line=0): + self.list.append(RecordedWarning( + message, category, filename, lineno, line)) + try: + self.old_showwarning(message, category, + filename, lineno, line=line) + except TypeError: + # < python2.6 + self.old_showwarning(message, category, filename, lineno) + self.old_showwarning = warningmodule.showwarning + warningmodule.showwarning = showwarning + + def pop(self, cls=Warning): + """ pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self.list): + if issubclass(w.category, cls): + return self.list.pop(i) + __tracebackhide__ = True + assert 0, "%r not found in %r" %(cls, self.list) + + #def resetregistry(self): + # import warnings + # warnings.onceregistry.clear() + # warnings.__warningregistry__.clear() + + def clear(self): + self.list[:] = [] + + def finalize(self): + py.std.warnings.showwarning = self.old_showwarning diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py new file mode 100644 --- 
/dev/null +++ b/_pytest/resultlog.py @@ -0,0 +1,93 @@ +""" (disabled by default) create result information in a plain text file. """ + +import py + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "resultlog plugin options") + group.addoption('--resultlog', action="store", dest="resultlog", + metavar="path", default=None, + help="path for machine-readable result log.") + +def pytest_configure(config): + resultlog = config.option.resultlog + # prevent opening resultlog on slave nodes (xdist) + if resultlog and not hasattr(config, 'slaveinput'): + logfile = open(resultlog, 'w', 1) # line buffered + config._resultlog = ResultLog(config, logfile) + config.pluginmanager.register(config._resultlog) + +def pytest_unconfigure(config): + resultlog = getattr(config, '_resultlog', None) + if resultlog: + resultlog.logfile.close() + del config._resultlog + config.pluginmanager.unregister(resultlog) + +def generic_path(item): + chain = item.listchain() + gpath = [chain[0].name] + fspath = chain[0].fspath + fspart = False + for node in chain[1:]: + newfspath = node.fspath + if newfspath == fspath: + if fspart: + gpath.append(':') + fspart = False + else: + gpath.append('.') + else: + gpath.append('/') + fspart = True + name = node.name + if name[0] in '([': + gpath.pop() + gpath.append(name) + fspath = newfspath + return ''.join(gpath) + +class ResultLog(object): + def __init__(self, config, logfile): + self.config = config + self.logfile = logfile # preferably line buffered + + def write_log_entry(self, testpath, lettercode, longrepr): + py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) + for line in longrepr.splitlines(): + py.builtin.print_(" %s" % line, file=self.logfile) + + def log_outcome(self, report, lettercode, longrepr): + testpath = getattr(report, 'nodeid', None) + if testpath is None: + testpath = report.fspath + self.write_log_entry(testpath, lettercode, longrepr) + + def pytest_runtest_logreport(self, report): + 
res = self.config.hook.pytest_report_teststatus(report=report) + code = res[1] + if code == 'x': + longrepr = str(report.longrepr) + elif code == 'X': + longrepr = '' + elif report.passed: + longrepr = "" + elif report.failed: + longrepr = str(report.longrepr) + elif report.skipped: + longrepr = str(report.longrepr[2]) + self.log_outcome(report, code, longrepr) + + def pytest_collectreport(self, report): + if not report.passed: + if report.failed: + code = "F" + longrepr = str(report.longrepr.reprcrash) + else: + assert report.skipped + code = "S" + longrepr = "%s:%d: %s" % report.longrepr + self.log_outcome(report, code, longrepr) + + def pytest_internalerror(self, excrepr): + path = excrepr.reprcrash.path + self.write_log_entry(path, '!', str(excrepr)) diff --git a/_pytest/runner.py b/_pytest/runner.py new file mode 100644 --- /dev/null +++ b/_pytest/runner.py @@ -0,0 +1,390 @@ +""" basic collect and runtest protocol implementations """ + +import py, sys +from py._code.code import TerminalRepr + +def pytest_namespace(): + return { + 'fail' : fail, + 'skip' : skip, + 'importorskip' : importorskip, + 'exit' : exit, + } + +# +# pytest plugin hooks + +# XXX move to pytest_sessionstart and fix py.test owns tests +def pytest_configure(config): + config._setupstate = SetupState() + +def pytest_sessionfinish(session, exitstatus): + if hasattr(session.config, '_setupstate'): + hook = session.config.hook + rep = hook.pytest__teardown_final(session=session) + if rep: + hook.pytest__teardown_final_logerror(session=session, report=rep) + session.exitstatus = 1 + +class NodeInfo: + def __init__(self, location): + self.location = location + +def pytest_runtest_protocol(item): + item.ihook.pytest_runtest_logstart( + nodeid=item.nodeid, location=item.location, + ) + runtestprotocol(item) + return True + +def runtestprotocol(item, log=True): + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + reports.append(call_and_report(item, "call", log)) + 
reports.append(call_and_report(item, "teardown", log)) + return reports + +def pytest_runtest_setup(item): + item.config._setupstate.prepare(item) + +def pytest_runtest_call(item): + item.runtest() + +def pytest_runtest_teardown(item): + item.config._setupstate.teardown_exact(item) + +def pytest__teardown_final(session): + call = CallInfo(session.config._setupstate.teardown_all, when="teardown") + if call.excinfo: + ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir) + call.excinfo.traceback = ntraceback.filter() + longrepr = call.excinfo.getrepr(funcargs=True) + return TeardownErrorReport(longrepr) + +def pytest_report_teststatus(report): + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + + +# +# Implementation + +def call_and_report(item, when, log=True): + call = call_runtest_hook(item, when) + hook = item.ihook + report = hook.pytest_runtest_makereport(item=item, call=call) + if log and (when == "call" or not report.passed): + hook.pytest_runtest_logreport(report=report) + return report + +def call_runtest_hook(item, when): + hookname = "pytest_runtest_" + when + ihook = getattr(item.ihook, hookname) + return CallInfo(lambda: ihook(item=item), when=when) + +class CallInfo: + """ Result/Exception info a function invocation. """ + #: None or ExceptionInfo object. 
+ excinfo = None + def __init__(self, func, when): + #: context of invocation: one of "setup", "call", + #: "teardown", "memocollect" + self.when = when + try: + self.result = func() + except KeyboardInterrupt: + raise + except: + self.excinfo = py.code.ExceptionInfo() + + def __repr__(self): + if self.excinfo: + status = "exception: %s" % str(self.excinfo.value) + else: + status = "result: %r" % (self.result,) + return "" % (self.when, status) + +def getslaveinfoline(node): + try: + return node._slaveinfocache + except AttributeError: + d = node.slaveinfo + ver = "%s.%s.%s" % d['version_info'][:3] + node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( + d['id'], d['sysplatform'], ver, d['executable']) + return s + +class BaseReport(object): + def toterminal(self, out): + longrepr = self.longrepr + if hasattr(self, 'node'): + out.line(getslaveinfoline(self.node)) + if hasattr(longrepr, 'toterminal'): + longrepr.toterminal(out) + else: + out.line(str(longrepr)) + + passed = property(lambda x: x.outcome == "passed") + failed = property(lambda x: x.outcome == "failed") + skipped = property(lambda x: x.outcome == "skipped") + + @property + def fspath(self): + return self.nodeid.split("::")[0] + +def pytest_runtest_makereport(item, call): + when = call.when + keywords = dict([(x,1) for x in item.keywords]) + excinfo = call.excinfo + if not call.excinfo: + outcome = "passed" + longrepr = None + else: + excinfo = call.excinfo + if not isinstance(excinfo, py.code.ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif excinfo.errisinstance(py.test.skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: # exception in setup or teardown + longrepr = item._repr_failure_py(excinfo) + return TestReport(item.nodeid, item.location, + keywords, outcome, longrepr, when) + +class TestReport(BaseReport): + """ 
Basic test report object (also used for setup and teardown calls if + they fail). + """ + def __init__(self, nodeid, location, + keywords, outcome, longrepr, when): + #: normalized collection node id + self.nodeid = nodeid + + #: a (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + self.location = location + + #: a name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords = keywords + + #: test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: one of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + def __repr__(self): + return "" % ( + self.nodeid, self.when, self.outcome) + +class TeardownErrorReport(BaseReport): + outcome = "failed" + when = "teardown" + def __init__(self, longrepr): + self.longrepr = longrepr + +def pytest_make_collect_report(collector): + call = CallInfo(collector._memocollect, "memocollect") + longrepr = None + if not call.excinfo: + outcome = "passed" + else: + if call.excinfo.errisinstance(py.test.skip.Exception): + outcome = "skipped" + r = collector._repr_failure_py(call.excinfo, "line").reprcrash + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + errorinfo = collector.repr_failure(call.excinfo) + if not hasattr(errorinfo, "toterminal"): + errorinfo = CollectErrorRepr(errorinfo) + longrepr = errorinfo + return CollectReport(collector.nodeid, outcome, longrepr, + getattr(call, 'result', None)) + +class CollectReport(BaseReport): + def __init__(self, nodeid, outcome, longrepr, result): + self.nodeid = nodeid + self.outcome = outcome + self.longrepr = longrepr + self.result = result or [] + + @property + def location(self): + return (self.fspath, None, self.fspath) + + def 
__repr__(self): + return "" % ( + self.nodeid, len(self.result), self.outcome) + +class CollectErrorRepr(TerminalRepr): + def __init__(self, msg): + self.longrepr = msg + def toterminal(self, out): + out.line(str(self.longrepr), red=True) + +class SetupState(object): + """ shared state for setting up/tearing down test items or collectors. """ + def __init__(self): + self.stack = [] + self._finalizers = {} + + def addfinalizer(self, finalizer, colitem): + """ attach a finalizer to the given colitem. + if colitem is None, this will add a finalizer that + is called at the end of teardown_all(). + """ + assert hasattr(finalizer, '__call__') + #assert colitem in self.stack + self._finalizers.setdefault(colitem, []).append(finalizer) + + def _pop_and_teardown(self): + colitem = self.stack.pop() + self._teardown_with_finalization(colitem) + + def _callfinalizers(self, colitem): + finalizers = self._finalizers.pop(colitem, None) + while finalizers: + fin = finalizers.pop() + fin() + + def _teardown_with_finalization(self, colitem): + self._callfinalizers(colitem) + if colitem: + colitem.teardown() + for colitem in self._finalizers: + assert colitem is None or colitem in self.stack + + def teardown_all(self): + while self.stack: + self._pop_and_teardown() + self._teardown_with_finalization(None) + assert not self._finalizers + + def teardown_exact(self, item): + if self.stack and item == self.stack[-1]: + self._pop_and_teardown() + else: + self._callfinalizers(item) + + def prepare(self, colitem): + """ setup objects along the collector chain to the test-method + and teardown previously setup objects.""" + needed_collectors = colitem.listchain() + while self.stack: + if self.stack == needed_collectors[:len(self.stack)]: + break + self._pop_and_teardown() + # check if the last collection node has raised an error + for col in self.stack: + if hasattr(col, '_prepare_exc'): + py.builtin._reraise(*col._prepare_exc) + for col in needed_collectors[len(self.stack):]: + 
self.stack.append(col) + try: + col.setup() + except Exception: + col._prepare_exc = sys.exc_info() + raise + +# ============================================================= +# Test OutcomeExceptions and helpers for creating them. + + +class OutcomeException(Exception): + """ OutcomeException and its subclass instances indicate and + contain info about test and collection outcomes. + """ + def __init__(self, msg=None, pytrace=True): + self.msg = msg + self.pytrace = pytrace + + def __repr__(self): + if self.msg: + return str(self.msg) + return "<%s instance>" %(self.__class__.__name__,) + __str__ = __repr__ + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = 'builtins' + +class Failed(OutcomeException): + """ raised from an explicit call to py.test.fail() """ + __module__ = 'builtins' + +class Exit(KeyboardInterrupt): + """ raised for immediate program exits (no tracebacks/summaries)""" + def __init__(self, msg="unknown reason"): + self.msg = msg + KeyboardInterrupt.__init__(self, msg) + +# exposed helper methods + +def exit(msg): + """ exit testing process as if KeyboardInterrupt was triggered. """ + __tracebackhide__ = True + raise Exit(msg) + +exit.Exception = Exit + +def skip(msg=""): + """ skip an executing test with the given message. Note: it's usually + better to use the py.test.mark.skipif marker to declare a test to be + skipped under certain conditions like mismatching platforms or + dependencies. See the pytest_skipping plugin for details. + """ + __tracebackhide__ = True + raise Skipped(msg=msg) +skip.Exception = Skipped + +def fail(msg="", pytrace=True): + """ explicitely fail an currently-executing test with the given Message. + if @pytrace is not True the msg represents the full failure information. 
+ """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) +fail.Exception = Failed + + +def importorskip(modname, minversion=None): + """ return imported module if it has a higher __version__ than the + optionally specified 'minversion' - otherwise call py.test.skip() + with a message detailing the mismatch. + """ + __tracebackhide__ = True + compile(modname, '', 'eval') # to catch syntaxerrors + try: + mod = __import__(modname, None, None, ['__doc__']) + except ImportError: + py.test.skip("could not import %r" %(modname,)) + if minversion is None: + return mod + verattr = getattr(mod, '__version__', None) + if isinstance(minversion, str): + minver = minversion.split(".") + else: + minver = list(minversion) + if verattr is None or verattr.split(".") < minver: + py.test.skip("module %r has __version__ %r, required is: %r" %( + modname, verattr, minversion)) + return mod diff --git a/_pytest/skipping.py b/_pytest/skipping.py new file mode 100644 --- /dev/null +++ b/_pytest/skipping.py @@ -0,0 +1,246 @@ +""" support for skip/xfail functions and markers. 
""" + +import py, pytest +import sys + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption('--runxfail', + action="store_true", dest="runxfail", default=False, + help="run tests even if they are marked xfail") + +def pytest_namespace(): + return dict(xfail=xfail) + +class XFailed(pytest.fail.Exception): + """ raised from an explicit call to py.test.xfail() """ + +def xfail(reason=""): + """ xfail an executing test or setup functions with the given reason.""" + __tracebackhide__ = True + raise XFailed(reason) +xfail.Exception = XFailed + +class MarkEvaluator: + def __init__(self, item, name): + self.item = item + self.name = name + + @property + def holder(self): + return self.item.keywords.get(self.name, None) + def __bool__(self): + return bool(self.holder) + __nonzero__ = __bool__ + + def wasvalid(self): + return not hasattr(self, 'exc') + + def istrue(self): + try: + return self._istrue() + except KeyboardInterrupt: + raise + except: + self.exc = sys.exc_info() + if isinstance(self.exc[1], SyntaxError): + msg = [" " * (self.exc[1].offset + 4) + "^",] + msg.append("SyntaxError: invalid syntax") + else: + msg = py.std.traceback.format_exception_only(*self.exc[:2]) + pytest.fail("Error evaluating %r expression\n" + " %s\n" + "%s" + %(self.name, self.expr, "\n".join(msg)), + pytrace=False) + + def _getglobals(self): + d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config} + func = self.item.obj + try: + d.update(func.__globals__) + except AttributeError: + d.update(func.func_globals) + return d + + def _istrue(self): + if self.holder: + d = self._getglobals() + if self.holder.args: + self.result = False + for expr in self.holder.args: + self.expr = expr + if isinstance(expr, str): + result = cached_eval(self.item.config, expr, d) + else: + pytest.fail("expression is not a string") + if result: + self.result = True + self.expr = expr + break + else: + self.result = True + return getattr(self, 'result', False) + + def 
get(self, attr, default=None): + return self.holder.kwargs.get(attr, default) + + def getexplanation(self): + expl = self.get('reason', None) + if not expl: + if not hasattr(self, 'expr'): + return "" + else: + return "condition: " + str(self.expr) + return expl + + +def pytest_runtest_setup(item): + if not isinstance(item, pytest.Function): + return + evalskip = MarkEvaluator(item, 'skipif') + if evalskip.istrue(): + py.test.skip(evalskip.getexplanation()) + item._evalxfail = MarkEvaluator(item, 'xfail') + check_xfail_no_run(item) + +def pytest_pyfunc_call(pyfuncitem): + check_xfail_no_run(pyfuncitem) + +def check_xfail_no_run(item): + if not item.config.option.runxfail: + evalxfail = item._evalxfail + if evalxfail.istrue(): + if not evalxfail.get('run', True): + py.test.xfail("[NOTRUN] " + evalxfail.getexplanation()) + +def pytest_runtest_makereport(__multicall__, item, call): + if not isinstance(item, pytest.Function): + return + if not (call.excinfo and + call.excinfo.errisinstance(py.test.xfail.Exception)): + evalxfail = getattr(item, '_evalxfail', None) + if not evalxfail: + return + if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception): + if not item.config.getvalue("runxfail"): + rep = __multicall__.execute() + rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + return rep + rep = __multicall__.execute() + evalxfail = item._evalxfail + if not item.config.option.runxfail: + if evalxfail.wasvalid() and evalxfail.istrue(): + if call.excinfo: + rep.outcome = "skipped" + rep.keywords['xfail'] = evalxfail.getexplanation() + elif call.when == "call": + rep.outcome = "failed" + rep.keywords['xfail'] = evalxfail.getexplanation() + return rep + if 'xfail' in rep.keywords: + del rep.keywords['xfail'] + return rep + +# called by terminalreporter progress reporting +def pytest_report_teststatus(report): + if 'xfail' in report.keywords: + if report.skipped: + return "xfailed", "x", "xfail" + elif report.failed: + 
return "xpassed", "X", "XPASS" + +# called by the terminalreporter instance/plugin +def pytest_terminal_summary(terminalreporter): + tr = terminalreporter + if not tr.reportchars: + #for name in "xfailed skipped failed xpassed": + # if not tr.stats.get(name, 0): + # tr.write_line("HINT: use '-r' option to see extra " + # "summary info about tests") + # break + return + + lines = [] + for char in tr.reportchars: + if char == "x": + show_xfailed(terminalreporter, lines) + elif char == "X": + show_xpassed(terminalreporter, lines) + elif char in "fF": + show_failed(terminalreporter, lines) + elif char in "sS": + show_skipped(terminalreporter, lines) + if lines: + tr._tw.sep("=", "short test summary info") + for line in lines: + tr._tw.line(line) + +def show_failed(terminalreporter, lines): + tw = terminalreporter._tw + failed = terminalreporter.stats.get("failed") + if failed: + for rep in failed: + pos = rep.nodeid + lines.append("FAIL %s" %(pos, )) + +def show_xfailed(terminalreporter, lines): + xfailed = terminalreporter.stats.get("xfailed") + if xfailed: + for rep in xfailed: + pos = rep.nodeid + reason = rep.keywords['xfail'] + lines.append("XFAIL %s" % (pos,)) + if reason: + lines.append(" " + str(reason)) + +def show_xpassed(terminalreporter, lines): + xpassed = terminalreporter.stats.get("xpassed") + if xpassed: + for rep in xpassed: + pos = rep.nodeid + reason = rep.keywords['xfail'] + lines.append("XPASS %s %s" %(pos, reason)) + +def cached_eval(config, expr, d): + if not hasattr(config, '_evalcache'): + config._evalcache = {} + try: + return config._evalcache[expr] + except KeyError: + #import sys + #print >>sys.stderr, ("cache-miss: %r" % expr) + exprcode = py.code.compile(expr, mode="eval") + config._evalcache[expr] = x = eval(exprcode, d) + return x + + +def folded_skips(skipped): + d = {} + for event in skipped: + key = event.longrepr + assert len(key) == 3, (event, key) + d.setdefault(key, []).append(event) + l = [] + for key, events in d.items(): + 
l.append((len(events),) + key) + return l + +def show_skipped(terminalreporter, lines): + tr = terminalreporter + skipped = tr.stats.get('skipped', []) + if skipped: + #if not tr.hasopt('skipped'): + # tr.write_line( + # "%d skipped tests, specify -rs for more info" % + # len(skipped)) + # return + fskips = folded_skips(skipped) + if fskips: + #tr.write_sep("_", "skipped test summary") + for num, fspath, lineno, reason in fskips: + if reason.startswith("Skipped: "): + reason = reason[9:] + lines.append("SKIP [%d] %s:%d: %s" % + (num, fspath, lineno, reason)) diff --git a/_pytest/standalonetemplate.py b/_pytest/standalonetemplate.py new file mode 100755 --- /dev/null +++ b/_pytest/standalonetemplate.py @@ -0,0 +1,63 @@ +#! /usr/bin/env python + +sources = """ + at SOURCES@""" + +import sys +import base64 +import zlib +import imp + +class DictImporter(object): + def __init__(self, sources): + self.sources = sources + + def find_module(self, fullname, path=None): + if fullname in self.sources: + return self + if fullname + '.__init__' in self.sources: + return self + return None + + def load_module(self, fullname): + # print "load_module:", fullname + from types import ModuleType + try: + s = self.sources[fullname] + is_pkg = False + except KeyError: + s = self.sources[fullname + '.__init__'] + is_pkg = True + + co = compile(s, fullname, 'exec') + module = sys.modules.setdefault(fullname, ModuleType(fullname)) + module.__file__ = "%s/%s" % (__file__, fullname) + module.__loader__ = self + if is_pkg: + module.__path__ = [fullname] + + do_exec(co, module.__dict__) + return sys.modules[fullname] + + def get_source(self, name): + res = self.sources.get(name) + if res is None: + res = self.sources.get(name + '.__init__') + return res + +if __name__ == "__main__": + if sys.version_info >= (3, 0): + exec("def do_exec(co, loc): exec(co, loc)\n") + import pickle + sources = sources.encode("ascii") # ensure bytes + sources = 
pickle.loads(zlib.decompress(base64.decodebytes(sources))) + else: + import cPickle as pickle + exec("def do_exec(co, loc): exec co in loc\n") + sources = pickle.loads(zlib.decompress(base64.decodestring(sources))) + + importer = DictImporter(sources) + sys.meta_path.append(importer) + + entry = "@ENTRY@" + do_exec(entry, locals()) diff --git a/_pytest/terminal.py b/_pytest/terminal.py new file mode 100644 --- /dev/null +++ b/_pytest/terminal.py @@ -0,0 +1,451 @@ +""" terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. +""" +import pytest, py +import sys +import os + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption('-v', '--verbose', action="count", + dest="verbose", default=0, help="increase verbosity."), + group._addoption('-q', '--quiet', action="count", + dest="quiet", default=0, help="decreate verbosity."), + group._addoption('-r', + action="store", dest="reportchars", default=None, metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(s)skipped, (x)failed, (X)passed.") + group._addoption('-l', '--showlocals', + action="store_true", dest="showlocals", default=False, + help="show locals in tracebacks (disabled by default).") + group._addoption('--report', + action="store", dest="report", default=None, metavar="opts", + help="(deprecated, use -r)") + group._addoption('--tb', metavar="style", + action="store", dest="tbstyle", default='long', + type="choice", choices=['long', 'short', 'no', 'line', 'native'], + help="traceback print mode (long/short/line/native/no).") + group._addoption('--fulltrace', + action="store_true", dest="fulltrace", default=False, + help="don't cut any tracebacks (default is to cut).") + +def pytest_configure(config): + config.option.verbose -= config.option.quiet + # we try hard to make printing resilient against + # later changes on FD level. 
+ stdout = py.std.sys.stdout + if hasattr(os, 'dup') and hasattr(stdout, 'fileno'): + try: + newfd = os.dup(stdout.fileno()) + #print "got newfd", newfd + except ValueError: + pass + else: + stdout = os.fdopen(newfd, stdout.mode, 1) + config._toclose = stdout + reporter = TerminalReporter(config, stdout) + config.pluginmanager.register(reporter, 'terminalreporter') + if config.option.debug or config.option.traceconfig: + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + config.trace.root.setprocessor("pytest:config", mywriter) + +def pytest_unconfigure(config): + if hasattr(config, '_toclose'): + #print "closing", config._toclose, config._toclose.fileno() + config._toclose.close() + +def getreportopt(config): + reportopts = "" + optvalue = config.option.report + if optvalue: + py.builtin.print_("DEPRECATED: use -r instead of --report option.", + file=py.std.sys.stderr) + if optvalue: + for setting in optvalue.split(","): + setting = setting.strip() + if setting == "skipped": + reportopts += "s" + elif setting == "xfailed": + reportopts += "x" + reportchars = config.option.reportchars + if reportchars: + for char in reportchars: + if char not in reportopts: + reportopts += char + return reportopts + +def pytest_report_teststatus(report): + if report.passed: + letter = "." 
+ elif report.skipped: + letter = "s" + elif report.failed: + letter = "F" + if report.when != "call": + letter = "f" + return report.outcome, letter, report.outcome.upper() + +class TerminalReporter: + def __init__(self, config, file=None): + self.config = config + self.verbosity = self.config.option.verbose + self.showheader = self.verbosity >= 0 + self.showfspath = self.verbosity >= 0 + self.showlongtestinfo = self.verbosity > 0 + self._numcollected = 0 + + self.stats = {} + self.curdir = py.path.local() + if file is None: + file = py.std.sys.stdout + self._tw = py.io.TerminalWriter(file) + self.currentfspath = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + + def hasopt(self, char): + char = {'xfailed': 'x', 'skipped': 's'}.get(char,char) + return char in self.reportchars + + def write_fspath_result(self, fspath, res): + if fspath != self.currentfspath: + self.currentfspath = fspath + #fspath = self.curdir.bestrelpath(fspath) + self._tw.line() + #relpath = self.curdir.bestrelpath(fspath) + self._tw.write(fspath + " ") + self._tw.write(res) + + def write_ensure_prefix(self, prefix, extra="", **kwargs): + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self): + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def write(self, content, **markup): + self._tw.write(content, **markup) + + def write_line(self, line, **markup): + line = str(line) + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line, **markup): + line = str(line) + self._tw.write("\r" + line, **markup) + + def write_sep(self, sep, title=None, **markup): + self.ensure_newline() + self._tw.sep(sep, title, **markup) + + def pytest_internalerror(self, excrepr): + for line in str(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return 1 + + def 
pytest_plugin_registered(self, plugin): + if self.config.option.traceconfig: + msg = "PLUGIN registered: %s" %(plugin,) + # XXX this event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line + self.write_line(msg) + + def pytest_deselected(self, items): + self.stats.setdefault('deselected', []).extend(items) + + def pytest__teardown_final_logerror(self, report): + self.stats.setdefault("error", []).append(report) + + def pytest_runtest_logstart(self, nodeid, location): + # ensure that the path is printed before the + # 1st test of a module starts running + fspath = nodeid.split("::")[0] + if self.showlongtestinfo: + line = self._locationline(fspath, *location) + self.write_ensure_prefix(line, "") + elif self.showfspath: + self.write_fspath_result(fspath, "") + + def pytest_runtest_logreport(self, report): + rep = report + res = self.config.hook.pytest_report_teststatus(report=rep) + cat, letter, word = res + self.stats.setdefault(cat, []).append(rep) + if not letter and not word: + # probably passed setup/teardown + return + if self.verbosity <= 0: + if not hasattr(rep, 'node') and self.showfspath: + self.write_fspath_result(rep.fspath, letter) + else: + self._tw.write(letter) + else: + if isinstance(word, tuple): + word, markup = word + else: + if rep.passed: + markup = {'green':True} + elif rep.failed: + markup = {'red':True} + elif rep.skipped: + markup = {'yellow':True} + line = self._locationline(str(rep.fspath), *rep.location) + if not hasattr(rep, 'node'): + self.write_ensure_prefix(line, word, **markup) + #self._tw.write(word, **markup) + else: + self.ensure_newline() + if hasattr(rep, 'node'): + self._tw.write("[%s] " % rep.node.gateway.id) + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + + def pytest_collection(self): + if not self.hasmarkup: + self.write("collecting ... 
", bold=True) + + def pytest_collectreport(self, report): + if report.failed: + self.stats.setdefault("error", []).append(report) + elif report.skipped: + self.stats.setdefault("skipped", []).append(report) + items = [x for x in report.result if isinstance(x, pytest.Item)] + self._numcollected += len(items) + if self.hasmarkup: + #self.write_fspath_result(report.fspath, 'E') + self.report_collect() + + def report_collect(self, final=False): + errors = len(self.stats.get('error', [])) + skipped = len(self.stats.get('skipped', [])) + if final: + line = "collected " + else: + line = "collecting " + line += str(self._numcollected) + " items" + if errors: + line += " / %d errors" % errors + if skipped: + line += " / %d skipped" % skipped + if self.hasmarkup: + if final: + line += " \n" + self.rewrite(line, bold=True) + else: + self.write_line(line) + + def pytest_collection_modifyitems(self): + self.report_collect(True) + + def pytest_sessionstart(self, session): + self._sessionstarttime = py.std.time.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = ".".join(map(str, sys.version_info[:3])) + msg = "platform %s -- Python %s" % (sys.platform, verinfo) + if hasattr(sys, 'pypy_version_info'): + verinfo = ".".join(map(str, sys.pypy_version_info[:3])) + msg += "[pypy-%s]" % verinfo + msg += " -- pytest-%s" % (py.test.__version__) + if self.verbosity > 0 or self.config.option.debug or \ + getattr(self.config.option, 'pastebin', None): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header(config=self.config) + lines.reverse() + for line in flatten(lines): + self.write_line(line) + + def pytest_collection_finish(self, session): + if self.config.option.collectonly: + self._printcollecteditems(session.items) + if self.stats.get('failed'): + self._tw.sep("!", "collection failures") + for rep in self.stats.get('failed'): + rep.toterminal(self._tw) + return 1 + return 0 
+ if not self.showheader: + return + #for i, testarg in enumerate(self.config.args): + # self.write_line("test path %d: %s" %(i+1, testarg)) + + def _printcollecteditems(self, items): + # to print out items and their parent collectors + # we take care to leave out Instances aka () + # because later versions are going to get rid of them anyway + if self.config.option.verbose < 0: + for item in items: + nodeid = item.nodeid + nodeid = nodeid.replace("::()::", "::") + self._tw.line(nodeid) + return + stack = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[:len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack):]: + stack.append(col) + #if col.name == "()": + # continue + indent = (len(stack)-1) * " " + self._tw.line("%s%s" %(indent, col)) + + def pytest_sessionfinish(self, exitstatus, __multicall__): + __multicall__.execute() + self._tw.line("") + if exitstatus in (0, 1, 2): + self.summary_errors() + self.summary_failures() + self.config.hook.pytest_terminal_summary(terminalreporter=self) + if exitstatus == 2: + self._report_keyboardinterrupt() + self.summary_deselected() + self.summary_stats() + + def pytest_keyboard_interrupt(self, excinfo): + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def _report_keyboardinterrupt(self): + excrepr = self._keyboardinterrupt_memo + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + + def _locationline(self, collect_fspath, fspath, lineno, domain): + # collect_fspath comes from testid which has a "/"-normalized path + if fspath and fspath.replace("\\", "/") != collect_fspath: + fspath = "%s <- %s" % (collect_fspath, fspath) + if fspath: + line = str(fspath) + if lineno is not None: + lineno += 1 + line += ":" + str(lineno) + if domain: + 
line += ": " + str(domain) + else: + line = "[location]" + return line + " " + + def _getfailureheadline(self, rep): + if hasattr(rep, 'location'): + fspath, lineno, domain = rep.location + return domain + else: + return "test session" # XXX? + + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # summaries for sessionfinish + # + def getreports(self, name): + l = [] + for x in self.stats.get(name, []): + if not hasattr(x, '_pdbshown'): + l.append(x) + return l + + def summary_failures(self): + if self.config.option.tbstyle != "no": + reports = self.getreports('failed') + if not reports: + return + self.write_sep("=", "FAILURES") + for rep in reports: + if self.config.option.tbstyle == "line": + line = self._getcrashline(rep) + self.write_line(line) + else: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg) + rep.toterminal(self._tw) + + def summary_errors(self): + if self.config.option.tbstyle != "no": + reports = self.getreports('error') + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats['error']: + msg = self._getfailureheadline(rep) + if not hasattr(rep, 'when'): + # collect + msg = "ERROR collecting " + msg + elif rep.when == "setup": + msg = "ERROR at setup of " + msg + elif rep.when == "teardown": + msg = "ERROR at teardown of " + msg + self.write_sep("_", msg) + rep.toterminal(self._tw) + + def summary_stats(self): + session_duration = py.std.time.time() - self._sessionstarttime + + keys = "failed passed skipped deselected".split() + for key in self.stats.keys(): + if key not in keys: + keys.append(key) + parts = [] + for key in keys: + val = self.stats.get(key, None) + if val: + parts.append("%d %s" %(len(val), key)) + line = ", ".join(parts) + # XXX coloring + msg = "%s in %.2f seconds" %(line, session_duration) + if self.verbosity >= 0: + self.write_sep("=", msg, bold=True) + 
else: + self.write_line(msg, bold=True) + + def summary_deselected(self): + if 'deselected' in self.stats: + self.write_sep("=", "%d tests deselected by %r" %( + len(self.stats['deselected']), self.config.option.keyword), bold=True) + +def repr_pythonversion(v=None): + if v is None: + v = sys.version_info + try: + return "%s.%s.%s-%s-%s" % v + except (TypeError, ValueError): + return str(v) + +def flatten(l): + for x in l: + if isinstance(x, (list, tuple)): + for y in flatten(x): + yield y + else: + yield x + diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py new file mode 100644 --- /dev/null +++ b/_pytest/tmpdir.py @@ -0,0 +1,71 @@ +""" support for providing temporary directories to test functions. """ +import pytest, py +from _pytest.monkeypatch import monkeypatch + +class TempdirHandler: + def __init__(self, config): + self.config = config + self.trace = config.trace.get("tmpdir") + + def ensuretemp(self, string, dir=1): + """ (deprecated) return temporary directory path with + the given string as the trailing part. It is usually + better to use the 'tmpdir' function argument which + provides an empty unique-per-test-invocation directory + and is guaranteed to be empty. + """ + #py.log._apiwarn(">1.1", "use tmpdir function argument") + return self.getbasetemp().ensure(string, dir=dir) + + def mktemp(self, basename, numbered=True): + basetemp = self.getbasetemp() + if not numbered: + p = basetemp.mkdir(basename) + else: + p = py.path.local.make_numbered_dir(prefix=basename, + keep=0, rootdir=basetemp, lock_timeout=None) + self.trace("mktemp", p) + return p + + def getbasetemp(self): + """ return base temporary directory. 
""" + try: + return self._basetemp + except AttributeError: + basetemp = self.config.option.basetemp + if basetemp: + basetemp = py.path.local(basetemp) + if basetemp.check(): + basetemp.remove() + basetemp.mkdir() + else: + basetemp = py.path.local.make_numbered_dir(prefix='pytest-') + self._basetemp = t = basetemp + self.trace("new basetemp", t) + return t + + def finish(self): + self.trace("finish") + +def pytest_configure(config): + config._mp = mp = monkeypatch() + t = TempdirHandler(config) + mp.setattr(config, '_tmpdirhandler', t, raising=False) + mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) + +def pytest_unconfigure(config): + config._tmpdirhandler.finish() + config._mp.undo() + +def pytest_funcarg__tmpdir(request): + """return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a `py.path.local`_ + path object. + """ + name = request._pyfuncitem.name + name = py.std.re.sub("[\W]", "_", name) + x = request.config._tmpdirhandler.mktemp(name, numbered=True) + return x.realpath() + diff --git a/_pytest/unittest.py b/_pytest/unittest.py new file mode 100644 --- /dev/null +++ b/_pytest/unittest.py @@ -0,0 +1,143 @@ +""" discovery and running of std-library "unittest" style tests. 
""" +import pytest, py +import sys, pdb + +def pytest_pycollect_makeitem(collector, name, obj): + unittest = sys.modules.get('unittest') + if unittest is None: + return # nobody can have derived unittest.TestCase + try: + isunit = issubclass(obj, unittest.TestCase) + except KeyboardInterrupt: + raise + except Exception: + pass + else: + if isunit: + return UnitTestCase(name, parent=collector) + +class UnitTestCase(pytest.Class): + def collect(self): + loader = py.std.unittest.TestLoader() + for name in loader.getTestCaseNames(self.obj): + yield TestCaseFunction(name, parent=self) + + def setup(self): + meth = getattr(self.obj, 'setUpClass', None) + if meth is not None: + meth() + super(UnitTestCase, self).setup() + + def teardown(self): + meth = getattr(self.obj, 'tearDownClass', None) + if meth is not None: + meth() + super(UnitTestCase, self).teardown() + +class TestCaseFunction(pytest.Function): + _excinfo = None + + def __init__(self, name, parent): + super(TestCaseFunction, self).__init__(name, parent) + if hasattr(self._obj, 'todo'): + getattr(self._obj, 'im_func', self._obj).xfail = \ + pytest.mark.xfail(reason=str(self._obj.todo)) + + def setup(self): + self._testcase = self.parent.obj(self.name) + self._obj = getattr(self._testcase, self.name) + if hasattr(self._testcase, 'setup_method'): + self._testcase.setup_method(self._obj) + + def teardown(self): + if hasattr(self._testcase, 'teardown_method'): + self._testcase.teardown_method(self._obj) + + def startTest(self, testcase): + pass + + def _addexcinfo(self, rawexcinfo): + # unwrap potential exception info (see twisted trial support below) + rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo) + try: + excinfo = py.code.ExceptionInfo(rawexcinfo) + except TypeError: + try: + try: + l = py.std.traceback.format_exception(*rawexcinfo) + l.insert(0, "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n") + pytest.fail("".join(l), pytrace=False) + except (pytest.fail.Exception, 
KeyboardInterrupt): + raise + except: + pytest.fail("ERROR: Unknown Incompatible Exception " + "representation:\n%r" %(rawexcinfo,), pytrace=False) + except KeyboardInterrupt: + raise + except pytest.fail.Exception: + excinfo = py.code.ExceptionInfo() + self.__dict__.setdefault('_excinfo', []).append(excinfo) + + def addError(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + def addFailure(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + def addSkip(self, testcase, reason): + try: + pytest.skip(reason) + except pytest.skip.Exception: + self._addexcinfo(sys.exc_info()) + def addExpectedFailure(self, testcase, rawexcinfo, reason): + try: + pytest.xfail(str(reason)) + except pytest.xfail.Exception: + self._addexcinfo(sys.exc_info()) + def addUnexpectedSuccess(self, testcase, reason): + pass + def addSuccess(self, testcase): + pass + def stopTest(self, testcase): + pass + def runtest(self): + self._testcase(result=self) + + def _prunetraceback(self, excinfo): + pytest.Function._prunetraceback(self, excinfo) + excinfo.traceback = excinfo.traceback.filter(lambda x:not x.frame.f_globals.get('__unittest')) + + at pytest.mark.tryfirst +def pytest_runtest_makereport(item, call): + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + del call.result + +# twisted trial support +def pytest_runtest_protocol(item, __multicall__): + if isinstance(item, TestCaseFunction): + if 'twisted.trial.unittest' in sys.modules: + ut = sys.modules['twisted.python.failure'] + Failure__init__ = ut.Failure.__init__.im_func + check_testcase_implements_trial_reporter() + def excstore(self, exc_value=None, exc_type=None, exc_tb=None): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + Failure__init__(self, exc_value, exc_type, exc_tb) + ut.Failure.__init__ = excstore + try: + return __multicall__.execute() + 
finally: + ut.Failure.__init__ = Failure__init__ + +def check_testcase_implements_trial_reporter(done=[]): + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + classImplements(TestCaseFunction, IReporter) + done.append(1) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -30,7 +30,7 @@ def pytest_addoption(parser): group = parser.getgroup("complicance testing options") group.addoption('-T', '--timeout', action="store", type="string", - default="100", dest="timeout", + default="1000", dest="timeout", help="fail a test module after the given timeout. " "specify in seconds or 'NUMmp' aka Mega-Pystones") group.addoption('--pypy', action="store", type="string", @@ -39,11 +39,8 @@ group.addoption('--filter', action="store", type="string", default=None, dest="unittest_filter", help="Similar to -k, XXX") -option = py.test.config.option - -def gettimeout(): +def gettimeout(timeout): from test import pystone - timeout = option.timeout.lower() if timeout.endswith('mp'): megapystone = float(timeout[:-2]) t, stone = pystone.Proc0(10000) @@ -340,7 +337,7 @@ RegrTest('test_peepholer.py'), RegrTest('test_pep247.py'), RegrTest('test_pep263.py'), - RegrTest('test_pep277.py', skip=only_win32), + RegrTest('test_pep277.py'), RegrTest('test_pep292.py'), RegrTest('test_pickle.py', core=True), RegrTest('test_pickletools.py', core=False), @@ -536,43 +533,22 @@ assert not missing, "non-listed tests:\n%s" % ('\n'.join(missing),) check_testmap_complete() -class RegrDirectory(py.test.collect.Directory): - """ The central hub for gathering CPython's compliance tests - Basically we work off the above 'testmap' - which describes for all test modules their specific - type. XXX If you find errors in the classification - please correct them! 
- """ - def get(self, name, cache={}): - if not cache: - for x in testmap: - cache[x.basename] = x - return cache.get(name, None) - - def collect(self): - we_are_in_modified = self.fspath == modregrtestdir - l = [] - for x in self.fspath.listdir(): - name = x.basename - regrtest = self.get(name) - if regrtest is not None: - if bool(we_are_in_modified) ^ regrtest.ismodified(): - continue - #if option.extracttests: - # l.append(InterceptedRunModule(name, self, regrtest)) - #else: - l.append(RunFileExternal(name, parent=self, regrtest=regrtest)) - return l +def pytest_configure(config): + config._basename2spec = cache = {} + for x in testmap: + cache[x.basename] = x -def pytest_collect_directory(parent, path): - # use RegrDirectory collector for both modified and unmodified tests - if path in (modregrtestdir, regrtestdir): - return RegrDirectory(path, parent) - -def pytest_ignore_collect(path): - # ignore all files - only RegrDirectory generates tests in lib-python - if path.check(file=1): - return True +def pytest_collect_file(path, parent, __multicall__): + # don't collect files except through this hook + # implemented by clearing the list of to-be-called + # remaining hook methods + __multicall__.methods[:] = [] + regrtest = parent.config._basename2spec.get(path.basename, None) + if regrtest is None: + return + if path.dirpath() not in (modregrtestdir, regrtestdir): + return + return RunFileExternal(path.basename, parent=parent, regrtest=regrtest) class RunFileExternal(py.test.collect.File): def __init__(self, name, parent, regrtest): @@ -589,7 +565,7 @@ # # testmethod: -# invoking in a seprate process: py.py TESTFILE +# invoking in a separate process: py.py TESTFILE # import os import time @@ -615,8 +591,8 @@ 'run-script', 'regrverbose.py') regrrun = str(regr_script) - - TIMEOUT = gettimeout() + option = self.config.option + TIMEOUT = gettimeout(option.timeout.lower()) if option.pypy: execpath = py.path.local(option.pypy) if not execpath.check(): @@ -707,8 +683,11 
@@ return status, stdout.read(mode='rU'), stderr.read(mode='rU') def getresult(self, regrtest): - cmd = self.getinvocation(regrtest) - exit_status, test_stdout, test_stderr = self.getstatusouterr(cmd) + cmd = self.getinvocation(regrtest) + tempdir = py.test.ensuretemp(self.fspath.basename) + oldcwd = tempdir.chdir() + exit_status, test_stdout, test_stderr = self.getstatusouterr(cmd) + oldcwd.chdir() skipped = False timedout = test_stderr.rfind(26*"=" + "timedout" + 26*"=") != -1 if not timedout: diff --git a/lib-python/modified-2.7.0/ctypes/__init__.py b/lib-python/modified-2.7.0/ctypes/__init__.py --- a/lib-python/modified-2.7.0/ctypes/__init__.py +++ b/lib-python/modified-2.7.0/ctypes/__init__.py @@ -355,11 +355,12 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %x at %x>" % \ + return "<%s '%s', handle %r at %x>" % \ (self.__class__.__name__, self._name, - (self._handle & (_sys.maxint*2 + 1)), + (self._handle), id(self) & (_sys.maxint*2 + 1)) + def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): raise AttributeError(name) diff --git a/lib-python/modified-2.7.0/ctypes/test/test_callbacks.py b/lib-python/modified-2.7.0/ctypes/test/test_callbacks.py --- a/lib-python/modified-2.7.0/ctypes/test/test_callbacks.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_callbacks.py @@ -1,7 +1,6 @@ import unittest from ctypes import * import _ctypes_test -from ctypes.test import xfail class Callbacks(unittest.TestCase): functype = CFUNCTYPE @@ -125,7 +124,6 @@ prototype = self.functype.im_func(object) self.assertRaises(TypeError, prototype, lambda: None) - @xfail def test_issue_7959(self): proto = self.functype.im_func(None) diff --git a/lib-python/modified-2.7.0/ctypes/test/test_cast.py b/lib-python/modified-2.7.0/ctypes/test/test_cast.py --- a/lib-python/modified-2.7.0/ctypes/test/test_cast.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_cast.py @@ -2,8 +2,6 @@ import unittest import sys -from ctypes.test import xfail - 
class Test(unittest.TestCase): def test_array2pointer(self): diff --git a/lib-python/modified-2.7.0/ctypes/test/test_init.py b/lib-python/modified-2.7.0/ctypes/test/test_init.py --- a/lib-python/modified-2.7.0/ctypes/test/test_init.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_init.py @@ -1,6 +1,5 @@ from ctypes import * import unittest -from ctypes.test import xfail class X(Structure): _fields_ = [("a", c_int), @@ -21,7 +20,6 @@ class InitTest(unittest.TestCase): - @xfail def test_get(self): # make sure the only accessing a nested structure # doesn't call the structure's __new__ and __init__ diff --git a/lib-python/modified-2.7.0/ctypes/test/test_macholib.py b/lib-python/modified-2.7.0/ctypes/test/test_macholib.py --- a/lib-python/modified-2.7.0/ctypes/test/test_macholib.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_macholib.py @@ -52,7 +52,6 @@ '/usr/lib/libSystem.B.dylib') result = find_lib('z') - self.assertTrue(result.startswith('/usr/lib/libz.1')) self.assertTrue(result.endswith('.dylib')) self.assertEqual(find_lib('IOKit'), diff --git a/lib-python/modified-2.7.0/ctypes/test/test_objects.py b/lib-python/modified-2.7.0/ctypes/test/test_objects.py --- a/lib-python/modified-2.7.0/ctypes/test/test_objects.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_objects.py @@ -22,7 +22,7 @@ >>> array[4] = 'foo bar' >>> array._objects -{'4': 'foo bar'} +{'4': } >>> array[4] 'foo bar' >>> @@ -47,9 +47,9 @@ >>> x.array[0] = 'spam spam spam' >>> x._objects -{'0:2': 'spam spam spam'} +{'0:2': } >>> x.array._b_base_._objects -{'0:2': 'spam spam spam'} +{'0:2': } >>> ''' diff --git a/lib-python/modified-2.7.0/ctypes/test/test_prototypes.py b/lib-python/modified-2.7.0/ctypes/test/test_prototypes.py --- a/lib-python/modified-2.7.0/ctypes/test/test_prototypes.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_prototypes.py @@ -1,6 +1,5 @@ from ctypes import * import unittest -from ctypes.test import xfail # IMPORTANT INFO: # @@ -49,7 +48,6 @@ func.restype = c_long 
func.argtypes = None - @xfail def test_paramflags(self): # function returns c_void_p result, # and has a required parameter named 'input' diff --git a/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py b/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py --- a/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_refcounts.py @@ -90,6 +90,7 @@ return a * b * 2 f = proto(func) + gc.collect() a = sys.getrefcount(ctypes.c_int) f(1, 2) self.assertEqual(sys.getrefcount(ctypes.c_int), a) diff --git a/lib-python/2.7.0/lib2to3/Grammar.txt b/lib-python/modified-2.7.0/lib2to3/Grammar.txt copy from lib-python/2.7.0/lib2to3/Grammar.txt copy to lib-python/modified-2.7.0/lib2to3/Grammar.txt diff --git a/lib-python/2.7.0/lib2to3/PatternGrammar.txt b/lib-python/modified-2.7.0/lib2to3/PatternGrammar.txt copy from lib-python/2.7.0/lib2to3/PatternGrammar.txt copy to lib-python/modified-2.7.0/lib2to3/PatternGrammar.txt diff --git a/lib-python/2.7.0/lib2to3/__init__.py b/lib-python/modified-2.7.0/lib2to3/__init__.py copy from lib-python/2.7.0/lib2to3/__init__.py copy to lib-python/modified-2.7.0/lib2to3/__init__.py diff --git a/lib-python/2.7.0/lib2to3/btm_matcher.py b/lib-python/modified-2.7.0/lib2to3/btm_matcher.py copy from lib-python/2.7.0/lib2to3/btm_matcher.py copy to lib-python/modified-2.7.0/lib2to3/btm_matcher.py diff --git a/lib-python/2.7.0/lib2to3/btm_utils.py b/lib-python/modified-2.7.0/lib2to3/btm_utils.py copy from lib-python/2.7.0/lib2to3/btm_utils.py copy to lib-python/modified-2.7.0/lib2to3/btm_utils.py diff --git a/lib-python/2.7.0/lib2to3/fixer_base.py b/lib-python/modified-2.7.0/lib2to3/fixer_base.py copy from lib-python/2.7.0/lib2to3/fixer_base.py copy to lib-python/modified-2.7.0/lib2to3/fixer_base.py diff --git a/lib-python/2.7.0/lib2to3/fixer_util.py b/lib-python/modified-2.7.0/lib2to3/fixer_util.py copy from lib-python/2.7.0/lib2to3/fixer_util.py copy to 
lib-python/modified-2.7.0/lib2to3/fixer_util.py diff --git a/lib-python/2.7.0/lib2to3/fixes/__init__.py b/lib-python/modified-2.7.0/lib2to3/fixes/__init__.py copy from lib-python/2.7.0/lib2to3/fixes/__init__.py copy to lib-python/modified-2.7.0/lib2to3/fixes/__init__.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_apply.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_apply.py copy from lib-python/2.7.0/lib2to3/fixes/fix_apply.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_apply.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_basestring.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_basestring.py copy from lib-python/2.7.0/lib2to3/fixes/fix_basestring.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_basestring.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_buffer.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_buffer.py copy from lib-python/2.7.0/lib2to3/fixes/fix_buffer.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_buffer.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_callable.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_callable.py copy from lib-python/2.7.0/lib2to3/fixes/fix_callable.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_callable.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_dict.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_dict.py copy from lib-python/2.7.0/lib2to3/fixes/fix_dict.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_dict.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_except.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_except.py copy from lib-python/2.7.0/lib2to3/fixes/fix_except.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_except.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_exec.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_exec.py copy from lib-python/2.7.0/lib2to3/fixes/fix_exec.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_exec.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_execfile.py 
b/lib-python/modified-2.7.0/lib2to3/fixes/fix_execfile.py copy from lib-python/2.7.0/lib2to3/fixes/fix_execfile.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_execfile.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_exitfunc.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_exitfunc.py copy from lib-python/2.7.0/lib2to3/fixes/fix_exitfunc.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_exitfunc.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_filter.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_filter.py copy from lib-python/2.7.0/lib2to3/fixes/fix_filter.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_filter.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_funcattrs.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_funcattrs.py copy from lib-python/2.7.0/lib2to3/fixes/fix_funcattrs.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_funcattrs.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_future.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_future.py copy from lib-python/2.7.0/lib2to3/fixes/fix_future.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_future.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_getcwdu.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_getcwdu.py copy from lib-python/2.7.0/lib2to3/fixes/fix_getcwdu.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_getcwdu.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_has_key.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_has_key.py copy from lib-python/2.7.0/lib2to3/fixes/fix_has_key.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_has_key.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_idioms.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_idioms.py copy from lib-python/2.7.0/lib2to3/fixes/fix_idioms.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_idioms.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_import.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_import.py copy from lib-python/2.7.0/lib2to3/fixes/fix_import.py copy 
to lib-python/modified-2.7.0/lib2to3/fixes/fix_import.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_imports.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_imports.py copy from lib-python/2.7.0/lib2to3/fixes/fix_imports.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_imports.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_imports2.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_imports2.py copy from lib-python/2.7.0/lib2to3/fixes/fix_imports2.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_imports2.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_input.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_input.py copy from lib-python/2.7.0/lib2to3/fixes/fix_input.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_input.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_intern.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_intern.py copy from lib-python/2.7.0/lib2to3/fixes/fix_intern.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_intern.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_isinstance.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_isinstance.py copy from lib-python/2.7.0/lib2to3/fixes/fix_isinstance.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_isinstance.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_itertools.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_itertools.py copy from lib-python/2.7.0/lib2to3/fixes/fix_itertools.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_itertools.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_itertools_imports.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_itertools_imports.py copy from lib-python/2.7.0/lib2to3/fixes/fix_itertools_imports.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_itertools_imports.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_long.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_long.py copy from lib-python/2.7.0/lib2to3/fixes/fix_long.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_long.py diff --git 
a/lib-python/2.7.0/lib2to3/fixes/fix_map.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_map.py copy from lib-python/2.7.0/lib2to3/fixes/fix_map.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_map.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_metaclass.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_metaclass.py copy from lib-python/2.7.0/lib2to3/fixes/fix_metaclass.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_metaclass.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_methodattrs.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_methodattrs.py copy from lib-python/2.7.0/lib2to3/fixes/fix_methodattrs.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_methodattrs.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_ne.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_ne.py copy from lib-python/2.7.0/lib2to3/fixes/fix_ne.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_ne.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_next.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_next.py copy from lib-python/2.7.0/lib2to3/fixes/fix_next.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_next.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_nonzero.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_nonzero.py copy from lib-python/2.7.0/lib2to3/fixes/fix_nonzero.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_nonzero.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_numliterals.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_numliterals.py copy from lib-python/2.7.0/lib2to3/fixes/fix_numliterals.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_numliterals.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_operator.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_operator.py copy from lib-python/2.7.0/lib2to3/fixes/fix_operator.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_operator.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_paren.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_paren.py copy from 
lib-python/2.7.0/lib2to3/fixes/fix_paren.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_paren.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_print.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_print.py copy from lib-python/2.7.0/lib2to3/fixes/fix_print.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_print.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_raise.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_raise.py copy from lib-python/2.7.0/lib2to3/fixes/fix_raise.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_raise.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_raw_input.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_raw_input.py copy from lib-python/2.7.0/lib2to3/fixes/fix_raw_input.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_raw_input.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_reduce.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_reduce.py copy from lib-python/2.7.0/lib2to3/fixes/fix_reduce.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_reduce.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_renames.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_renames.py copy from lib-python/2.7.0/lib2to3/fixes/fix_renames.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_renames.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_repr.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_repr.py copy from lib-python/2.7.0/lib2to3/fixes/fix_repr.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_repr.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_set_literal.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_set_literal.py copy from lib-python/2.7.0/lib2to3/fixes/fix_set_literal.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_set_literal.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_standarderror.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_standarderror.py copy from lib-python/2.7.0/lib2to3/fixes/fix_standarderror.py copy to 
lib-python/modified-2.7.0/lib2to3/fixes/fix_standarderror.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_sys_exc.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_sys_exc.py copy from lib-python/2.7.0/lib2to3/fixes/fix_sys_exc.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_sys_exc.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_throw.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_throw.py copy from lib-python/2.7.0/lib2to3/fixes/fix_throw.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_throw.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_tuple_params.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_tuple_params.py copy from lib-python/2.7.0/lib2to3/fixes/fix_tuple_params.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_tuple_params.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_types.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_types.py copy from lib-python/2.7.0/lib2to3/fixes/fix_types.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_types.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_unicode.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_unicode.py copy from lib-python/2.7.0/lib2to3/fixes/fix_unicode.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_unicode.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_urllib.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_urllib.py copy from lib-python/2.7.0/lib2to3/fixes/fix_urllib.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_urllib.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_ws_comma.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_ws_comma.py copy from lib-python/2.7.0/lib2to3/fixes/fix_ws_comma.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_ws_comma.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_xrange.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_xrange.py copy from lib-python/2.7.0/lib2to3/fixes/fix_xrange.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_xrange.py diff --git 
a/lib-python/2.7.0/lib2to3/fixes/fix_xreadlines.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_xreadlines.py copy from lib-python/2.7.0/lib2to3/fixes/fix_xreadlines.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_xreadlines.py diff --git a/lib-python/2.7.0/lib2to3/fixes/fix_zip.py b/lib-python/modified-2.7.0/lib2to3/fixes/fix_zip.py copy from lib-python/2.7.0/lib2to3/fixes/fix_zip.py copy to lib-python/modified-2.7.0/lib2to3/fixes/fix_zip.py diff --git a/lib-python/2.7.0/lib2to3/main.py b/lib-python/modified-2.7.0/lib2to3/main.py copy from lib-python/2.7.0/lib2to3/main.py copy to lib-python/modified-2.7.0/lib2to3/main.py diff --git a/lib-python/2.7.0/lib2to3/patcomp.py b/lib-python/modified-2.7.0/lib2to3/patcomp.py copy from lib-python/2.7.0/lib2to3/patcomp.py copy to lib-python/modified-2.7.0/lib2to3/patcomp.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/__init__.py b/lib-python/modified-2.7.0/lib2to3/pgen2/__init__.py copy from lib-python/2.7.0/lib2to3/pgen2/__init__.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/__init__.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/conv.py b/lib-python/modified-2.7.0/lib2to3/pgen2/conv.py copy from lib-python/2.7.0/lib2to3/pgen2/conv.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/conv.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/driver.py b/lib-python/modified-2.7.0/lib2to3/pgen2/driver.py copy from lib-python/2.7.0/lib2to3/pgen2/driver.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/driver.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/grammar.py b/lib-python/modified-2.7.0/lib2to3/pgen2/grammar.py copy from lib-python/2.7.0/lib2to3/pgen2/grammar.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/grammar.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/literals.py b/lib-python/modified-2.7.0/lib2to3/pgen2/literals.py copy from lib-python/2.7.0/lib2to3/pgen2/literals.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/literals.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/parse.py 
b/lib-python/modified-2.7.0/lib2to3/pgen2/parse.py copy from lib-python/2.7.0/lib2to3/pgen2/parse.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/parse.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/pgen.py b/lib-python/modified-2.7.0/lib2to3/pgen2/pgen.py copy from lib-python/2.7.0/lib2to3/pgen2/pgen.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/pgen.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/token.py b/lib-python/modified-2.7.0/lib2to3/pgen2/token.py copy from lib-python/2.7.0/lib2to3/pgen2/token.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/token.py diff --git a/lib-python/2.7.0/lib2to3/pgen2/tokenize.py b/lib-python/modified-2.7.0/lib2to3/pgen2/tokenize.py copy from lib-python/2.7.0/lib2to3/pgen2/tokenize.py copy to lib-python/modified-2.7.0/lib2to3/pgen2/tokenize.py diff --git a/lib-python/2.7.0/lib2to3/pygram.py b/lib-python/modified-2.7.0/lib2to3/pygram.py copy from lib-python/2.7.0/lib2to3/pygram.py copy to lib-python/modified-2.7.0/lib2to3/pygram.py diff --git a/lib-python/2.7.0/lib2to3/pytree.py b/lib-python/modified-2.7.0/lib2to3/pytree.py copy from lib-python/2.7.0/lib2to3/pytree.py copy to lib-python/modified-2.7.0/lib2to3/pytree.py --- a/lib-python/2.7.0/lib2to3/pytree.py +++ b/lib-python/modified-2.7.0/lib2to3/pytree.py @@ -741,11 +741,12 @@ elif self.name == "bare_name": yield self._bare_name_matches(nodes) else: - # The reason for this is that hitting the recursion limit usually - # results in some ugly messages about how RuntimeErrors are being - # ignored. - save_stderr = sys.stderr - sys.stderr = StringIO() + # There used to be some monkey patching of sys.stderr here, to + # silence the error message from the RuntimError, PyPy has removed + # this because it relied on reference counting. This is because the + # caller of this function doesn't consume this generator fully, so + # the finally statement that used to be here would only be executed + # when the gc happened to run. 
try: for count, r in self._recursive_matches(nodes, 0): if self.name: @@ -758,8 +759,6 @@ if self.name: r[self.name] = nodes[:count] yield count, r - finally: - sys.stderr = save_stderr def _iterative_matches(self, nodes): """Helper to iteratively yield the matches.""" diff --git a/lib-python/2.7.0/lib2to3/refactor.py b/lib-python/modified-2.7.0/lib2to3/refactor.py copy from lib-python/2.7.0/lib2to3/refactor.py copy to lib-python/modified-2.7.0/lib2to3/refactor.py diff --git a/lib-python/2.7.0/lib2to3/tests/__init__.py b/lib-python/modified-2.7.0/lib2to3/tests/__init__.py copy from lib-python/2.7.0/lib2to3/tests/__init__.py copy to lib-python/modified-2.7.0/lib2to3/tests/__init__.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/README b/lib-python/modified-2.7.0/lib2to3/tests/data/README copy from lib-python/2.7.0/lib2to3/tests/data/README copy to lib-python/modified-2.7.0/lib2to3/tests/data/README diff --git a/lib-python/2.7.0/lib2to3/tests/data/bom.py b/lib-python/modified-2.7.0/lib2to3/tests/data/bom.py copy from lib-python/2.7.0/lib2to3/tests/data/bom.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/bom.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/crlf.py b/lib-python/modified-2.7.0/lib2to3/tests/data/crlf.py copy from lib-python/2.7.0/lib2to3/tests/data/crlf.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/crlf.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/different_encoding.py b/lib-python/modified-2.7.0/lib2to3/tests/data/different_encoding.py copy from lib-python/2.7.0/lib2to3/tests/data/different_encoding.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/different_encoding.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/bad_order.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/bad_order.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/bad_order.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/bad_order.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/__init__.py 
b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/__init__.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/__init__.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/__init__.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_explicit.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_explicit.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_explicit.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_explicit.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_first.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_first.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_first.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_first.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_last.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_last.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_last.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_last.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_parrot.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_parrot.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_parrot.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_parrot.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_preorder.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_preorder.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/myfixes/fix_preorder.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/myfixes/fix_preorder.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/no_fixer_cls.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/no_fixer_cls.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/no_fixer_cls.py 
copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/no_fixer_cls.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/fixers/parrot_example.py b/lib-python/modified-2.7.0/lib2to3/tests/data/fixers/parrot_example.py copy from lib-python/2.7.0/lib2to3/tests/data/fixers/parrot_example.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/fixers/parrot_example.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/infinite_recursion.py b/lib-python/modified-2.7.0/lib2to3/tests/data/infinite_recursion.py copy from lib-python/2.7.0/lib2to3/tests/data/infinite_recursion.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/infinite_recursion.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/py2_test_grammar.py b/lib-python/modified-2.7.0/lib2to3/tests/data/py2_test_grammar.py copy from lib-python/2.7.0/lib2to3/tests/data/py2_test_grammar.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/py2_test_grammar.py diff --git a/lib-python/2.7.0/lib2to3/tests/data/py3_test_grammar.py b/lib-python/modified-2.7.0/lib2to3/tests/data/py3_test_grammar.py copy from lib-python/2.7.0/lib2to3/tests/data/py3_test_grammar.py copy to lib-python/modified-2.7.0/lib2to3/tests/data/py3_test_grammar.py diff --git a/lib-python/2.7.0/lib2to3/tests/pytree_idempotency.py b/lib-python/modified-2.7.0/lib2to3/tests/pytree_idempotency.py copy from lib-python/2.7.0/lib2to3/tests/pytree_idempotency.py copy to lib-python/modified-2.7.0/lib2to3/tests/pytree_idempotency.py diff --git a/lib-python/2.7.0/lib2to3/tests/support.py b/lib-python/modified-2.7.0/lib2to3/tests/support.py copy from lib-python/2.7.0/lib2to3/tests/support.py copy to lib-python/modified-2.7.0/lib2to3/tests/support.py diff --git a/lib-python/2.7.0/lib2to3/tests/test_all_fixers.py b/lib-python/modified-2.7.0/lib2to3/tests/test_all_fixers.py copy from lib-python/2.7.0/lib2to3/tests/test_all_fixers.py copy to lib-python/modified-2.7.0/lib2to3/tests/test_all_fixers.py diff --git a/lib-python/2.7.0/lib2to3/tests/test_fixers.py 
b/lib-python/modified-2.7.0/lib2to3/tests/test_fixers.py copy from lib-python/2.7.0/lib2to3/tests/test_fixers.py copy to lib-python/modified-2.7.0/lib2to3/tests/test_fixers.py diff --git a/lib-python/2.7.0/lib2to3/tests/test_main.py b/lib-python/modified-2.7.0/lib2to3/tests/test_main.py copy from lib-python/2.7.0/lib2to3/tests/test_main.py copy to lib-python/modified-2.7.0/lib2to3/tests/test_main.py diff --git a/lib-python/2.7.0/lib2to3/tests/test_parser.py b/lib-python/modified-2.7.0/lib2to3/tests/test_parser.py copy from lib-python/2.7.0/lib2to3/tests/test_parser.py copy to lib-python/modified-2.7.0/lib2to3/tests/test_parser.py diff --git a/lib-python/2.7.0/lib2to3/tests/test_pytree.py b/lib-python/modified-2.7.0/lib2to3/tests/test_pytree.py copy from lib-python/2.7.0/lib2to3/tests/test_pytree.py copy to lib-python/modified-2.7.0/lib2to3/tests/test_pytree.py diff --git a/lib-python/2.7.0/lib2to3/tests/test_refactor.py b/lib-python/modified-2.7.0/lib2to3/tests/test_refactor.py copy from lib-python/2.7.0/lib2to3/tests/test_refactor.py copy to lib-python/modified-2.7.0/lib2to3/tests/test_refactor.py diff --git a/lib-python/2.7.0/lib2to3/tests/test_util.py b/lib-python/modified-2.7.0/lib2to3/tests/test_util.py copy from lib-python/2.7.0/lib2to3/tests/test_util.py copy to lib-python/modified-2.7.0/lib2to3/tests/test_util.py diff --git a/lib-python/modified-2.7.0/test/list_tests.py b/lib-python/modified-2.7.0/test/list_tests.py --- a/lib-python/modified-2.7.0/test/list_tests.py +++ b/lib-python/modified-2.7.0/test/list_tests.py @@ -476,7 +476,11 @@ u += "eggs" self.assertEqual(u, self.type2test("spameggs")) - self.assertRaises(TypeError, u.__iadd__, None) + def f_iadd(u, x): + u += x + return u + + self.assertRaises(TypeError, f_iadd, u, None) def test_imul(self): u = self.type2test([0, 1]) diff --git a/lib-python/modified-2.7.0/test/test_ast.py b/lib-python/modified-2.7.0/test/test_ast.py --- a/lib-python/modified-2.7.0/test/test_ast.py +++ 
b/lib-python/modified-2.7.0/test/test_ast.py @@ -195,22 +195,26 @@ self._assertTrueorder(value, parent_pos) def test_AST_objects(self): - x = ast.AST() - try: - x.foobar = 21 - except AttributeError, e: - self.assertEquals(e.args[0], - "'_ast.AST' object has no attribute 'foobar'") - else: - self.assert_(False) + if test_support.check_impl_detail(): + # PyPy also provides a __dict__ to the ast.AST base class. - try: - ast.AST(lineno=2) - except AttributeError, e: - self.assertEquals(e.args[0], - "'_ast.AST' object has no attribute 'lineno'") - else: - self.assert_(False) + x = ast.AST() + try: + x.foobar = 21 + except AttributeError, e: + self.assertEquals(e.args[0], + "'_ast.AST' object has no attribute 'foobar'") + else: + self.assert_(False) + + try: + ast.AST(lineno=2) + except AttributeError, e: + self.assertEquals(e.args[0], + "'_ast.AST' object has no attribute 'lineno'") + else: + self.assert_(False) + try: ast.AST(2) except TypeError, e: diff --git a/lib-python/2.7.0/test/test_cmd_line_script.py b/lib-python/modified-2.7.0/test/test_cmd_line_script.py copy from lib-python/2.7.0/test/test_cmd_line_script.py copy to lib-python/modified-2.7.0/test/test_cmd_line_script.py --- a/lib-python/2.7.0/test/test_cmd_line_script.py +++ b/lib-python/modified-2.7.0/test/test_cmd_line_script.py @@ -112,6 +112,8 @@ self._check_script(script_dir, script_name, script_dir, '') def test_directory_compiled(self): + if test.test_support.check_impl_detail(pypy=True): + raise unittest.SkipTest("pypy won't load lone .pyc files") with temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__') compiled_name = compile_script(script_name) @@ -173,6 +175,8 @@ script_name, 'test_pkg') def test_package_compiled(self): + if test.test_support.check_impl_detail(pypy=True): + raise unittest.SkipTest("pypy won't load lone .pyc files") with temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) diff --git 
a/lib-python/2.7.0/test/test_runpy.py b/lib-python/modified-2.7.0/test/test_runpy.py copy from lib-python/2.7.0/test/test_runpy.py copy to lib-python/modified-2.7.0/test/test_runpy.py --- a/lib-python/2.7.0/test/test_runpy.py +++ b/lib-python/modified-2.7.0/test/test_runpy.py @@ -5,10 +5,15 @@ import sys import re import tempfile -from test.test_support import verbose, run_unittest, forget +from test.test_support import verbose, run_unittest, forget, check_impl_detail from test.script_helper import (temp_dir, make_script, compile_script, make_pkg, make_zip_script, make_zip_pkg) +if check_impl_detail(pypy=True): + no_lone_pyc_file = True +else: + no_lone_pyc_file = False + from runpy import _run_code, _run_module_code, run_module, run_path # Note: This module can't safely test _run_module_as_main as it @@ -168,13 +173,14 @@ self.assertIn("x", d1) self.assertTrue(d1["x"] == 1) del d1 # Ensure __loader__ entry doesn't keep file open - __import__(mod_name) - os.remove(mod_fname) - if verbose: print "Running from compiled:", mod_name - d2 = run_module(mod_name) # Read from bytecode - self.assertIn("x", d2) - self.assertTrue(d2["x"] == 1) - del d2 # Ensure __loader__ entry doesn't keep file open + if not no_lone_pyc_file: + __import__(mod_name) + os.remove(mod_fname) + if verbose: print "Running from compiled:", mod_name + d2 = run_module(mod_name) # Read from bytecode + self.assertIn("x", d2) + self.assertTrue(d2["x"] == 1) + del d2 # Ensure __loader__ entry doesn't keep file open finally: self._del_pkg(pkg_dir, depth, mod_name) if verbose: print "Module executed successfully" @@ -190,13 +196,14 @@ self.assertIn("x", d1) self.assertTrue(d1["x"] == 1) del d1 # Ensure __loader__ entry doesn't keep file open - __import__(mod_name) - os.remove(mod_fname) - if verbose: print "Running from compiled:", pkg_name - d2 = run_module(pkg_name) # Read from bytecode - self.assertIn("x", d2) - self.assertTrue(d2["x"] == 1) - del d2 # Ensure __loader__ entry doesn't keep file open + if 
not no_lone_pyc_file: + __import__(mod_name) + os.remove(mod_fname) + if verbose: print "Running from compiled:", pkg_name + d2 = run_module(pkg_name) # Read from bytecode + self.assertIn("x", d2) + self.assertTrue(d2["x"] == 1) + del d2 # Ensure __loader__ entry doesn't keep file open finally: self._del_pkg(pkg_dir, depth, pkg_name) if verbose: print "Package executed successfully" @@ -244,15 +251,17 @@ self.assertIn("sibling", d1) self.assertIn("nephew", d1) del d1 # Ensure __loader__ entry doesn't keep file open - __import__(mod_name) - os.remove(mod_fname) - if verbose: print "Running from compiled:", mod_name - d2 = run_module(mod_name, run_name=run_name) # Read from bytecode - self.assertIn("__package__", d2) - self.assertTrue(d2["__package__"] == pkg_name) - self.assertIn("sibling", d2) - self.assertIn("nephew", d2) - del d2 # Ensure __loader__ entry doesn't keep file open + if not no_lone_pyc_file: + __import__(mod_name) + os.remove(mod_fname) + if verbose: print "Running from compiled:", mod_name + # Read from bytecode + d2 = run_module(mod_name, run_name=run_name) + self.assertIn("__package__", d2) + self.assertTrue(d2["__package__"] == pkg_name) + self.assertIn("sibling", d2) + self.assertIn("nephew", d2) + del d2 # Ensure __loader__ entry doesn't keep file open finally: self._del_pkg(pkg_dir, depth, mod_name) if verbose: print "Module executed successfully" @@ -345,6 +354,8 @@ script_dir, '') def test_directory_compiled(self): + if no_lone_pyc_file: + return with temp_dir() as script_dir: mod_name = '__main__' script_name = self._make_test_script(script_dir, mod_name) diff --git a/lib-python/modified-2.7.0/test/test_threading.py b/lib-python/modified-2.7.0/test/test_threading.py --- a/lib-python/modified-2.7.0/test/test_threading.py +++ b/lib-python/modified-2.7.0/test/test_threading.py @@ -429,6 +429,9 @@ def joiningfunc(mainthread): mainthread.join() print 'end of thread' + # stdout is fully buffered because not a tty, we have to flush + # before 
exit. + sys.stdout.flush() \n""" + script import subprocess diff --git a/lib-python/modified-2.7.0/test/test_weakset.py b/lib-python/modified-2.7.0/test/test_weakset.py --- a/lib-python/modified-2.7.0/test/test_weakset.py +++ b/lib-python/modified-2.7.0/test/test_weakset.py @@ -332,10 +332,11 @@ next(it) # Trigger internal iteration # Destroy an item del items[-1] - gc.collect() # just in case + test_support.gc_collect() # We have removed either the first consumed items, or another one self.assertIn(len(list(it)), [len(items), len(items) - 1]) del it + test_support.gc_collect() # The removal has been committed self.assertEqual(len(s), len(items)) diff --git a/lib_pypy/_csv.py b/lib_pypy/_csv.py --- a/lib_pypy/_csv.py +++ b/lib_pypy/_csv.py @@ -256,25 +256,33 @@ while True: if c in '\n\r': # end of line - return [fields] + if pos2 > pos: + self._parse_add_char(line[pos:pos2]) + pos = pos2 self._parse_save_field() self.state = self.EAT_CRNL elif c == self.dialect.escapechar: # possible escaped character + pos2 -= 1 self.state = self.ESCAPED_CHAR elif c == self.dialect.delimiter: # save field - wait for new field + if pos2 > pos: + self._parse_add_char(line[pos:pos2]) + pos = pos2 self._parse_save_field() self.state = self.START_FIELD else: # normal character - save in field pos2 += 1 - c = line[pos2] - continue + if pos2 < len(line): + c = line[pos2] + continue break if pos2 > pos: self._parse_add_char(line[pos:pos2]) - pos = pos2 - + pos = pos2 - 1 + elif self.state == self.START_RECORD: if c in '\n\r': self.state = self.EAT_CRNL diff --git a/lib_pypy/_ctypes/__init__.py b/lib_pypy/_ctypes/__init__.py --- a/lib_pypy/_ctypes/__init__.py +++ b/lib_pypy/_ctypes/__init__.py @@ -4,7 +4,7 @@ from _ctypes.primitive import _SimpleCData from _ctypes.pointer import _Pointer, _cast_addr from _ctypes.pointer import POINTER, pointer, _pointer_type_cache -from _ctypes.function import CFuncPtr +from _ctypes.function import CFuncPtr, call_function from _ctypes.dll import dlopen 
from _ctypes.structure import Structure from _ctypes.array import Array diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -1,23 +1,40 @@ - -from _ctypes.basics import _CData, _CDataMeta, cdata_from_address -from _ctypes.primitive import SimpleType -from _ctypes.basics import ArgumentError, keepalive_key -from _ctypes.builtin import set_errno, set_last_error import _rawffi import sys import traceback +from _ctypes.basics import ArgumentError, keepalive_key +from _ctypes.basics import _CData, _CDataMeta, cdata_from_address +from _ctypes.builtin import set_errno, set_last_error +from _ctypes.primitive import SimpleType + # XXX this file needs huge refactoring I fear PARAMFLAG_FIN = 0x1 PARAMFLAG_FOUT = 0x2 PARAMFLAG_FLCID = 0x4 +PARAMFLAG_COMBINED = PARAMFLAG_FIN | PARAMFLAG_FOUT | PARAMFLAG_FLCID + +VALID_PARAMFLAGS = ( + 0, + PARAMFLAG_FIN, + PARAMFLAG_FIN | PARAMFLAG_FOUT, + PARAMFLAG_FIN | PARAMFLAG_FLCID + ) + +WIN64 = sys.platform == 'win32' and sys.maxint == 2**63 - 1 + def get_com_error(errcode, riid, pIunk): "Win32 specific: build a COM Error exception" # XXX need C support code from _ctypes import COMError return COMError(errcode, None, None) +def call_function(func, args): + "Only for debugging so far: So that we can call CFunction instances" + funcptr = CFuncPtr(func) + funcptr.restype = int + return funcptr(*args) + class CFuncPtrType(_CDataMeta): # XXX write down here defaults and such things @@ -54,10 +71,11 @@ def _getargtypes(self): return self._argtypes_ + def _setargtypes(self, argtypes): self._ptr = None if argtypes is None: - self._argtypes_ = None + self._argtypes_ = () else: for i, argtype in enumerate(argtypes): if not hasattr(argtype, 'from_param'): @@ -65,38 +83,91 @@ "item %d in _argtypes_ has no from_param method" % ( i + 1,)) self._argtypes_ = argtypes + argtypes = property(_getargtypes, _setargtypes) + def _getparamflags(self): + return 
self._paramflags + + def _setparamflags(self, paramflags): + if paramflags is None or not self._argtypes_: + self._paramflags = None + return + if not isinstance(paramflags, tuple): + raise TypeError("paramflags must be a tuple or None") + if len(paramflags) != len(self._argtypes_): + raise ValueError("paramflags must have the same length as argtypes") + for idx, paramflag in enumerate(paramflags): + paramlen = len(paramflag) + name = default = None + if paramlen == 1: + flag = paramflag[0] + elif paramlen == 2: + flag, name = paramflag + elif paramlen == 3: + flag, name, default = paramflag + else: + raise TypeError( + "paramflags must be a sequence of (int [,string [,value]]) " + "tuples" + ) + if not isinstance(flag, int): + raise TypeError( + "paramflags must be a sequence of (int [,string [,value]]) " + "tuples" + ) + _flag = flag & PARAMFLAG_COMBINED + if _flag == PARAMFLAG_FOUT: + typ = self._argtypes_[idx] + if getattr(typ, '_ffiargshape', None) not in ('P', 'z', 'Z'): + raise TypeError( + "'out' parameter %d must be a pointer type, not %s" + % (idx+1, type(typ).__name__) + ) + elif _flag not in VALID_PARAMFLAGS: + raise TypeError("paramflag value %d not supported" % flag) + self._paramflags = paramflags + + paramflags = property(_getparamflags, _setparamflags) + def _getrestype(self): return self._restype_ + def _setrestype(self, restype): self._ptr = None if restype is int: from ctypes import c_int restype = c_int - if not isinstance(restype, _CDataMeta) and not restype is None and \ - not callable(restype): - raise TypeError("Expected ctypes type, got %s" % (restype,)) + if not (isinstance(restype, _CDataMeta) or restype is None or + callable(restype)): + raise TypeError("restype must be a type, a callable, or None") self._restype_ = restype + def _delrestype(self): self._ptr = None del self._restype_ + restype = property(_getrestype, _setrestype, _delrestype) def _geterrcheck(self): return getattr(self, '_errcheck_', None) + def _seterrcheck(self, 
errcheck): if not callable(errcheck): raise TypeError("The errcheck attribute must be callable") self._errcheck_ = errcheck + def _delerrcheck(self): try: del self._errcheck_ except AttributeError: pass + errcheck = property(_geterrcheck, _seterrcheck, _delerrcheck) def _ffishapes(self, args, restype): + if args is None: + args = [] argtypes = [arg._ffiargshape for arg in args] if restype is not None: if not isinstance(restype, SimpleType): @@ -110,53 +181,66 @@ self.name = None self._objects = {keepalive_key(0):self} self._needs_free = True - argument = None - if len(args) == 1: - argument = args[0] - if isinstance(argument, (int, long)): - # direct construction from raw address + # Empty function object -- this is needed for casts + if not args: + self._buffer = _rawffi.Array('P')(1) + return + + argsl = list(args) + argument = argsl.pop(0) + + # Direct construction from raw address + if isinstance(argument, (int, long)) and not argsl: ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_) - self._ptr = _rawffi.FuncPtr(argument, ffiargs, ffires, - self._flags_) + self._ptr = _rawffi.FuncPtr(argument, ffiargs, ffires, self._flags_) self._buffer = self._ptr.byptr() - elif callable(argument): - # A callback into python + return + + # A callback into Python + if callable(argument) and not argsl: self.callable = argument ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_) if self._restype_ is None: ffires = None - self._ptr = _rawffi.CallbackPtr(self._wrap_callable(argument, - self.argtypes), - ffiargs, ffires, self._flags_) + self._ptr = _rawffi.CallbackPtr(self._wrap_callable( + argument, self.argtypes + ), ffiargs, ffires, self._flags_) self._buffer = self._ptr.byptr() - elif isinstance(argument, tuple) and len(argument) == 2: - # function exported from a shared library + return + + # Function exported from a shared library + if isinstance(argument, tuple) and len(argument) == 2: import ctypes - self.name, self.dll = argument - if 
isinstance(self.dll, str): - self.dll = ctypes.CDLL(self.dll) - # we need to check dll anyway + self.name, dll = argument + if isinstance(dll, str): + self.dll = ctypes.CDLL(dll) + else: + self.dll = dll + if argsl: + self.paramflags = argsl.pop(0) + if argsl: + raise TypeError("Unknown constructor %s" % (args,)) + # We need to check dll anyway ptr = self._getfuncptr([], ctypes.c_int) self._buffer = ptr.byptr() + return - elif (sys.platform == 'win32' and - len(args) >= 2 and isinstance(args[0], (int, long))): - # A COM function call, by index + # A COM function call, by index + if (sys.platform == 'win32' and isinstance(argument, (int, long)) + and argsl): ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_) - self._com_index = args[0] + 0x1000 - self.name = args[1] - if len(args) > 2: - self._paramflags = args[2] - # XXX ignored iid = args[3] + self._com_index = argument + 0x1000 + self.name = argsl.pop(0) + if argsl: + self.paramflags = argsl.pop(0) + if argsl: + self._com_iid = argsl.pop(0) + if argsl: + raise TypeError("Unknown constructor %s" % (args,)) + return - elif len(args) == 0: - # Empty function object. 
- # this is needed for casts - self._buffer = _rawffi.Array('P')(1) - return - else: - raise TypeError("Unknown constructor %s" % (args,)) + raise TypeError("Unknown constructor %s" % (args,)) def _wrap_callable(self, to_call, argtypes): def f(*args): @@ -166,30 +250,31 @@ return to_call(*args) return f - def __call__(self, *args): + def __call__(self, *args, **kwargs): + argtypes = self._argtypes_ if self.callable is not None: - if len(args) == len(self._argtypes_): + if len(args) == len(argtypes): pass elif self._flags_ & _rawffi.FUNCFLAG_CDECL: - if len(args) < len(self._argtypes_): - plural = len(self._argtypes_) > 1 and "s" or "" + if len(args) < len(argtypes): + plural = len(argtypes) > 1 and "s" or "" raise TypeError( "This function takes at least %d argument%s (%s given)" - % (len(self._argtypes_), plural, len(args))) + % (len(argtypes), plural, len(args))) else: # For cdecl functions, we allow more actual arguments # than the length of the argtypes tuple. args = args[:len(self._argtypes_)] else: - plural = len(self._argtypes_) > 1 and "s" or "" + plural = len(argtypes) > 1 and "s" or "" raise TypeError( "This function takes %d argument%s (%s given)" - % (len(self._argtypes_), plural, len(args))) + % (len(argtypes), plural, len(args))) # check that arguments are convertible ## XXX Not as long as ctypes.cast is a callback function with ## py_object arguments... 
- ## self._convert_args(self._argtypes_, args) + ## self._convert_args(argtypes, args, {}) try: res = self.callable(*args) @@ -201,22 +286,26 @@ if self._restype_ is not None: return res return - argtypes = self._argtypes_ + + if argtypes is None: + argtypes = [] if self._com_index: from ctypes import cast, c_void_p, POINTER + if not args: + raise ValueError( + "native COM method call without 'this' parameter" + ) thisarg = cast(args[0], POINTER(POINTER(c_void_p))).contents argtypes = [c_void_p] + list(argtypes) args = list(args) args[0] = args[0].value else: thisarg = None - - if argtypes is None: - argtypes = [] - args = self._convert_args(argtypes, args) + + args, outargs = self._convert_args(argtypes, args, kwargs) argtypes = [type(arg) for arg in args] - + restype = self._restype_ funcptr = self._getfuncptr(argtypes, restype, thisarg) if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO: @@ -231,7 +320,27 @@ set_errno(_rawffi.get_errno()) if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR: set_last_error(_rawffi.get_last_error()) - result = self._build_result(restype, resbuffer, argtypes, args) + + result = None + if self._com_index: + if resbuffer[0] & 0x80000000: + raise get_com_error(resbuffer[0], + self._com_iid, args[0]) + else: + result = int(resbuffer[0]) + elif restype is not None: + checker = getattr(self.restype, '_check_retval_', None) + if checker: + val = restype(resbuffer[0]) + # the original ctypes seems to make the distinction between + # classes defining a new type, and their subclasses + if '_type_' in restype.__dict__: + val = val.value + result = checker(val) + elif not isinstance(restype, _CDataMeta): + result = restype(resbuffer[0]) + else: + result = restype._CData_retval(resbuffer) # The 'errcheck' protocol if self._errcheck_: @@ -244,8 +353,13 @@ if v is not args: result = v - return result + if not outargs: + return result + if len(outargs) == 1: + return outargs[0] + + return tuple(outargs) def _getfuncptr(self, argtypes, restype, 
thisarg=None): if self._ptr is not None and argtypes is self._argtypes_: @@ -268,14 +382,17 @@ raise ValueError("COM method call without VTable") ptr = thisarg[self._com_index - 0x1000] return _rawffi.FuncPtr(ptr, argshapes, resshape, self._flags_) - + cdll = self.dll._handle try: return cdll.ptr(self.name, argshapes, resshape, self._flags_) except AttributeError: if self._flags_ & _rawffi.FUNCFLAG_CDECL: raise - + # Win64 has no stdcall calling conv, so it should also not have the + # name mangling of it. + if WIN64: + raise # For stdcall, try mangled names: # funcname -> _funcname@ # where n is 0, 4, 8, 12, ..., 128 @@ -289,13 +406,12 @@ raise @staticmethod - def _conv_param(argtype, arg, index): + def _conv_param(argtype, arg): from ctypes import c_char_p, c_wchar_p, c_void_p, c_int if argtype is not None: arg = argtype.from_param(arg) if hasattr(arg, '_as_parameter_'): arg = arg._as_parameter_ - if isinstance(arg, _CData): # The usual case when argtype is defined cobj = arg @@ -309,133 +425,91 @@ cobj = c_int(arg) else: raise TypeError("Don't know how to handle %s" % (arg,)) - return cobj - def _convert_args(self, argtypes, args): - wrapped_args = [] - consumed = 0 + def _convert_args(self, argtypes, args, kwargs, marker=object()): + callargs = [] + outargs = [] + total = len(args) + paramflags = self._paramflags + + if self._com_index: + inargs_idx = 1 + else: + inargs_idx = 0 + + if not paramflags and total < len(argtypes): + raise TypeError("not enough arguments") for i, argtype in enumerate(argtypes): - defaultvalue = None - if i > 0 and self._paramflags is not None: - paramflag = self._paramflags[i-1] - if len(paramflag) == 2: - idlflag, name = paramflag - elif len(paramflag) == 3: - idlflag, name, defaultvalue = paramflag + flag = 0 + name = None + defval = marker + if paramflags: + paramflag = paramflags[i] + paramlen = len(paramflag) + name = None + if paramlen == 1: + flag = paramflag[0] + elif paramlen == 2: + flag, name = paramflag + elif paramlen == 
3: + flag, name, defval = paramflag + flag = flag & PARAMFLAG_COMBINED + if flag == PARAMFLAG_FIN | PARAMFLAG_FLCID: + val = defval + if val is marker: + val = 0 + wrapped = self._conv_param(argtype, val) + callargs.append(wrapped) + elif flag in (0, PARAMFLAG_FIN): + if inargs_idx < total: + val = args[inargs_idx] + inargs_idx += 1 + elif kwargs and name in kwargs: + val = kwargs[name] + inargs_idx += 1 + elif defval is not marker: + val = defval + elif name: + raise TypeError("required argument '%s' missing" % name) + else: + raise TypeError("not enough arguments") + wrapped = self._conv_param(argtype, val) + callargs.append(wrapped) + elif flag == PARAMFLAG_FOUT: + if defval is not marker: + outargs.append(defval) + wrapped = self._conv_param(argtype, defval) + else: + import ctypes + val = argtype._type_() + outargs.append(val) + wrapped = ctypes.byref(val) + callargs.append(wrapped) else: - idlflag = 0 - idlflag &= (PARAMFLAG_FIN | PARAMFLAG_FOUT | PARAMFLAG_FLCID) + raise ValueError("paramflag %d not yet implemented" % flag) + else: + try: + wrapped = self._conv_param(argtype, args[i]) + except (UnicodeError, TypeError, ValueError), e: + raise ArgumentError(str(e)) + callargs.append(wrapped) + inargs_idx += 1 - if idlflag in (0, PARAMFLAG_FIN): - pass - elif idlflag == PARAMFLAG_FOUT: - import ctypes - val = argtype._type_() - wrapped = (val, ctypes.byref(val)) - wrapped_args.append(wrapped) - continue - elif idlflag == PARAMFLAG_FIN | PARAMFLAG_FLCID: - # Always taken from defaultvalue if given, - # else the integer 0. 
- val = defaultvalue - if val is None: - val = 0 - wrapped = self._conv_param(argtype, val, consumed) - wrapped_args.append(wrapped) - continue - else: - raise NotImplementedError( - "paramflags = %s" % (self._paramflags[i-1],)) - - if consumed < len(args): - arg = args[consumed] - elif defaultvalue is not None: - arg = defaultvalue - else: - raise TypeError("Not enough arguments") - - try: - wrapped = self._conv_param(argtype, arg, consumed) - except (UnicodeError, TypeError, ValueError), e: - raise ArgumentError(str(e)) - wrapped_args.append(wrapped) - consumed += 1 - - if len(wrapped_args) < len(args): - extra = args[len(wrapped_args):] - argtypes = list(argtypes) + if len(callargs) < total: + extra = args[len(callargs):] for i, arg in enumerate(extra): try: - wrapped = self._conv_param(None, arg, i) + wrapped = self._conv_param(None, arg) except (UnicodeError, TypeError, ValueError), e: raise ArgumentError(str(e)) - wrapped_args.append(wrapped) - return wrapped_args + callargs.append(wrapped) - def _build_result(self, restype, resbuffer, argtypes, argsandobjs): - """Build the function result: - If there is no OUT parameter, return the actual function result - If there is one OUT parameter, return it - If there are many OUT parameters, return a tuple""" - - retval = None - - if self._com_index: - if resbuffer[0] & 0x80000000: - raise get_com_error(resbuffer[0], - self._com_iid, argsandobjs[0]) - else: - retval = int(resbuffer[0]) - elif restype is not None: - checker = getattr(self.restype, '_check_retval_', None) - if checker: - val = restype(resbuffer[0]) - # the original ctypes seems to make the distinction between - # classes defining a new type, and their subclasses - if '_type_' in restype.__dict__: - val = val.value - retval = checker(val) - elif not isinstance(restype, _CDataMeta): - retval = restype(resbuffer[0]) - else: - retval = restype._CData_retval(resbuffer) - - results = [] - if self._paramflags: - for argtype, obj, paramflag in zip(argtypes[1:], 
argsandobjs[1:], - self._paramflags): - if len(paramflag) == 2: - idlflag, name = paramflag - elif len(paramflag) == 3: - idlflag, name, defaultvalue = paramflag - else: - idlflag = 0 - idlflag &= (PARAMFLAG_FIN | PARAMFLAG_FOUT | PARAMFLAG_FLCID) - - if idlflag in (0, PARAMFLAG_FIN): - pass - elif idlflag == PARAMFLAG_FOUT: - val = obj.__ctypes_from_outparam__() - results.append(val) - elif idlflag == PARAMFLAG_FIN | PARAMFLAG_FLCID: - pass - else: - raise NotImplementedError( - "paramflags = %s" % (paramflag,)) - - if results: - if len(results) == 1: - return results[0] - else: - return tuple(results) - - # No output parameter, return the actual function result. - return retval + return callargs, outargs def __nonzero__(self): - return bool(self._buffer[0]) + return self._com_index is not None or bool(self._buffer[0]) def __del__(self): if self._needs_free: diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -134,7 +134,7 @@ __setattr__ = struct_setattr def from_address(self, address): - instance = self.__new__(self) + instance = StructOrUnion.__new__(self) instance.__dict__['_buffer'] = self._ffistruct.fromaddress(address) return instance @@ -156,7 +156,7 @@ return _CDataMeta.from_param(self, value) def _CData_output(self, resarray, base=None, index=-1): - res = self.__new__(self) + res = StructOrUnion.__new__(self) ffistruct = self._ffistruct.fromaddress(resarray.buffer) res.__dict__['_buffer'] = ffistruct res.__dict__['_base'] = base @@ -164,7 +164,7 @@ return res def _CData_retval(self, resbuffer): - res = self.__new__(self) + res = StructOrUnion.__new__(self) res.__dict__['_buffer'] = resbuffer res.__dict__['_base'] = None res.__dict__['_index'] = -1 diff --git a/lib_pypy/_scproxy.py b/lib_pypy/_scproxy.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_scproxy.py @@ -0,0 +1,130 @@ +"""Helper methods for urllib to fetch the proxy configuration settings using 
+the SystemConfiguration framework. + +""" +import sys +if sys.platform != 'darwin': + raise ImportError('Requires Mac OS X') + +from ctypes import c_int32, c_int64, c_void_p, c_char_p, c_int, cdll +from ctypes import pointer, create_string_buffer +from ctypes.util import find_library + +kCFNumberSInt32Type = 3 +kCFStringEncodingUTF8 = 134217984 + +def _CFSetup(): + sc = cdll.LoadLibrary(find_library("SystemConfiguration")) + cf = cdll.LoadLibrary(find_library("CoreFoundation")) + sctable = [ + ('SCDynamicStoreCopyProxies', [c_void_p], c_void_p), + ] + cftable = [ + ('CFArrayGetCount', [c_void_p], c_int64), + ('CFArrayGetValueAtIndex', [c_void_p, c_int64], c_void_p), + ('CFDictionaryGetValue', [c_void_p, c_void_p], c_void_p), + ('CFStringCreateWithCString', [c_void_p, c_char_p, c_int32], c_void_p), + ('CFStringGetLength', [c_void_p], c_int32), + ('CFStringGetCString', [c_void_p, c_char_p, c_int32, c_int32], c_int32), + ('CFNumberGetValue', [c_void_p, c_int, c_void_p], c_int32), + ('CFRelease', [c_void_p], None), + ] + scconst = [ + 'kSCPropNetProxiesExceptionsList', + 'kSCPropNetProxiesExcludeSimpleHostnames', + 'kSCPropNetProxiesHTTPEnable', + 'kSCPropNetProxiesHTTPProxy', + 'kSCPropNetProxiesHTTPPort', + 'kSCPropNetProxiesHTTPSEnable', + 'kSCPropNetProxiesHTTPSProxy', + 'kSCPropNetProxiesHTTPSPort', + 'kSCPropNetProxiesFTPEnable', + 'kSCPropNetProxiesFTPProxy', + 'kSCPropNetProxiesFTPPort', + 'kSCPropNetProxiesGopherEnable', + 'kSCPropNetProxiesGopherProxy', + 'kSCPropNetProxiesGopherPort', + ] + class CFProxy(object): + def __init__(self): + for mod, table in [(sc, sctable), (cf, cftable)]: + for fname, argtypes, restype in table: + func = getattr(mod, fname) + func.argtypes = argtypes + func.restype = restype + setattr(self, fname, func) + for k in scconst: + v = None + try: + v = c_void_p.in_dll(sc, k) + except ValueError: + v = None + setattr(self, k, v) + return CFProxy() +ffi = _CFSetup() + +def cfstring_to_pystring(value): + length = 
(ffi.CFStringGetLength(value) * 4) + 1 + buff = create_string_buffer(length) + ffi.CFStringGetCString(value, buff, length * 4, kCFStringEncodingUTF8) + return unicode(buff.value, 'utf8') + +def cfnum_to_int32(num): + result_ptr = pointer(c_int32(0)) + ffi.CFNumberGetValue(num, kCFNumberSInt32Type, result_ptr) + return result_ptr[0] + +def _get_proxy_settings(): + result = {'exclude_simple': False} + cfdct = ffi.SCDynamicStoreCopyProxies(None) + if not cfdct: + return result + try: + k = ffi.kSCPropNetProxiesExcludeSimpleHostnames + if k: + cfnum = ffi.CFDictionaryGetValue(cfdct, k) + if cfnum: + result['exclude_simple'] = bool(cfnum_to_int32(cfnum)) + k = ffi.kSCPropNetProxiesExceptionsList + if k: + cfarr = ffi.CFDictionaryGetValue(cfdct, k) + if cfarr: + lst = [] + for i in range(ffi.CFArrayGetCount(cfarr)): + cfstr = ffi.CFArrayGetValueAtIndex(cfarr, i) + if cfstr: + v = cfstring_to_pystring(cfstr) + else: + v = None + lst.append(v) + result['exceptions'] = lst + return result + finally: + ffi.CFRelease(cfdct) + +def _get_proxies(): + result = {} + cfdct = ffi.SCDynamicStoreCopyProxies(None) + if not cfdct: + return result + try: + for proto in 'HTTP', 'HTTPS', 'FTP', 'Gopher': + enabled_key = getattr(ffi, 'kSCPropNetProxies' + proto + 'Enable') + proxy_key = getattr(ffi, 'kSCPropNetProxies' + proto + 'Proxy') + port_key = getattr(ffi, 'kSCPropNetProxies' + proto + 'Port') + cfnum = ffi.CFDictionaryGetValue(cfdct, enabled_key) + if cfnum and cfnum_to_int32(cfnum): + cfhoststr = ffi.CFDictionaryGetValue(cfdct, proxy_key) + cfportnum = ffi.CFDictionaryGetValue(cfdct, port_key) + if cfhoststr: + host = cfstring_to_pystring(cfhoststr) + if host: + if cfportnum: + port = cfnum_to_int32(cfportnum) + v = u'http://%s:%d' % (host, port) + else: + v = u'http://%s' % (host,) + result[proto.lower()] = v + return result + finally: + ffi.CFRelease(cfdct) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -228,6 
+228,9 @@ factory = kwargs.get("factory", Connection) return factory(database, **kwargs) +def unicode_text_factory(x): + return unicode(x, 'utf-8') + class Connection(object): def __init__(self, database, isolation_level="", detect_types=0, timeout=None, *args, **kwargs): self.db = c_void_p() @@ -237,7 +240,7 @@ timeout = int(timeout * 1000) # pysqlite2 uses timeout in seconds sqlite.sqlite3_busy_timeout(self.db, timeout) - self.text_factory = lambda x: unicode(x, "utf-8") + self.text_factory = unicode_text_factory self.closed = False self.statements = [] self.statement_counter = 0 @@ -245,6 +248,8 @@ self._isolation_level = isolation_level self.detect_types = detect_types + self.cursors = [] + self.Error = Error self.Warning = Warning self.InterfaceError = InterfaceError @@ -307,6 +312,12 @@ "The object was created in thread id %d and this is thread id %d", self.thread_ident, thread_get_ident()) + def _reset_cursors(self): + for cursor_ref in self.cursors: + cursor = cursor_ref() + if cursor: + cursor.reset = True + def cursor(self, factory=None): self._check_thread() self._check_closed() @@ -421,6 +432,7 @@ raise self._get_exception(ret) finally: sqlite.sqlite3_finalize(statement) + self._reset_cursors() def _check_closed(self): if getattr(self, 'closed', True): @@ -450,6 +462,7 @@ self.closed = True ret = sqlite.sqlite3_close(self.db) + self._reset_cursors() if ret != SQLITE_OK: raise self._get_exception(ret) @@ -536,7 +549,7 @@ self._check_closed() try: c_closure, _ = self.func_cache[callback] - except KeyError: + except KeyError: def closure(context, nargs, c_params): function_callback(callback, context, nargs, c_params) c_closure = FUNC(closure) @@ -547,7 +560,7 @@ cast(None, STEP), cast(None, FINAL)) if ret != SQLITE_OK: - raise self._get_exception(ret) + raise self.OperationalError("Error creating function") def create_aggregate(self, name, num_args, cls): self._check_thread() @@ -629,13 +642,14 @@ raise TypeError con._check_thread() con._check_closed() + 
con.cursors.append(weakref.ref(self)) self.connection = con self._description = None self.arraysize = 1 - self.text_factory = con.text_factory self.row_factory = None self.rowcount = -1 self.statement = None + self.reset = False def _check_closed(self): if not getattr(self, 'connection', None): @@ -645,6 +659,7 @@ def execute(self, sql, params=None): self._description = None + self.reset = False if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() @@ -665,8 +680,12 @@ raise self.connection._get_exception(ret) if self.statement.kind == "DQL": - self.statement._readahead() - self.statement._build_row_cast_map() + if ret == SQLITE_ROW: + self.statement._build_row_cast_map() + self.statement._readahead() + else: + self.statement.item = None + self.statement.exhausted = True if self.statement.kind in ("DML", "DDL"): self.statement.reset() @@ -679,6 +698,7 @@ def executemany(self, sql, many_params): self._description = None + self.reset = False if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() @@ -700,6 +720,7 @@ def executescript(self, sql): self._description = None + self.reset = False if type(sql) is unicode: sql = sql.encode("utf-8") self._check_closed() @@ -736,8 +757,17 @@ def __iter__(self): return self.statement + def _check_reset(self): + if self.reset: + raise self.connection.InterfaceError("Cursor needed to be reset because " + "of commit/rollback and can " + "no longer be fetched from.") + + # do all statements def fetchone(self): self._check_closed() + self._check_reset() + if self.statement is None: return None @@ -750,6 +780,7 @@ def fetchmany(self, size=None): self._check_closed() + self._check_reset() if self.statement is None: return [] if size is None: @@ -763,6 +794,7 @@ def fetchall(self): self._check_closed() + self._check_reset() if self.statement is None: return [] return list(self.statement) @@ -782,6 +814,7 @@ if self.statement: self.statement.reset() self.statement = None + 
self.connection.cursors.remove(weakref.ref(self)) self.connection = None def setinputsizes(self, *args): @@ -829,7 +862,7 @@ def _build_row_cast_map(self): self.row_cast_map = [] - for i in range(sqlite.sqlite3_column_count(self.statement)): + for i in xrange(sqlite.sqlite3_column_count(self.statement)): converter = None if self.con.detect_types & PARSE_COLNAMES: @@ -854,14 +887,23 @@ self.row_cast_map.append(converter) + def _check_decodable(self, param): + if self.con.text_factory in (unicode, OptimizedUnicode, unicode_text_factory): + for c in param: + if ord(c) & 0x80 != 0: + raise self.con.ProgrammingError( + "You must not use 8-bit bytestrings unless " + "you use a text_factory that can interpret " + "8-bit bytestrings (like text_factory = str). " + "It is highly recommended that you instead " + "just switch your application to Unicode strings.") + def set_param(self, idx, param): cvt = converters.get(type(param)) if cvt is not None: cvt = param = cvt(param) - adapter = adapters.get((type(param), PrepareProtocol), None) - if adapter is not None: - param = adapter(param) + param = adapt(param) if param is None: sqlite.sqlite3_bind_null(self.statement, idx) @@ -873,6 +915,7 @@ elif type(param) is float: sqlite.sqlite3_bind_double(self.statement, idx, param) elif isinstance(param, str): + self._check_decodable(param) sqlite.sqlite3_bind_text(self.statement, idx, param, -1, SQLITE_TRANSIENT) elif isinstance(param, unicode): param = param.encode("utf-8") @@ -902,8 +945,8 @@ if len(params) != sqlite.sqlite3_bind_parameter_count(self.statement): raise ProgrammingError("wrong number of arguments") - for idx, param in enumerate(params): - self.set_param(idx+1, param) + for i in range(len(params)): + self.set_param(i+1, params[i]) else: for idx in range(1, sqlite.sqlite3_bind_parameter_count(self.statement) + 1): param_name = sqlite.sqlite3_bind_parameter_name(self.statement, idx) @@ -942,7 +985,8 @@ self.column_count = sqlite.sqlite3_column_count(self.statement) row = 
[] for i in xrange(self.column_count): - typ = sqlite.sqlite3_column_type(self.statement, i) + typ = sqlite.sqlite3_column_type(self.statement, i) + converter = self.row_cast_map[i] if converter is None: if typ == SQLITE_INTEGER: @@ -959,7 +1003,7 @@ val = None elif typ == SQLITE_TEXT: val = sqlite.sqlite3_column_text(self.statement, i) - val = self.cur().text_factory(val) + val = self.con.text_factory(val) else: blob = sqlite.sqlite3_column_blob(self.statement, i) if not blob: @@ -1069,12 +1113,6 @@ return 1 return 0 -def register_adapter(typ, callable): - adapters[typ, PrepareProtocol] = callable - -def register_converter(name, callable): - converters[name.upper()] = callable - def _convert_params(con, nargs, params): _params = [] for i in range(nargs): @@ -1155,6 +1193,12 @@ class PrepareProtocol(object): pass +def register_adapter(typ, callable): + adapters[typ, PrepareProtocol] = callable + +def register_converter(name, callable): + converters[name.upper()] = callable + def register_adapters_and_converters(): def adapt_date(val): return val.isoformat() @@ -1184,11 +1228,39 @@ register_converter("date", convert_date) register_converter("timestamp", convert_timestamp) +def adapt(val, proto=PrepareProtocol): + # look for an adapter in the registry + adapter = adapters.get((type(val), proto), None) + if adapter is not None: + return adapter(val) + + # try to have the protocol adapt this object + if hasattr(proto, '__adapt__'): + try: + adapted = proto.__adapt__(val) + except TypeError: + pass + else: + if adapted is not None: + return adapted + + # and finally try to have the object adapt itself + if hasattr(val, '__conform__'): + try: + adapted = val.__conform__(proto) + except TypeError: + pass + else: + if adapted is not None: + return adapted + + return val + +register_adapters_and_converters() + def OptimizedUnicode(s): try: val = unicode(s, "ascii").encode("ascii") except UnicodeDecodeError: val = unicode(s, "utf-8") return val - 
-register_adapters_and_converters() diff --git a/lib_pypy/conftest.py b/lib_pypy/conftest.py new file mode 100644 --- /dev/null +++ b/lib_pypy/conftest.py @@ -0,0 +1,2 @@ + +from pypy.conftest import * diff --git a/lib_pypy/ctypes_config_cache/test/test_cache.py b/lib_pypy/ctypes_config_cache/test/test_cache.py --- a/lib_pypy/ctypes_config_cache/test/test_cache.py +++ b/lib_pypy/ctypes_config_cache/test/test_cache.py @@ -11,6 +11,7 @@ dir=True) tmpdir.join('dumpcache.py').write(dirpath.join('dumpcache.py').read()) path = sys.path[:] + sys.modules.pop('dumpcache', None) try: sys.path.insert(0, str(tmpdir)) execfile(str(filepath), {}) diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py --- a/lib_pypy/distributed/test/test_distributed.py +++ b/lib_pypy/distributed/test/test_distributed.py @@ -4,11 +4,7 @@ from pypy.conftest import gettestobjspace import sys - -class AppTestNoProxy(object): - disabled = True - def test_init(self): - raises(ImportError, "import distributed") +import pytest class AppTestDistributed(object): def setup_class(cls): diff --git a/py/__init__.py b/py/__init__.py --- a/py/__init__.py +++ b/py/__init__.py @@ -4,68 +4,52 @@ this module uses apipkg.py for lazy-loading sub modules and classes. The initpkg-dictionary below specifies name->value mappings where value can be another namespace -dictionary or an import path. +dictionary or an import path. 
(c) Holger Krekel and others, 2004-2010 """ -__version__ = version = "1.3.1" +__version__ = '1.4.3.dev0' -import py.apipkg +from py import _apipkg -py.apipkg.initpkg(__name__, dict( +# so that py.error.* instances are picklable +import sys +sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error') + +_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={ # access to all standard lib modules - std = '._std:std', + 'std': '._std:std', # access to all posix errno's as classes - error = '._error:error', + 'error': '._error:error', - _pydir = '.__metainfo:pydir', - version = 'py:__version__', # backward compatibility + '_pydir' : '.__metainfo:pydir', + 'version': 'py:__version__', # backward compatibility - cmdline = { - 'pytest': '._cmdline.pytest:main', - 'pylookup': '._cmdline.pylookup:main', - 'pycountloc': '._cmdline.pycountlog:main', - 'pylookup': '._cmdline.pylookup:main', - 'pycountloc': '._cmdline.pycountloc:main', - 'pycleanup': '._cmdline.pycleanup:main', - 'pywhich' : '._cmdline.pywhich:main', - 'pysvnwcrevert' : '._cmdline.pysvnwcrevert:main', - 'pyconvert_unittest' : '._cmdline.pyconvert_unittest:main', - }, - - test = { - # helpers for use from test functions or collectors - '__onfirstaccess__' : '._test.config:onpytestaccess', - '__doc__' : '._test:__doc__', - # configuration/initialization related test api - 'config' : '._test.config:config_per_process', - 'ensuretemp' : '._test.config:ensuretemp', - 'collect': { - 'Collector' : '._test.collect:Collector', - 'Directory' : '._test.collect:Directory', - 'File' : '._test.collect:File', - 'Item' : '._test.collect:Item', - 'Module' : '._test.pycollect:Module', - 'Class' : '._test.pycollect:Class', - 'Instance' : '._test.pycollect:Instance', - 'Generator' : '._test.pycollect:Generator', - 'Function' : '._test.pycollect:Function', - '_fillfuncargs' : '._test.funcargs:fillfuncargs', - }, - 'cmdline': { - 'main' : '._test.cmdline:main', # backward compat - }, - }, + # pytest-2.0 
has a flat namespace, we use alias modules + # to keep old references compatible + 'test' : 'pytest', + 'test.collect' : 'pytest', + 'test.cmdline' : 'pytest', # hook into the top-level standard library - process = { + 'process' : { '__doc__' : '._process:__doc__', 'cmdexec' : '._process.cmdexec:cmdexec', 'kill' : '._process.killproc:kill', 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', }, - path = { + 'apipkg' : { + 'initpkg' : '._apipkg:initpkg', + 'ApiModule' : '._apipkg:ApiModule', + }, + + 'iniconfig' : { + 'IniConfig' : '._iniconfig:IniConfig', + 'ParseError' : '._iniconfig:ParseError', + }, + + 'path' : { '__doc__' : '._path:__doc__', 'svnwc' : '._path.svnwc:SvnWCCommandPath', 'svnurl' : '._path.svnurl:SvnCommandPath', @@ -73,18 +57,8 @@ 'SvnAuth' : '._path.svnwc:SvnAuth', }, - # some nice slightly magic APIs - magic = { - 'invoke' : '._code.oldmagic:invoke', - 'revoke' : '._code.oldmagic:revoke', - 'patch' : '._code.oldmagic:patch', - 'revert' : '._code.oldmagic:revert', - 'autopath' : '._path.local:autopath', - 'AssertionError' : '._code.oldmagic2:AssertionError', - }, - # python inspection/code-generation API - code = { + 'code' : { '__doc__' : '._code:__doc__', 'compile' : '._code.source:compile_', 'Source' : '._code.source:Source', @@ -99,18 +73,22 @@ '_AssertionError' : '._code.assertion:AssertionError', '_reinterpret_old' : '._code.assertion:reinterpret_old', '_reinterpret' : '._code.assertion:reinterpret', + '_reprcompare' : '._code.assertion:_reprcompare', }, # backports and additions of builtins - builtin = { + 'builtin' : { '__doc__' : '._builtin:__doc__', 'enumerate' : '._builtin:enumerate', 'reversed' : '._builtin:reversed', 'sorted' : '._builtin:sorted', + 'any' : '._builtin:any', + 'all' : '._builtin:all', 'set' : '._builtin:set', 'frozenset' : '._builtin:frozenset', 'BaseException' : '._builtin:BaseException', 'GeneratorExit' : '._builtin:GeneratorExit', + '_sysex' : '._builtin:_sysex', 'print_' : '._builtin:print_', '_reraise' : 
'._builtin:_reraise', '_tryimport' : '._builtin:_tryimport', @@ -128,7 +106,7 @@ }, # input-output helping - io = { + 'io' : { '__doc__' : '._io:__doc__', 'dupfile' : '._io.capture:dupfile', 'TextIO' : '._io.capture:TextIO', @@ -137,13 +115,13 @@ 'StdCapture' : '._io.capture:StdCapture', 'StdCaptureFD' : '._io.capture:StdCaptureFD', 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', - 'ansi_print' : '._io.terminalwriter:ansi_print', + 'ansi_print' : '._io.terminalwriter:ansi_print', 'get_terminal_width' : '._io.terminalwriter:get_terminal_width', 'saferepr' : '._io.saferepr:saferepr', }, # small and mean xml/html generation - xml = { + 'xml' : { '__doc__' : '._xmlgen:__doc__', 'html' : '._xmlgen:html', 'Tag' : '._xmlgen:Tag', @@ -152,7 +130,7 @@ 'escape' : '._xmlgen:escape', }, - log = { + 'log' : { # logging API ('producers' and 'consumers' connected via keywords) '__doc__' : '._log:__doc__', '_apiwarn' : '._log.warning:_apiwarn', @@ -166,12 +144,4 @@ 'Syslog' : '._log.log:Syslog', }, - # compatibility modules (deprecated) - compat = { - '__doc__' : '._compat:__doc__', - 'doctest' : '._compat.dep_doctest:doctest', - 'optparse' : '._compat.dep_optparse:optparse', - 'textwrap' : '._compat.dep_textwrap:textwrap', - 'subprocess' : '._compat.dep_subprocess:subprocess', - }, -)) +}) diff --git a/py/_apipkg.py b/py/_apipkg.py new file mode 100644 --- /dev/null +++ b/py/_apipkg.py @@ -0,0 +1,167 @@ +""" +apipkg: control the exported namespace of a python package. + +see http://pypi.python.org/pypi/apipkg + +(c) holger krekel, 2009 - MIT license +""" +import os +import sys +from types import ModuleType + +__version__ = '1.2.dev6' + +def initpkg(pkgname, exportdefs, attr=dict()): + """ initialize given package from the export definitions. 
""" + oldmod = sys.modules.get(pkgname) + d = {} + f = getattr(oldmod, '__file__', None) + if f: + f = os.path.abspath(f) + d['__file__'] = f + if hasattr(oldmod, '__version__'): + d['__version__'] = oldmod.__version__ + if hasattr(oldmod, '__loader__'): + d['__loader__'] = oldmod.__loader__ + if hasattr(oldmod, '__path__'): + d['__path__'] = [os.path.abspath(p) for p in oldmod.__path__] + if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): + d['__doc__'] = oldmod.__doc__ + d.update(attr) + if hasattr(oldmod, "__dict__"): + oldmod.__dict__.update(d) + mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) + sys.modules[pkgname] = mod + +def importobj(modpath, attrname): + module = __import__(modpath, None, None, ['__doc__']) + if not attrname: + return module + + retval = module + names = attrname.split(".") + for x in names: + retval = getattr(retval, x) + return retval + +class ApiModule(ModuleType): + def __docget(self): + try: + return self.__doc + except AttributeError: + if '__doc__' in self.__map__: + return self.__makeattr('__doc__') + def __docset(self, value): + self.__doc = value + __doc__ = property(__docget, __docset) + + def __init__(self, name, importspec, implprefix=None, attr=None): + self.__name__ = name + self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] + self.__map__ = {} + self.__implprefix__ = implprefix or name + if attr: + for name, val in attr.items(): + #print "setting", self.__name__, name, val + setattr(self, name, val) + for name, importspec in importspec.items(): + if isinstance(importspec, dict): + subname = '%s.%s'%(self.__name__, name) + apimod = ApiModule(subname, importspec, implprefix) + sys.modules[subname] = apimod + setattr(self, name, apimod) + else: + parts = importspec.split(':') + modpath = parts.pop(0) + attrname = parts and parts[0] or "" + if modpath[0] == '.': + modpath = implprefix + modpath + + if not attrname: + subname = '%s.%s'%(self.__name__, name) + apimod = 
AliasModule(subname, modpath) + sys.modules[subname] = apimod + if '.' not in name: + setattr(self, name, apimod) + else: + self.__map__[name] = (modpath, attrname) + + def __repr__(self): + l = [] + if hasattr(self, '__version__'): + l.append("version=" + repr(self.__version__)) + if hasattr(self, '__file__'): + l.append('from ' + repr(self.__file__)) + if l: + return '' % (self.__name__, " ".join(l)) + return '' % (self.__name__,) + + def __makeattr(self, name): + """lazily compute value for name or raise AttributeError if unknown.""" + #print "makeattr", self.__name__, name + target = None + if '__onfirstaccess__' in self.__map__: + target = self.__map__.pop('__onfirstaccess__') + importobj(*target)() + try: + modpath, attrname = self.__map__[name] + except KeyError: + if target is not None and name != '__onfirstaccess__': + # retry, onfirstaccess might have set attrs + return getattr(self, name) + raise AttributeError(name) + else: + result = importobj(modpath, attrname) + setattr(self, name, result) + try: + del self.__map__[name] + except KeyError: + pass # in a recursive-import situation a double-del can happen + return result + + __getattr__ = __makeattr + + def __dict__(self): + # force all the content of the module to be loaded when __dict__ is read + dictdescr = ModuleType.__dict__['__dict__'] + dict = dictdescr.__get__(self) + if dict is not None: + hasattr(self, 'some') + for name in self.__all__: + try: + self.__makeattr(name) + except AttributeError: + pass + return dict + __dict__ = property(__dict__) + + +def AliasModule(modname, modpath, attrname=None): + mod = [] + + def getmod(): + if not mod: + x = importobj(modpath, None) + if attrname is not None: + x = getattr(x, attrname) + mod.append(x) + return mod[0] + + class AliasModule(ModuleType): + + def __repr__(self): + x = modpath + if attrname: + x += "." 
+ attrname + return '' % (modname, x) + + def __getattribute__(self, name): + return getattr(getmod(), name) + + def __setattr__(self, name, value): + setattr(getmod(), name, value) + + def __delattr__(self, name): + delattr(getmod(), name) + + return AliasModule(modname) diff --git a/py/_builtin.py b/py/_builtin.py --- a/py/_builtin.py +++ b/py/_builtin.py @@ -36,6 +36,24 @@ return self.remaining try: + any = any +except NameError: + def any(iterable): + for x in iterable: + if x: + return True + return False + +try: + all = all +except NameError: + def all(iterable): + for x in iterable: + if not x: + return False + return True + +try: sorted = sorted except NameError: builtin_cmp = cmp # need to use cmp as keyword arg @@ -67,10 +85,10 @@ try: set, frozenset = set, frozenset except NameError: - from sets import set, frozenset + from sets import set, frozenset # pass through -enumerate = enumerate +enumerate = enumerate try: BaseException = BaseException @@ -87,12 +105,14 @@ pass GeneratorExit.__module__ = 'exceptions' +_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit) + if sys.version_info >= (3, 0): exec ("print_ = print ; exec_=exec") import builtins - # some backward compatibility helpers - _basestring = str + # some backward compatibility helpers + _basestring = str def _totext(obj, encoding=None): if isinstance(obj, bytes): obj = obj.decode(encoding) @@ -100,9 +120,9 @@ obj = str(obj) return obj - def _isbytes(x): + def _isbytes(x): return isinstance(x, bytes) - def _istext(x): + def _istext(x): return isinstance(x, str) def _getimself(function): @@ -135,13 +155,13 @@ else: import __builtin__ as builtins - _totext = unicode + _totext = unicode _basestring = basestring execfile = execfile callable = callable - def _isbytes(x): + def _isbytes(x): return isinstance(x, str) - def _istext(x): + def _istext(x): return isinstance(x, unicode) def _getimself(function): @@ -157,7 +177,7 @@ return getattr(function, "func_code", None) def 
print_(*args, **kwargs): - """ minimal backport of py3k print statement. """ + """ minimal backport of py3k print statement. """ sep = ' ' if 'sep' in kwargs: sep = kwargs.pop('sep') @@ -177,24 +197,22 @@ file.write(end) def exec_(obj, globals=None, locals=None): - """ minimal backport of py3k exec statement. """ + """ minimal backport of py3k exec statement. """ __tracebackhide__ = True - if globals is None: + if globals is None: frame = sys._getframe(1) - globals = frame.f_globals + globals = frame.f_globals if locals is None: locals = frame.f_locals elif locals is None: locals = globals - exec2(obj, globals, locals) + exec2(obj, globals, locals) if sys.version_info >= (3,0): - exec (""" -def _reraise(cls, val, tb): - __tracebackhide__ = True - assert hasattr(val, '__traceback__') - raise val -""") + def _reraise(cls, val, tb): + __tracebackhide__ = True + assert hasattr(val, '__traceback__') + raise val else: exec (""" def _reraise(cls, val, tb): @@ -202,11 +220,11 @@ raise cls, val, tb def exec2(obj, globals, locals): __tracebackhide__ = True - exec obj in globals, locals + exec obj in globals, locals """) def _tryimport(*names): - """ return the first successfully imported module. """ + """ return the first successfully imported module. """ assert names for name in names: try: diff --git a/py/_cmdline/__init__.py b/py/_cmdline/__init__.py deleted file mode 100644 --- a/py/_cmdline/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# diff --git a/py/_cmdline/pycleanup.py b/py/_cmdline/pycleanup.py deleted file mode 100755 --- a/py/_cmdline/pycleanup.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python - -"""\ -py.cleanup [PATH] ... - -Delete typical python development related files recursively under the specified PATH (which defaults to the current working directory). Don't follow links and don't recurse into directories with a dot. Optionally remove setup.py related files and empty -directories. 
- -""" -import py -import sys, subprocess - -def main(): - parser = py.std.optparse.OptionParser(usage=__doc__) - parser.add_option("-e", metavar="ENDING", - dest="endings", default=[".pyc", "$py.class"], action="append", - help=("(multi) recursively remove files with the given ending." - " '.pyc' and '$py.class' are in the default list.")) - parser.add_option("-d", action="store_true", dest="removedir", - help="remove empty directories.") - parser.add_option("-s", action="store_true", dest="setup", - help="remove 'build' and 'dist' directories next to setup.py files") - parser.add_option("-a", action="store_true", dest="all", - help="synonym for '-S -d -e pip-log.txt'") - parser.add_option("-n", "--dryrun", dest="dryrun", default=False, - action="store_true", - help="don't actually delete but display would-be-removed filenames.") - (options, args) = parser.parse_args() - - Cleanup(options, args).main() - -class Cleanup: - def __init__(self, options, args): - if not args: - args = ["."] - self.options = options - self.args = [py.path.local(x) for x in args] - if options.all: - options.setup = True - options.removedir = True - options.endings.append("pip-log.txt") - - def main(self): - if self.options.setup: - for arg in self.args: - self.setupclean(arg) - - for path in self.args: - py.builtin.print_("cleaning path", path, - "of extensions", self.options.endings) - for x in path.visit(self.shouldremove, self.recursedir): - self.remove(x) - if self.options.removedir: - for x in path.visit(lambda x: x.check(dir=1), self.recursedir): - if not x.listdir(): - self.remove(x) - - def shouldremove(self, p): - for ending in self.options.endings: - if p.basename.endswith(ending): - return True - - def recursedir(self, path): - return path.check(dotfile=0, link=0) - - def remove(self, path): - if not path.check(): - return - if self.options.dryrun: - py.builtin.print_("would remove", path) - else: - py.builtin.print_("removing", path) - path.remove() - - def XXXcallsetup(self, 
setup, *args): - old = setup.dirpath().chdir() - try: - subprocess.call([sys.executable, str(setup)] + list(args)) - finally: - old.chdir() - - def setupclean(self, path): - for x in path.visit("setup.py", self.recursedir): - basepath = x.dirpath() - self.remove(basepath / "build") - self.remove(basepath / "dist") diff --git a/py/_cmdline/pyconvert_unittest.py b/py/_cmdline/pyconvert_unittest.py deleted file mode 100644 --- a/py/_cmdline/pyconvert_unittest.py +++ /dev/null @@ -1,253 +0,0 @@ -import re -import sys - -try: - import parser -except ImportError: - parser = None - -d={} -# d is the dictionary of unittest changes, keyed to the old name -# used by unittest. -# d[old][0] is the new replacement function. -# d[old][1] is the operator you will substitute, or '' if there is none. -# d[old][2] is the possible number of arguments to the unittest -# function. - -# Old Unittest Name new name operator # of args -d['assertRaises'] = ('raises', '', ['Any']) -d['fail'] = ('raise AssertionError', '', [0,1]) -d['assert_'] = ('assert', '', [1,2]) -d['failIf'] = ('assert not', '', [1,2]) -d['assertEqual'] = ('assert', ' ==', [2,3]) -d['failIfEqual'] = ('assert not', ' ==', [2,3]) -d['assertIn'] = ('assert', ' in', [2,3]) -d['assertNotIn'] = ('assert', ' not in', [2,3]) -d['assertNotEqual'] = ('assert', ' !=', [2,3]) -d['failUnlessEqual'] = ('assert', ' ==', [2,3]) -d['assertAlmostEqual'] = ('assert round', ' ==', [2,3,4]) -d['failIfAlmostEqual'] = ('assert not round', ' ==', [2,3,4]) -d['assertNotAlmostEqual'] = ('assert round', ' !=', [2,3,4]) -d['failUnlessAlmostEquals'] = ('assert round', ' ==', [2,3,4]) - -# the list of synonyms -d['failUnlessRaises'] = d['assertRaises'] -d['failUnless'] = d['assert_'] -d['assertEquals'] = d['assertEqual'] -d['assertNotEquals'] = d['assertNotEqual'] -d['assertAlmostEquals'] = d['assertAlmostEqual'] -d['assertNotAlmostEquals'] = d['assertNotAlmostEqual'] - -# set up the regular expressions we will need -leading_spaces = 
re.compile(r'^(\s*)') # this never fails - -pat = '' -for k in d.keys(): # this complicated pattern to match all unittests - pat += '|' + r'^(\s*)' + 'self.' + k + r'\(' # \tself.whatever( - -old_names = re.compile(pat[1:]) -linesep='\n' # nobody will really try to convert files not read - # in text mode, will they? - - -def blocksplitter(fp): - '''split a file into blocks that are headed by functions to rename''' - - blocklist = [] - blockstring = '' - - for line in fp: - interesting = old_names.match(line) - if interesting : - if blockstring: - blocklist.append(blockstring) - blockstring = line # reset the block - else: - blockstring += line - - blocklist.append(blockstring) - return blocklist - -def rewrite_utest(block): - '''rewrite every block to use the new utest functions''' - - '''returns the rewritten unittest, unless it ran into problems, - in which case it just returns the block unchanged. - ''' - utest = old_names.match(block) - - if not utest: - return block - - old = utest.group(0).lstrip()[5:-1] # the name we want to replace - new = d[old][0] # the name of the replacement function - op = d[old][1] # the operator you will use , or '' if there is none. - possible_args = d[old][2] # a list of the number of arguments the - # unittest function could possibly take. - - if possible_args == ['Any']: # just rename assertRaises & friends - return re.sub('self.'+old, new, block) - - message_pos = possible_args[-1] - # the remaining unittests can have an optional message to print - # when they fail. It is always the last argument to the function. - - try: - indent, argl, trailer = decompose_unittest(old, block) - - except SyntaxError: # but we couldn't parse it! 
- return block - - argnum = len(argl) - if argnum not in possible_args: - # sanity check - this one isn't real either - return block - - elif argnum == message_pos: - message = argl[-1] - argl = argl[:-1] - else: - message = None - - if argnum is 0 or (argnum is 1 and argnum is message_pos): #unittest fail() - string = '' - if message: - message = ' ' + message - - elif message_pos is 4: # assertAlmostEqual & friends - try: - pos = argl[2].lstrip() - except IndexError: - pos = '7' # default if none is specified - string = '(%s -%s, %s)%s 0' % (argl[0], argl[1], pos, op ) - - else: # assert_, assertEquals and all the rest - string = ' ' + op.join(argl) - - if message: - string = string + ',' + message - - return indent + new + string + trailer - -def decompose_unittest(old, block): - '''decompose the block into its component parts''' - - ''' returns indent, arglist, trailer - indent -- the indentation - arglist -- the arguments to the unittest function - trailer -- any extra junk after the closing paren, such as #commment - ''' - - indent = re.match(r'(\s*)', block).group() - pat = re.search('self.' 
+ old + r'\(', block) - - args, trailer = get_expr(block[pat.end():], ')') - arglist = break_args(args, []) - - if arglist == ['']: # there weren't any - return indent, [], trailer - - for i in range(len(arglist)): - try: - parser.expr(arglist[i].lstrip('\t ')) - except SyntaxError: - if i == 0: - arglist[i] = '(' + arglist[i] + ')' - else: - arglist[i] = ' (' + arglist[i] + ')' - - return indent, arglist, trailer - -def break_args(args, arglist): - '''recursively break a string into a list of arguments''' - try: - first, rest = get_expr(args, ',') - if not rest: - return arglist + [first] - else: - return [first] + break_args(rest, arglist) - except SyntaxError: - return arglist + [args] - -def get_expr(s, char): - '''split a string into an expression, and the rest of the string''' - - pos=[] - for i in range(len(s)): - if s[i] == char: - pos.append(i) - if pos == []: - raise SyntaxError # we didn't find the expected char. Ick. - - for p in pos: - # make the python parser do the hard work of deciding which comma - # splits the string into two expressions - try: - parser.expr('(' + s[:p] + ')') - return s[:p], s[p+1:] - except SyntaxError: # It's not an expression yet - pass - raise SyntaxError # We never found anything that worked. - - -def main(): - import sys - import py - - usage = "usage: %prog [-s [filename ...] | [-i | -c filename ...]]" - optparser = py.std.optparse.OptionParser(usage) - - def select_output (option, opt, value, optparser, **kw): - if hasattr(optparser, 'output'): - optparser.error( - 'Cannot combine -s -i and -c options. 
Use one only.') - else: - optparser.output = kw['output'] - - optparser.add_option("-s", "--stdout", action="callback", - callback=select_output, - callback_kwargs={'output':'stdout'}, - help="send your output to stdout") - - optparser.add_option("-i", "--inplace", action="callback", - callback=select_output, - callback_kwargs={'output':'inplace'}, - help="overwrite files in place") - - optparser.add_option("-c", "--copy", action="callback", - callback=select_output, - callback_kwargs={'output':'copy'}, - help="copy files ... fn.py --> fn_cp.py") - - options, args = optparser.parse_args() - - output = getattr(optparser, 'output', 'stdout') - - if output in ['inplace', 'copy'] and not args: - optparser.error( - '-i and -c option require at least one filename') - - if not args: - s = '' - for block in blocksplitter(sys.stdin): - s += rewrite_utest(block) - sys.stdout.write(s) - - else: - for infilename in args: # no error checking to see if we can open, etc. - infile = file(infilename) - s = '' - for block in blocksplitter(infile): - s += rewrite_utest(block) - if output == 'inplace': - outfile = file(infilename, 'w+') - elif output == 'copy': # yes, just go clobber any existing .cp - outfile = file (infilename[:-3]+ '_cp.py', 'w+') - else: - outfile = sys.stdout - - outfile.write(s) - - -if __name__ == '__main__': - main() diff --git a/py/_cmdline/pycountloc.py b/py/_cmdline/pycountloc.py deleted file mode 100755 --- a/py/_cmdline/pycountloc.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python - -# hands on script to compute the non-empty Lines of Code -# for tests and non-test code - -"""\ -py.countloc [PATHS] - -Count (non-empty) lines of python code and number of python files recursively -starting from a list of paths given on the command line (starting from the -current working directory). Distinguish between test files and normal ones and -report them separately. 
-""" -import py - -def main(): - parser = py.std.optparse.OptionParser(usage=__doc__) - (options, args) = parser.parse_args() - countloc(args) - -def nodot(p): - return p.check(dotfile=0) - -class FileCounter(object): - def __init__(self): - self.file2numlines = {} - self.numlines = 0 - self.numfiles = 0 - - def addrecursive(self, directory, fil="*.py", rec=nodot): - for x in directory.visit(fil, rec): - self.addfile(x) - - def addfile(self, fn, emptylines=False): - if emptylines: - s = len(p.readlines()) - else: - s = 0 - for i in fn.readlines(): - if i.strip(): - s += 1 - self.file2numlines[fn] = s - self.numfiles += 1 - self.numlines += s - - def getnumlines(self, fil): - numlines = 0 - for path, value in self.file2numlines.items(): - if fil(path): - numlines += value - return numlines - - def getnumfiles(self, fil): - numfiles = 0 - for path in self.file2numlines: - if fil(path): - numfiles += 1 - return numfiles - -def get_loccount(locations=None): - if locations is None: - localtions = [py.path.local()] - counter = FileCounter() - for loc in locations: - counter.addrecursive(loc, '*.py', rec=nodot) - - def istestfile(p): - return p.check(fnmatch='test_*.py') - isnottestfile = lambda x: not istestfile(x) - - numfiles = counter.getnumfiles(isnottestfile) - numlines = counter.getnumlines(isnottestfile) - numtestfiles = counter.getnumfiles(istestfile) - numtestlines = counter.getnumlines(istestfile) - - return counter, numfiles, numlines, numtestfiles, numtestlines - -def countloc(paths=None): - if not paths: - paths = ['.'] - locations = [py.path.local(x) for x in paths] - (counter, numfiles, numlines, numtestfiles, - numtestlines) = get_loccount(locations) - - items = counter.file2numlines.items() - items.sort(lambda x,y: cmp(x[1], y[1])) - for x, y in items: - print("%3d %30s" % (y,x)) - - print("%30s %3d" %("number of testfiles", numtestfiles)) - print("%30s %3d" %("number of non-empty testlines", numtestlines)) - print("%30s %3d" %("number of files", 
numfiles)) - print("%30s %3d" %("number of non-empty lines", numlines)) - diff --git a/py/_cmdline/pylookup.py b/py/_cmdline/pylookup.py deleted file mode 100755 --- a/py/_cmdline/pylookup.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -"""\ -py.lookup [search_directory] SEARCH_STRING [options] - -Looks recursively at Python files for a SEARCH_STRING, starting from the -present working directory. Prints the line, with the filename and line-number -prepended.""" - -import sys, os -import py -from py.io import ansi_print, get_terminal_width -import re - -def rec(p): - return p.check(dotfile=0) - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("-i", "--ignore-case", action="store_true", dest="ignorecase", - help="ignore case distinctions") -parser.add_option("-C", "--context", action="store", type="int", dest="context", - default=0, help="How many lines of output to show") - -terminal_width = get_terminal_width() - -def find_indexes(search_line, string): - indexes = [] - before = 0 - while 1: - i = search_line.find(string, before) - if i == -1: - break - indexes.append(i) - before = i + len(string) - return indexes - -def main(): - (options, args) = parser.parse_args() - if len(args) == 2: - search_dir, string = args - search_dir = py.path.local(search_dir) - else: - search_dir = py.path.local() - string = args[0] - if options.ignorecase: - string = string.lower() - for x in search_dir.visit('*.py', rec): - # match filename directly - s = x.relto(search_dir) - if options.ignorecase: - s = s.lower() - if s.find(string) != -1: - sys.stdout.write("%s: filename matches %r" %(x, string) + "\n") - - try: - s = x.read() - except py.error.ENOENT: - pass # whatever, probably broken link (ie emacs lock) - searchs = s - if options.ignorecase: - searchs = s.lower() - if s.find(string) != -1: - lines = s.splitlines() - if options.ignorecase: - searchlines = s.lower().splitlines() - else: - searchlines = lines - for i, (line, searchline) in 
enumerate(zip(lines, searchlines)): - indexes = find_indexes(searchline, string) - if not indexes: - continue - if not options.context: - sys.stdout.write("%s:%d: " %(x.relto(search_dir), i+1)) - last_index = 0 - for index in indexes: - sys.stdout.write(line[last_index: index]) - ansi_print(line[index: index+len(string)], - file=sys.stdout, esc=31, newline=False) - last_index = index + len(string) - sys.stdout.write(line[last_index:] + "\n") - else: - context = (options.context)/2 - for count in range(max(0, i-context), min(len(lines) - 1, i+context+1)): - print("%s:%d: %s" %(x.relto(search_dir), count+1, lines[count].rstrip())) - print("-" * terminal_width) diff --git a/py/_cmdline/pysvnwcrevert.py b/py/_cmdline/pysvnwcrevert.py deleted file mode 100755 --- a/py/_cmdline/pysvnwcrevert.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python -"""\ -py.svnwcrevert [options] WCPATH - -Running this script and then 'svn up' puts the working copy WCPATH in a state -as clean as a fresh check-out. - -WARNING: you'll loose all local changes, obviously! - -This script deletes all files that have been modified -or that svn doesn't explicitly know about, including svn:ignored files -(like .pyc files, hint hint). - -The goal of this script is to leave the working copy with some files and -directories possibly missing, but - most importantly - in a state where -the following 'svn up' won't just crash. 
-""" - -import sys, py - -def kill(p, root): - print('< %s' % (p.relto(root),)) - p.remove(rec=1) - -def svnwcrevert(path, root=None, precious=[]): - if root is None: - root = path - wcpath = py.path.svnwc(path) - try: - st = wcpath.status() - except ValueError: # typically, "bad char in wcpath" - kill(path, root) - return - for p in path.listdir(): - if p.basename == '.svn' or p.basename in precious: - continue - wcp = py.path.svnwc(p) - if wcp not in st.unchanged and wcp not in st.external: - kill(p, root) - elif p.check(dir=1): - svnwcrevert(p, root) - -# XXX add a functional test - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("-p", "--precious", - action="append", dest="precious", default=[], - help="preserve files with this name") - -def main(): - opts, args = parser.parse_args() - if len(args) != 1: - parser.print_help() - sys.exit(2) - svnwcrevert(py.path.local(args[0]), precious=opts.precious) diff --git a/py/_cmdline/pytest.py b/py/_cmdline/pytest.py deleted file mode 100755 --- a/py/_cmdline/pytest.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python -import py - -def main(args=None): - raise SystemExit(py.test.cmdline.main(args)) diff --git a/py/_cmdline/pywhich.py b/py/_cmdline/pywhich.py deleted file mode 100755 --- a/py/_cmdline/pywhich.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python - -"""\ -py.which [name] - -print the location of the given python module or package name -""" - -import sys - -def main(): - name = sys.argv[1] - try: - mod = __import__(name) - except ImportError: - sys.stderr.write("could not import: " + name + "\n") - else: - try: - location = mod.__file__ - except AttributeError: - sys.stderr.write("module (has no __file__): " + str(mod)) - else: - print(location) diff --git a/py/_code/_assertionnew.py b/py/_code/_assertionnew.py --- a/py/_code/_assertionnew.py +++ b/py/_code/_assertionnew.py @@ -1,6 +1,6 @@ """ -Like _assertion.py but using builtin AST. 
It should replace _assertionold.py -eventually. +Find intermediate evalutation results in assert statements through builtin AST. +This should replace _assertionold.py eventually. """ import sys @@ -108,7 +108,7 @@ class DebugInterpreter(ast.NodeVisitor): - """Interpret AST nodes to gleam useful debugging information.""" + """Interpret AST nodes to gleam useful debugging information. """ def __init__(self, frame): self.frame = frame @@ -162,10 +162,7 @@ def visit_Compare(self, comp): left = comp.left left_explanation, left_result = self.visit(left) - got_result = False for op, next_op in zip(comp.ops, comp.comparators): - if got_result and not result: - break next_explanation, next_result = self.visit(next_op) op_symbol = operator_map[op.__class__] explanation = "%s %s %s" % (left_explanation, op_symbol, @@ -177,9 +174,20 @@ __exprinfo_right=next_result) except Exception: raise Failure(explanation) - else: - got_result = True + try: + if not result: + break + except KeyboardInterrupt: + raise + except: + break left_explanation, left_result = next_explanation, next_result + + rcomp = py.code._reprcompare + if rcomp: + res = rcomp(op_symbol, left_result, next_result) + if res: + explanation = res return explanation, result def visit_BoolOp(self, boolop): @@ -259,20 +267,9 @@ result = self.frame.eval(co, **ns) except Exception: raise Failure(explanation) - # Only show result explanation if it's not a builtin call or returns a - # bool. 
- if not isinstance(call.func, ast.Name) or \ - not self._is_builtin_name(call.func): - source = "isinstance(__exprinfo_value, bool)" - co = self._compile(source) - try: - is_bool = self.frame.eval(co, __exprinfo_value=result) - except Exception: - is_bool = False - if not is_bool: - pattern = "%s\n{%s = %s\n}" - rep = self.frame.repr(result) - explanation = pattern % (rep, rep, explanation) + pattern = "%s\n{%s = %s\n}" + rep = self.frame.repr(result) + explanation = pattern % (rep, rep, explanation) return explanation, result def _is_builtin_name(self, name): @@ -295,6 +292,9 @@ result = self.frame.eval(co, __exprinfo_expr=source_result) except Exception: raise Failure(explanation) + explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), + self.frame.repr(result), + source_explanation, attr.attr) # Check if the attr is from an instance. source = "%r in getattr(__exprinfo_expr, '__dict__', {})" source = source % (attr.attr,) @@ -325,10 +325,11 @@ def visit_Assign(self, assign): value_explanation, value_result = self.visit(assign.value) explanation = "... 
= %s" % (value_explanation,) - name = ast.Name("__exprinfo_expr", ast.Load(), assign.value.lineno, - assign.value.col_offset) - new_assign = ast.Assign(assign.targets, name, assign.lineno, - assign.col_offset) + name = ast.Name("__exprinfo_expr", ast.Load(), + lineno=assign.value.lineno, + col_offset=assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, + col_offset=assign.col_offset) mod = ast.Module([new_assign]) co = self._compile(mod, "exec") try: diff --git a/py/_code/_assertionold.py b/py/_code/_assertionold.py --- a/py/_code/_assertionold.py +++ b/py/_code/_assertionold.py @@ -3,7 +3,7 @@ from compiler import parse, ast, pycodegen from py._code.assertion import BuiltinAssertionError, _format_explanation -passthroughex = (KeyboardInterrupt, SystemExit, MemoryError) +passthroughex = py.builtin._sysex class Failure: def __init__(self, node): @@ -496,7 +496,7 @@ #frame = py.code.Frame(frame) #return interpret(line, frame) - tb = excinfo.traceback[-1] + tb = excinfo.traceback[-1] source = str(tb.statement).strip() x = interpret(source, tb.frame, should_fail=True) if not isinstance(x, str): diff --git a/py/_code/assertion.py b/py/_code/assertion.py --- a/py/_code/assertion.py +++ b/py/_code/assertion.py @@ -3,14 +3,23 @@ BuiltinAssertionError = py.builtin.builtins.AssertionError +_reprcompare = None # if set, will be called by assert reinterp for comparison ops def _format_explanation(explanation): - # uck! See CallFunc for where \n{ and \n} escape sequences are used + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. 
+ """ raw_lines = (explanation or '').split('\n') - # escape newlines not followed by { and } + # escape newlines not followed by {, } and ~ lines = [raw_lines[0]] for l in raw_lines[1:]: - if l.startswith('{') or l.startswith('}'): + if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l @@ -28,23 +37,25 @@ stackcnt[-1] += 1 stackcnt.append(0) result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) - else: + elif line.startswith('}'): assert line.startswith('}') stack.pop() stackcnt.pop() result[stack[-1]] += line[1:] + else: + assert line.startswith('~') + result.append(' '*len(stack) + line[1:]) assert len(stack) == 1 return '\n'.join(result) class AssertionError(BuiltinAssertionError): - def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: try: self.msg = str(args[0]) - except (KeyboardInterrupt, SystemExit): + except py.builtin._sysex: raise except: self.msg = "<[broken __repr__] %s at %0xd>" %( @@ -52,18 +63,24 @@ else: f = py.code.Frame(sys._getframe(1)) try: - source = f.statement - source = str(source.deindent()).strip() + source = f.code.fullsource + if source is not None: + try: + source = source.getstatement(f.lineno, assertion=True) + except IndexError: + source = None + else: + source = str(source.deindent()).strip() except py.error.ENOENT: source = None # this can also occur during reinterpretation, when the # co_filename is set to "". 
if source: self.msg = reinterpret(source, f, should_fail=True) - if not self.args: - self.args = (self.msg,) else: - self.msg = None + self.msg = "" + if not self.args: + self.args = (self.msg,) if sys.version_info > (3, 0): AssertionError.__module__ = "builtins" @@ -74,4 +91,4 @@ from py._code._assertionnew import interpret as reinterpret else: reinterpret = reinterpret_old - + diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -9,15 +9,15 @@ """ wrapper around Python code objects """ def __init__(self, rawcode): rawcode = py.code.getrawcode(rawcode) - self.raw = rawcode + self.raw = rawcode try: self.filename = rawcode.co_filename self.firstlineno = rawcode.co_firstlineno - 1 self.name = rawcode.co_name - except AttributeError: + except AttributeError: raise TypeError("not a code object: %r" %(rawcode,)) - - def __eq__(self, other): + + def __eq__(self, other): return self.raw == other.raw def __ne__(self, other): @@ -27,11 +27,11 @@ """ return a path object pointing to source code""" p = py.path.local(self.raw.co_filename) if not p.check(): - # XXX maybe try harder like the weird logic - # in the standard lib [linecache.updatecache] does? + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? 
p = self.raw.co_filename return p - + path = property(path, None, None, "path of this code object") def fullsource(self): @@ -42,7 +42,7 @@ return full fullsource = property(fullsource, None, None, "full source containing this code object") - + def source(self): """ return a py.code.Source object for the code object's source only """ @@ -81,7 +81,7 @@ returns the result of the evaluation """ - f_locals = self.f_locals.copy() + f_locals = self.f_locals.copy() f_locals.update(vars) return eval(code, self.f_globals, f_locals) @@ -90,7 +90,7 @@ 'vars' are optiona; additional local variables """ - f_locals = self.f_locals.copy() + f_locals = self.f_locals.copy() f_locals.update(vars) py.builtin.exec_(code, self.f_globals, f_locals ) @@ -115,8 +115,8 @@ class TracebackEntry(object): """ a single entry in a traceback """ - - exprinfo = None + + exprinfo = None def __init__(self, rawentry): self._rawentry = rawentry @@ -153,13 +153,14 @@ x = py.code._reinterpret(source, self.frame, should_fail=True) if not isinstance(x, str): raise TypeError("interpret returned non-string %r" % (x,)) - self.exprinfo = x + self.exprinfo = x return self.exprinfo def getfirstlinesource(self): - return self.frame.code.firstlineno + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) - def getsource(self): + def getsource(self): """ return failing source code. """ source = self.frame.code.fullsource if source is None: @@ -167,64 +168,64 @@ start = self.getfirstlinesource() end = self.lineno try: - _, end = source.getstatementrange(end) - except IndexError: - end = self.lineno + 1 - # heuristic to stop displaying source on e.g. + _, end = source.getstatementrange(end) + except IndexError: + end = self.lineno + 1 + # heuristic to stop displaying source on e.g. 
# if something: # assume this causes a NameError - # # _this_ lines and the one - # below we don't want from entry.getsource() - for i in range(self.lineno, end): - if source[i].rstrip().endswith(':'): + # # _this_ lines and the one + # below we don't want from entry.getsource() + for i in range(self.lineno, end): + if source[i].rstrip().endswith(':'): end = i + 1 - break + break return source[start:end] source = property(getsource) def ishidden(self): - """ return True if the current frame has a var __tracebackhide__ + """ return True if the current frame has a var __tracebackhide__ resolving to True - + mostly for internal use """ - try: - return self.frame.eval("__tracebackhide__") - except (SystemExit, KeyboardInterrupt): + try: + return self.frame.eval("__tracebackhide__") + except py.builtin._sysex: raise except: - return False + return False - def __str__(self): - try: - fn = str(self.path) - except py.error.Error: + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: fn = '???' - name = self.frame.code.name - try: + name = self.frame.code.name + try: line = str(self.statement).lstrip() except KeyboardInterrupt: raise except: line = "???" - return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) def name(self): return self.frame.code.raw.co_name name = property(name, None, None, "co_name of underlaying code") class Traceback(list): - """ Traceback objects encapsulate and offer higher level - access to Traceback entries. + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. """ - Entry = TracebackEntry + Entry = TracebackEntry def __init__(self, tb): """ initialize from given python traceback object. 
""" if hasattr(tb, 'tb_next'): - def f(cur): - while cur is not None: + def f(cur): + while cur is not None: yield self.Entry(cur) - cur = cur.tb_next - list.__init__(self, f(tb)) + cur = cur.tb_next + list.__init__(self, f(tb)) else: list.__init__(self, tb) @@ -243,7 +244,7 @@ codepath = code.path if ((path is None or codepath == path) and (excludepath is None or not hasattr(codepath, 'relto') or - not codepath.relto(excludepath)) and + not codepath.relto(excludepath)) and (lineno is None or x.lineno == lineno) and (firstlineno is None or x.frame.code.firstlineno == firstlineno)): return Traceback(x._rawentry) @@ -269,7 +270,7 @@ def getcrashentry(self): """ return last non-hidden traceback entry that lead - to the exception of a traceback. + to the exception of a traceback. """ tb = self.filter() if not tb: @@ -282,17 +283,17 @@ """ cache = {} for i, entry in enumerate(self): - key = entry.frame.code.path, entry.lineno + key = entry.frame.code.path, entry.lineno #print "checking for recursion at", key l = cache.setdefault(key, []) - if l: + if l: f = entry.frame loc = f.f_locals - for otherloc in l: - if f.is_true(f.eval(co_equal, + for otherloc in l: + if f.is_true(f.eval(co_equal, __recursioncache_locals_1=loc, __recursioncache_locals_2=otherloc)): - return i + return i l.append(entry.frame.f_locals) return None @@ -303,7 +304,7 @@ """ wraps sys.exc_info() objects and offers help for navigating the traceback. """ - _striptext = '' + _striptext = '' def __init__(self, tup=None, exprinfo=None): # NB. all attributes are private! Subclasses or other # ExceptionInfo-like classes may have different attributes. 
@@ -318,14 +319,14 @@ self._excinfo = tup self.type, self.value, tb = self._excinfo self.typename = self.type.__name__ - self.traceback = py.code.Traceback(tb) + self.traceback = py.code.Traceback(tb) def __repr__(self): return "" % (self.typename, len(self.traceback)) - def exconly(self, tryshort=False): + def exconly(self, tryshort=False): """ return the exception as a string - + when 'tryshort' resolves to True, and the exception is a py.code._AssertionError, only the actual exception part of the exception representation is returned (so 'AssertionError: ' is @@ -334,14 +335,14 @@ lines = py.std.traceback.format_exception_only(self.type, self.value) text = ''.join(lines) text = text.rstrip() - if tryshort: - if text.startswith(self._striptext): + if tryshort: + if text.startswith(self._striptext): text = text[len(self._striptext):] return text - def errisinstance(self, exc): + def errisinstance(self, exc): """ return True if the exception is an instance of exc """ - return isinstance(self.value, exc) + return isinstance(self.value, exc) def _getreprcrash(self): exconly = self.exconly(tryshort=True) @@ -350,14 +351,22 @@ reprcrash = ReprFileLocation(path, lineno+1, exconly) return reprcrash - def getrepr(self, showlocals=False, style="long", + def getrepr(self, showlocals=False, style="long", abspath=False, tbfilter=True, funcargs=False): """ return str()able representation of this exception info. 
- showlocals: show locals per traceback entry - style: long|short|no traceback style + showlocals: show locals per traceback entry + style: long|short|no|native traceback style tbfilter: hide entries (where __tracebackhide__ is true) """ - fmt = FormattedExcinfo(showlocals=showlocals, style=style, + if style == 'native': + import traceback + return ''.join(traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry, + )) + + fmt = FormattedExcinfo(showlocals=showlocals, style=style, abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) return fmt.repr_excinfo(self) @@ -370,27 +379,27 @@ entry = self.traceback[-1] loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) return unicode(loc) - + class FormattedExcinfo(object): - """ presenting information about failing Functions and Generators. """ From noreply at buildbot.pypy.org Fri Sep 23 13:12:41 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:41 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: This will make rtyper happier...hopefully Message-ID: <20110923111241.C47FA820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47476:4c0e74dd0f30 Date: 2011-03-16 14:53 +0100 http://bitbucket.org/pypy/pypy/changeset/4c0e74dd0f30/ Log: This will make rtyper happier...hopefully diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -412,10 +412,12 @@ return len(self.cast_from_void_star(w_list.storage)) def getitem(self, w_list, index): + l = self.cast_from_void_star(w_list.storage) try: - return self.wrap(self.cast_from_void_star(w_list.storage)[index]) + r = l[index] except IndexError: # make RPython raise the exception raise + return self.wrap(r) def getitems(self, w_list): return [self.wrap(item) for item in self.cast_from_void_star(w_list.storage)] From noreply at buildbot.pypy.org Fri Sep 23 
13:12:43 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:43 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Just create a copy from storage when extending an EmptyList Message-ID: <20110923111243.006E0820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47477:24c514951069 Date: 2011-03-16 15:26 +0100 http://bitbucket.org/pypy/pypy/changeset/24c514951069/ Log: Just create a copy from storage when extending an EmptyList diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -233,9 +233,9 @@ self.append(w_list, w_item) def extend(self, w_list, w_other): - #XXX items are wrapped and unwrapped again - w_list.strategy = w_other.strategy - w_list.strategy.init_from_list_w(w_list, w_other.getitems()) + strategy = w_list.strategy = w_other.strategy + items = strategy.cast_from_void_star(w_other.storage)[:] # copy! 
+ w_list.storage = strategy.cast_to_void_star(items) def reverse(self, w_list): pass diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -378,6 +378,14 @@ l.extend([10]) assert l == range(11) + l = [] + m = [1,2,3] + l.extend(m) + m[0] = 5 + assert m == [5,2,3] + assert l == [1,2,3] + + def test_extend_tuple(self): l = l0 = [1] l.extend((2,)) From noreply at buildbot.pypy.org Fri Sep 23 13:12:44 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:44 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Removed old rangelist implementation Message-ID: <20110923111244.3FD40820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47478:4fe6b0555b56 Date: 2011-03-17 11:47 +0100 http://bitbucket.org/pypy/pypy/changeset/4fe6b0555b56/ Log: Removed old rangelist implementation diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -349,7 +349,7 @@ config.objspace.opcodes.suggest(CALL_LIKELY_BUILTIN=True) if level in ['2', '3', 'jit']: config.objspace.opcodes.suggest(CALL_METHOD=True) - config.objspace.std.suggest(withrangelist=False) + config.objspace.std.suggest(withrangelist=True) config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(builtinshortcut=True) @@ -368,7 +368,7 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=False) + config.objspace.std.suggest(withrangelist=True) config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(withmapdict=True) config.objspace.std.suggest(withstrslice=True) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- 
a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -69,7 +69,7 @@ def __repr__(w_self): """ representation for debugging purposes """ - return "%s(%s, %s)" % (w_self.__class__.__name__, w_self.strategy, w_self.storage._content) + return "%s(%s, %s)" % (w_self.__class__.__name__, w_self.strategy, w_self.storage._x) def unwrap(w_list, space): # for tests only! diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -25,8 +25,6 @@ "ropeobject.W_RopeIterObject"], "withropeunicode": ["ropeunicodeobject.W_RopeUnicodeObject", "ropeunicodeobject.W_RopeUnicodeIterObject"], - "withrangelist" : ["rangeobject.W_RangeListObject", - "rangeobject.W_RangeIterObject"], "withtproxy" : ["proxyobject.W_TransparentList", "proxyobject.W_TransparentDict"], } @@ -90,7 +88,6 @@ from pypy.objspace.std import iterobject from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject - from pypy.objspace.std import rangeobject from pypy.objspace.std import proxyobject from pypy.objspace.std import fake import pypy.objspace.std.default # register a few catch-all multimethods @@ -252,11 +249,6 @@ (unicodeobject.W_UnicodeObject, strbufobject.delegate_buf2unicode) ] - if config.objspace.std.withrangelist: - self.typeorder[rangeobject.W_RangeListObject] += [ - (listobject.W_ListObject, - rangeobject.delegate_range2list), - ] if config.objspace.std.withsmalltuple: self.typeorder[smalltupleobject.W_SmallTupleObject] += [ (tupleobject.W_TupleObject, smalltupleobject.delegate_SmallTuple2Tuple)] diff --git a/pypy/objspace/std/rangeobject.py b/pypy/objspace/std/rangeobject.py deleted file mode 100644 --- a/pypy/objspace/std/rangeobject.py +++ /dev/null @@ -1,233 +0,0 @@ -from pypy.interpreter.error import OperationError -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all -from 
pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.inttype import wrapint -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice -from pypy.objspace.std.listobject import W_ListObject -from pypy.objspace.std import listtype, iterobject, slicetype -from pypy.interpreter import gateway, baseobjspace - -def length(start, stop, step): - if step > 0: - if stop <= start: - return 0 - return (stop - start + step - 1)/step - - else: # step must be < 0 - if stop >= start: - return 0 - return (start - stop - step - 1)/-step - - -class W_RangeListObject(W_Object): - typedef = listtype.list_typedef - - def __init__(w_self, start, step, length): - assert step != 0 - w_self.start = start - w_self.step = step - w_self.length = length - w_self.w_list = None - - def force(w_self, space): - if w_self.w_list is not None: - return w_self.w_list - start = w_self.start - step = w_self.step - length = w_self.length - if not length: - w_self.w_list = space.newlist([]) - return w_self.w_list - - arr = [None] * length # this is to avoid using append. 
- - i = start - n = 0 - while n < length: - arr[n] = wrapint(space, i) - i += step - n += 1 - - w_self.w_list = space.newlist(arr) - return w_self.w_list - - def getitem(w_self, i): - if i < 0: - i += w_self.length - if i < 0: - raise IndexError - elif i >= w_self.length: - raise IndexError - return w_self.start + i * w_self.step - - def getitem_unchecked(w_self, i): - # bounds not checked, on purpose - return w_self.start + i * w_self.step - - def __repr__(w_self): - if w_self.w_list is None: - return "W_RangeListObject(%s, %s, %s)" % ( - w_self.start, w_self.step, w_self.length) - else: - return "W_RangeListObject(%r)" % (w_self.w_list, ) - -def delegate_range2list(space, w_rangelist): - return w_rangelist.force(space) - -def len__RangeList(space, w_rangelist): - if w_rangelist.w_list is not None: - return space.len(w_rangelist.w_list) - return wrapint(space, w_rangelist.length) - - -def getitem__RangeList_ANY(space, w_rangelist, w_index): - if w_rangelist.w_list is not None: - return space.getitem(w_rangelist.w_list, w_index) - idx = space.getindex_w(w_index, space.w_IndexError, "list index") - try: - return wrapint(space, w_rangelist.getitem(idx)) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("list index out of range")) - -def getitem__RangeList_Slice(space, w_rangelist, w_slice): - if w_rangelist.w_list is not None: - return space.getitem(w_rangelist.w_list, w_slice) - length = w_rangelist.length - start, stop, step, slicelength = w_slice.indices4(space, length) - assert slicelength >= 0 - rangestart = w_rangelist.getitem_unchecked(start) - rangestep = w_rangelist.step * step - return W_RangeListObject(rangestart, rangestep, slicelength) - -def getslice__RangeList_ANY_ANY(space, w_rangelist, w_start, w_stop): - if w_rangelist.w_list is not None: - return space.getslice(w_rangelist.w_list, w_start, w_stop) - length = w_rangelist.length - start, stop = normalize_simple_slice(space, length, w_start, w_stop) - slicelength = stop - 
start - assert slicelength >= 0 - rangestart = w_rangelist.getitem_unchecked(start) - rangestep = w_rangelist.step - return W_RangeListObject(rangestart, rangestep, slicelength) - -def iter__RangeList(space, w_rangelist): - return W_RangeIterObject(w_rangelist) - -def repr__RangeList(space, w_rangelist): - if w_rangelist.w_list is not None: - return space.repr(w_rangelist.w_list) - if w_rangelist.length == 0: - return space.wrap('[]') - result = [''] * w_rangelist.length - i = w_rangelist.start - n = 0 - while n < w_rangelist.length: - result[n] = str(i) - i += w_rangelist.step - n += 1 - return space.wrap("[" + ", ".join(result) + "]") - -def inplace_add__RangeList_ANY(space, w_rangelist, w_iterable2): - space.inplace_add(w_rangelist.force(space), w_iterable2) - return w_rangelist - -def inplace_mul__RangeList_ANY(space, w_rangelist, w_number): - space.inplace_mul(w_rangelist.force(space), w_number) - return w_rangelist - - -def list_pop__RangeList_ANY(space, w_rangelist, w_idx=-1): - if w_rangelist.w_list is not None: - raise FailedToImplement - length = w_rangelist.length - if length == 0: - raise OperationError(space.w_IndexError, - space.wrap("pop from empty list")) - idx = space.int_w(w_idx) - if idx == 0: - result = w_rangelist.start - w_rangelist.start += w_rangelist.step - w_rangelist.length -= 1 - return wrapint(space, result) - if idx == -1 or idx == length - 1: - w_rangelist.length -= 1 - return wrapint( - space, w_rangelist.start + (length - 1) * w_rangelist.step) - if idx >= w_rangelist.length: - raise OperationError(space.w_IndexError, - space.wrap("pop index out of range")) - raise FailedToImplement - -def list_reverse__RangeList(space, w_rangelist): - # probably somewhat useless, but well... 
- if w_rangelist.w_list is not None: - raise FailedToImplement - w_rangelist.start = w_rangelist.getitem_unchecked(w_rangelist.length-1) - w_rangelist.step = -w_rangelist.step - -def list_sort__RangeList_None_None_ANY(space, w_rangelist, w_cmp, - w_keyfunc, w_reverse): - # even more useless but fun - has_reverse = space.is_true(w_reverse) - if w_rangelist.w_list is not None: - raise FailedToImplement - if has_reverse: - factor = -1 - else: - factor = 1 - reverse = w_rangelist.step * factor < 0 - if reverse: - w_rangelist.start = w_rangelist.getitem_unchecked(w_rangelist.length-1) - w_rangelist.step = -w_rangelist.step - return space.w_None - - -class W_RangeIterObject(iterobject.W_AbstractSeqIterObject): - pass - -def iter__RangeIter(space, w_rangeiter): - return w_rangeiter - -def next__RangeIter(space, w_rangeiter): - w_rangelist = w_rangeiter.w_seq - if w_rangelist is None: - raise OperationError(space.w_StopIteration, space.w_None) - assert isinstance(w_rangelist, W_RangeListObject) - index = w_rangeiter.index - if w_rangelist.w_list is not None: - try: - w_item = space.getitem(w_rangelist.w_list, - wrapint(space, index)) - except OperationError, e: - w_rangeiter.w_seq = None - if not e.match(space, space.w_IndexError): - raise - raise OperationError(space.w_StopIteration, space.w_None) - else: - if index >= w_rangelist.length: - w_rangeiter.w_seq = None - raise OperationError(space.w_StopIteration, space.w_None) - w_item = wrapint( - space, - w_rangelist.getitem_unchecked(index)) - w_rangeiter.index = index + 1 - return w_item - -# XXX __length_hint__() -##def len__RangeIter(space, w_rangeiter): -## if w_rangeiter.w_seq is None: -## return wrapint(space, 0) -## index = w_rangeiter.index -## w_length = space.len(w_rangeiter.w_seq) -## w_len = space.sub(w_length, wrapint(space, index)) -## if space.is_true(space.lt(w_len, wrapint(space, 0))): -## w_len = wrapint(space, 0) -## return w_len - -registerimplementation(W_RangeListObject) 
-registerimplementation(W_RangeIterObject) - -register_all(vars(), listtype) From noreply at buildbot.pypy.org Fri Sep 23 13:12:45 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:45 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: a little refactoring Message-ID: <20110923111245.6C73C820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47479:e8148de5e147 Date: 2011-03-18 11:12 +0100 http://bitbucket.org/pypy/pypy/changeset/e8148de5e147/ Log: a little refactoring diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -243,7 +243,7 @@ class RangeListStrategy(ListStrategy): def switch_to_integer_strategy(self, w_list): - items = self._getitem_range(w_list, False) + items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) w_list.storage = strategy.cast_to_void_star(items) @@ -277,10 +277,10 @@ return self.wrap(start + i * step) def getitems(self, w_list): - return self._getitem_range(w_list, True) + return self._getitems_range(w_list, True) @specialize.arg(2) - def _getitem_range(self, w_list, wrap_items): + def _getitems_range(self, w_list, wrap_items): l = self.cast_from_void_star(w_list.storage) start = l[0] step = l[1] @@ -375,10 +375,10 @@ def reverse(self, w_list): v = self.cast_from_void_star(w_list.storage) - last = w_list.getitem(-1) #XXX wrapped + w_last = w_list.getitem(-1) #XXX wrapped length = v[2] skip = v[1] - new = self.cast_to_void_star((self.unwrap(last), -skip, length)) + new = self.cast_to_void_star((self.unwrap(w_last), -skip, length)) w_list.storage = new class AbstractUnwrappedStrategy(object): From noreply at buildbot.pypy.org Fri Sep 23 13:12:46 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:46 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Replaced storage 
with lstorage to avoid collision with mapdict Message-ID: <20110923111246.99BC7820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47480:54924c51a01b Date: 2011-03-18 11:22 +0100 http://bitbucket.org/pypy/pypy/changeset/54924c51a01b/ Log: Replaced storage with lstorage to avoid collision with mapdict diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -64,12 +64,12 @@ w_self = instantiate(W_ListObject) w_self.space = space w_self.strategy = strategy - w_self.storage = storage + w_self.lstorage = storage return w_self def __repr__(w_self): """ representation for debugging purposes """ - return "%s(%s, %s)" % (w_self.__class__.__name__, w_self.strategy, w_self.storage._x) + return "%s(%s, %s)" % (w_self.__class__.__name__, w_self.strategy, w_self.lstorage._x) def unwrap(w_list, space): # for tests only! @@ -189,7 +189,7 @@ def init_from_list_w(self, w_list, list_w): assert len(list_w) == 0 - w_list.storage = self.cast_to_void_star(None) + w_list.lstorage = self.cast_to_void_star(None) cast_to_void_star, cast_from_void_star = rerased.new_erasing_pair("empty") cast_to_void_star = staticmethod(cast_to_void_star) @@ -234,8 +234,8 @@ def extend(self, w_list, w_other): strategy = w_list.strategy = w_other.strategy - items = strategy.cast_from_void_star(w_other.storage)[:] # copy! - w_list.storage = strategy.cast_to_void_star(items) + items = strategy.cast_from_void_star(w_other.lstorage)[:] # copy! 
+ w_list.lstorage = strategy.cast_to_void_star(items) def reverse(self, w_list): pass @@ -245,7 +245,7 @@ def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) - w_list.storage = strategy.cast_to_void_star(items) + w_list.lstorage = strategy.cast_to_void_star(items) def wrap(self, intval): return self.space.wrap(intval) @@ -261,10 +261,10 @@ cast_from_void_star = staticmethod(cast_from_void_star) def length(self, w_list): - return self.cast_from_void_star(w_list.storage)[2] + return self.cast_from_void_star(w_list.lstorage)[2] def getitem(self, w_list, i): - v = self.cast_from_void_star(w_list.storage) + v = self.cast_from_void_star(w_list.lstorage) start = v[0] step = v[1] length = v[2] @@ -281,7 +281,7 @@ @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) start = l[0] step = l[1] length = l[2] @@ -302,7 +302,7 @@ return r def getslice(self, w_list, start, stop, step, length): - v = self.cast_from_void_star(w_list.storage) + v = self.cast_from_void_star(w_list.lstorage) old_start = v[0] old_step = v[1] old_length = v[2] @@ -313,12 +313,12 @@ def append(self, w_list, w_item): if is_W_IntObject(w_item): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) step = l[1] last_in_range = self.getitem(w_list, -1) if self.unwrap(w_item) - step == self.unwrap(last_in_range): new = self.cast_to_void_star((l[0],l[1],l[2]+1)) - w_list.storage = new + w_list.lstorage = new return self.switch_to_integer_strategy(w_list) @@ -343,14 +343,14 @@ if index < 0: index += self.length(w_list) - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) if index in [0, self.length(w_list)-1]: r = self.getitem(w_list, index) if index == 0: new = self.cast_to_void_star((l[0]+l[1],l[1],l[2]-1)) else: new 
= self.cast_to_void_star((l[0],l[1],l[2]-1)) - w_list.storage = new + w_list.lstorage = new w_list.check_empty_strategy() return r @@ -374,12 +374,12 @@ w_list.extend(items_w) def reverse(self, w_list): - v = self.cast_from_void_star(w_list.storage) + v = self.cast_from_void_star(w_list.lstorage) w_last = w_list.getitem(-1) #XXX wrapped length = v[2] skip = v[1] new = self.cast_to_void_star((self.unwrap(w_last), -skip, length)) - w_list.storage = new + w_list.lstorage = new class AbstractUnwrappedStrategy(object): _mixin_ = True @@ -406,13 +406,13 @@ def init_from_list_w(self, w_list, list_w): l = [self.unwrap(w_item) for w_item in list_w] - w_list.storage = self.cast_to_void_star(l) + w_list.lstorage = self.cast_to_void_star(l) def length(self, w_list): - return len(self.cast_from_void_star(w_list.storage)) + return len(self.cast_from_void_star(w_list.lstorage)) def getitem(self, w_list, index): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) try: r = l[index] except IndexError: # make RPython raise the exception @@ -420,11 +420,11 @@ return self.wrap(r) def getitems(self, w_list): - return [self.wrap(item) for item in self.cast_from_void_star(w_list.storage)] + return [self.wrap(item) for item in self.cast_from_void_star(w_list.lstorage)] def getslice(self, w_list, start, stop, step, length): if step == 1: - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) assert start >= 0 assert stop >= 0 sublist = l[start:stop] @@ -440,14 +440,14 @@ def append(self, w_list, w_item): if self.is_correct_type(w_item): - self.cast_from_void_star(w_list.storage).append(self.unwrap(w_item)) + self.cast_from_void_star(w_list.lstorage).append(self.unwrap(w_item)) return w_list.switch_to_object_strategy() w_list.append(w_item) def insert(self, w_list, index, w_item): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) if self.is_correct_type(w_item): 
l.insert(index, self.unwrap(w_item)) @@ -457,9 +457,9 @@ w_list.insert(index, w_item) def extend(self, w_list, w_other): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) if self.list_is_correct_type(w_other): - l += self.cast_from_void_star(w_other.storage) + l += self.cast_from_void_star(w_other.lstorage) return #XXX unnecessary copy if w_other is ObjectList @@ -471,7 +471,7 @@ w_list.extend(w_other) def setitem(self, w_list, index, w_item): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) if self.is_correct_type(w_item): l[index] = self.unwrap(w_item) @@ -483,7 +483,7 @@ def setslice(self, w_list, start, step, slicelength, sequence_w): #XXX inefficient assert slicelength >= 0 - items = self.cast_from_void_star(w_list.storage) + items = self.cast_from_void_star(w_list.lstorage) if (self is not self.space.fromcache(ObjectListStrategy) and not self.list_is_correct_type(W_ListObject(self.space, sequence_w)) and @@ -536,12 +536,12 @@ start += step def deleteitem(self, w_list, index): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) del l[index] w_list.check_empty_strategy() def deleteslice(self, w_list, start, step, slicelength): - items = self.cast_from_void_star(w_list.storage) + items = self.cast_from_void_star(w_list.lstorage) if slicelength==0: return @@ -575,18 +575,18 @@ w_list.check_empty_strategy() def pop(self, w_list, index): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) w_item = self.wrap(l.pop(index)) w_list.check_empty_strategy() return w_item def inplace_mul(self, w_list, times): - l = self.cast_from_void_star(w_list.storage) + l = self.cast_from_void_star(w_list.lstorage) l *= times def reverse(self, w_list): - self.cast_from_void_star(w_list.storage).reverse() + self.cast_from_void_star(w_list.lstorage).reverse() class ObjectListStrategy(AbstractUnwrappedStrategy, 
ListStrategy): _none_value = None @@ -608,7 +608,7 @@ return w_list.strategy is self.space.fromcache(ObjectListStrategy) def init_from_list_w(self, w_list, list_w): - w_list.storage = self.cast_to_void_star(list_w) + w_list.lstorage = self.cast_to_void_star(list_w) # XXX implement getitems without copying here From noreply at buildbot.pypy.org Fri Sep 23 13:12:47 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:47 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: added new method copy_into to extend an EmptyList with other lists Message-ID: <20110923111247.C4E11820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47481:4fc5434c0449 Date: 2011-03-22 11:23 +0100 http://bitbucket.org/pypy/pypy/changeset/4fc5434c0449/ Log: added new method copy_into to extend an EmptyList with other lists diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -86,6 +86,8 @@ self.strategy = self.space.fromcache(EmptyListStrategy) self.strategy.init_from_list_w(self, []) + def copy_into(self, other): + self.strategy.copy_into(self, other) # ___________________________________________________ def append(w_list, w_item): @@ -233,9 +235,7 @@ self.append(w_list, w_item) def extend(self, w_list, w_other): - strategy = w_list.strategy = w_other.strategy - items = strategy.cast_from_void_star(w_other.lstorage)[:] # copy! 
- w_list.lstorage = strategy.cast_to_void_star(items) + w_other.copy_into(w_list) def reverse(self, w_list): pass @@ -260,6 +260,10 @@ cast_to_void_star = staticmethod(cast_to_void_star) cast_from_void_star = staticmethod(cast_from_void_star) + def copy_into(self, w_list, w_other): + w_other.strategy = self + w_other.lstorage = w_list.lstorage + def length(self, w_list): return self.cast_from_void_star(w_list.lstorage)[2] @@ -408,6 +412,11 @@ l = [self.unwrap(w_item) for w_item in list_w] w_list.lstorage = self.cast_to_void_star(l) + def copy_into(self, w_list, w_other): + w_other.strategy = self + items = self.cast_from_void_star(w_list.lstorage)[:] + w_other.lstorage = self.cast_to_void_star(items) + def length(self, w_list): return len(self.cast_from_void_star(w_list.lstorage)) From noreply at buildbot.pypy.org Fri Sep 23 13:12:48 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:48 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: index is non-negative because of get_positive_index in list_insert_List_ANY_ANY Message-ID: <20110923111248.EF2B8820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47482:f3df821d227f Date: 2011-03-22 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/f3df821d227f/ Log: index is non-negative because of get_positive_index in list_insert_List_ANY_ANY diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -456,6 +456,7 @@ w_list.append(w_item) def insert(self, w_list, index, w_item): + assert index >= 0 l = self.cast_from_void_star(w_list.lstorage) if self.is_correct_type(w_item): From noreply at buildbot.pypy.org Fri Sep 23 13:12:50 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:50 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: This index is non-negativ too Message-ID: 
<20110923111250.25D76820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47483:bc617ef7061f Date: 2011-03-22 13:04 +0100 http://bitbucket.org/pypy/pypy/changeset/bc617ef7061f/ Log: This index is non-negativ too diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -370,6 +370,7 @@ w_list.setslice(start, step, slicelength, sequence_w) def insert(self, w_list, index, w_item): + assert index >= 0 self.switch_to_integer_strategy(w_list) w_list.insert(index, w_item) From noreply at buildbot.pypy.org Fri Sep 23 13:12:51 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:51 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Moved non-negative prove to get_positive_index Message-ID: <20110923111251.501C4820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47484:bbc59e3c25ea Date: 2011-03-22 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/bbc59e3c25ea/ Log: Moved non-negative prove to get_positive_index diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -370,7 +370,6 @@ w_list.setslice(start, step, slicelength, sequence_w) def insert(self, w_list, index, w_item): - assert index >= 0 self.switch_to_integer_strategy(w_list) w_list.insert(index, w_item) @@ -457,7 +456,6 @@ w_list.append(w_item) def insert(self, w_list, index, w_item): - assert index >= 0 l = self.cast_from_void_star(w_list.lstorage) if self.is_correct_type(w_item): @@ -900,6 +898,7 @@ where = 0 elif where > length: where = length + assert where >= 0 return where def list_append__List_ANY(space, w_list, w_any): From noreply at buildbot.pypy.org Fri Sep 23 13:12:52 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:52 +0200 (CEST) Subject: 
[pypy-commit] pypy list-strategies: More tests for extend with empty list Message-ID: <20110923111252.79800820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47485:2a4e6bf4229e Date: 2011-03-23 09:51 +0100 http://bitbucket.org/pypy/pypy/changeset/2a4e6bf4229e/ Log: More tests for extend with empty list diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -145,6 +145,35 @@ l.extend(W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) + def test_empty_extend_with_any(self): + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) + empty.extend(W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(empty.strategy, IntegerListStrategy) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) + empty.extend(W_ListObject(self.space, [self.space.wrap("a"), self.space.wrap("b"), self.space.wrap("c")])) + assert isinstance(empty.strategy, StringListStrategy) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) + r = make_range_list(self.space, 1,3,7) + empty.extend(r) + assert isinstance(empty.strategy, RangeListStrategy) + print empty.getitem(6) + assert self.space.is_true(self.space.eq(empty.getitem(1), self.space.wrap(4))) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) + empty.extend(W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) + assert isinstance(empty.strategy, IntegerListStrategy) + + empty = W_ListObject(self.space, []) + assert isinstance(empty.strategy, EmptyListStrategy) + empty.extend(W_ListObject(self.space, [])) + 
assert isinstance(empty.strategy, EmptyListStrategy) + def test_rangelist(self): l = make_range_list(self.space, 1,3,7) assert isinstance(l.strategy, RangeListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:12:53 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:53 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: copy_into method needed for ALL ListStrategies Message-ID: <20110923111253.A796A820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47486:fd011d8f9e6d Date: 2011-03-23 09:53 +0100 http://bitbucket.org/pypy/pypy/changeset/fd011d8f9e6d/ Log: copy_into method needed for ALL ListStrategies diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -145,6 +145,9 @@ def init_from_list_w(self, w_list, list_w): raise NotImplementedError + def copy_into(self, w_list, w_other): + raise NotImplementedError + def length(self, w_list): raise NotImplementedError @@ -197,6 +200,9 @@ cast_to_void_star = staticmethod(cast_to_void_star) cast_from_void_star = staticmethod(cast_from_void_star) + def copy_into(self, w_list, w_other): + pass + def length(self, w_list): return 0 From noreply at buildbot.pypy.org Fri Sep 23 13:12:54 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:54 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: extend any list with emptylist Message-ID: <20110923111254.D485C820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47487:d6d8f46ff357 Date: 2011-03-23 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/d6d8f46ff357/ Log: extend any list with emptylist diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -476,6 +476,8 @@ if self.list_is_correct_type(w_other): l += 
self.cast_from_void_star(w_other.lstorage) return + elif w_other.strategy is self.space.fromcache(EmptyListStrategy): + return #XXX unnecessary copy if w_other is ObjectList list_w = w_other.getitems() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -174,6 +174,12 @@ empty.extend(W_ListObject(self.space, [])) assert isinstance(empty.strategy, EmptyListStrategy) + def test_extend_other_with_empty(self): + l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert isinstance(l.strategy, IntegerListStrategy) + l.extend(W_ListObject(self.space, [])) + assert isinstance(l.strategy, IntegerListStrategy) + def test_rangelist(self): l = make_range_list(self.space, 1,3,7) assert isinstance(l.strategy, RangeListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:12:56 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:56 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: (cfbolz, l.diekmann): one less copy in extend Message-ID: <20110923111256.0DA71820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47488:42ec7bd0633f Date: 2011-03-29 11:41 +0200 http://bitbucket.org/pypy/pypy/changeset/42ec7bd0633f/ Log: (cfbolz, l.diekmann): one less copy in extend diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -213,6 +213,7 @@ return W_ListObject(self.space, []) def getitems(self, w_list): + # cache result XXX return [] def append(self, w_list, w_item): @@ -479,10 +480,12 @@ elif w_other.strategy is self.space.fromcache(EmptyListStrategy): return - #XXX unnecessary copy if w_other is ObjectList list_w = w_other.getitems() - w_other = W_ListObject(self.space, list_w) - 
w_other.switch_to_object_strategy() + strategy = self.space.fromcache(ObjectListStrategy) + storage = strategy.cast_to_void_star(list_w) + # NB: w_other shares its storage with the original w_other. the new + # w_other does not survive long, so this is not a problem + w_other = W_ListObject.from_storage_and_strategy(self.space, storage, strategy) w_list.switch_to_object_strategy() w_list.extend(w_other) From noreply at buildbot.pypy.org Fri Sep 23 13:12:57 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:57 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: (l.diekmann, cfbolz): Message-ID: <20110923111257.3D5D5820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47489:c9f9f040c318 Date: 2011-03-29 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/c9f9f040c318/ Log: (l.diekmann, cfbolz): Implemented getitems_copy used in objspace.unpackiterable (+ tests) Now getitems() in ObjectListStrategy doesn't copy anymore diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -105,6 +105,8 @@ def getitems(self): return self.strategy.getitems(self) + def getitems_copy(self): + return self.strategy.getitems_copy(self) # ___________________________________________________ def inplace_mul(self, times): @@ -160,6 +162,9 @@ def getitems(self, w_list): raise NotImplementedError + def getitems_copy(self, w_list): + raise NotImplementedError + def append(self, w_list, w_item): raise NotImplementedError @@ -216,6 +221,9 @@ # cache result XXX return [] + def getitems_copy(self, w_list): + return [] + def append(self, w_list, w_item): w_list.__init__(self.space, [w_item]) @@ -290,6 +298,8 @@ def getitems(self, w_list): return self._getitems_range(w_list, True) + getitems_copy = getitems + @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.cast_from_void_star(w_list.lstorage) @@ 
-435,9 +445,11 @@ raise return self.wrap(r) - def getitems(self, w_list): + def getitems_copy(self, w_list): return [self.wrap(item) for item in self.cast_from_void_star(w_list.lstorage)] + getitems = getitems_copy + def getslice(self, w_list, start, stop, step, length): if step == 1: l = self.cast_from_void_star(w_list.lstorage) @@ -630,7 +642,8 @@ def init_from_list_w(self, w_list, list_w): w_list.lstorage = self.cast_to_void_star(list_w) - # XXX implement getitems without copying here + def getitems(self, w_list): + return self.cast_from_void_star(w_list.lstorage) class IntegerListStrategy(AbstractUnwrappedStrategy, ListStrategy): _none_value = 0 @@ -1047,7 +1060,7 @@ # The list is temporarily made empty, so that mutations performed # by comparison functions can't affect the slice of memory we're # sorting (allowing mutations during sorting is an IndexError or - # core-dump factory, since wrappeditems may change). + # core-dump factory, since the storage may change). w_list.__init__(space, []) # wrap each item in a KeyContainer if needed diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -387,8 +387,8 @@ def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems[:] - elif isinstance(w_obj, W_ListObject): # XXX enable fast path again - t = w_obj.getitems() + elif isinstance(w_obj, W_ListObject): + t = w_obj.getitems_copy() else: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: @@ -402,6 +402,7 @@ if isinstance(w_obj, W_TupleObject): t = w_obj.wrappeditems elif isinstance(w_obj, W_ListObject): + # XXX this can copy twice t = w_obj.getitems()[:] else: if unroll: diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ 
b/pypy/objspace/std/test/test_liststrategies.py @@ -232,3 +232,15 @@ assert isinstance(l.strategy, RangeListStrategy) l.setslice(0, 1, 3, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) + + def test_get_items_copy(self): + l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l2 = l1.getitems() + l2.append(self.space.wrap(4)) + assert not l2 == l1.getitems() + + l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap("two"), self.space.wrap(3)]) + l2 = l1.getitems() + l2.append(self.space.wrap("four")) + assert l2 == l1.getitems() + From noreply at buildbot.pypy.org Fri Sep 23 13:12:58 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:58 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Optimized add__List_List to not use getitems anymore Message-ID: <20110923111258.6AA44820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47490:6ce154248735 Date: 2011-03-29 17:17 +0200 http://bitbucket.org/pypy/pypy/changeset/6ce154248735/ Log: Optimized add__List_List to not use getitems anymore diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -86,6 +86,9 @@ self.strategy = self.space.fromcache(EmptyListStrategy) self.strategy.init_from_list_w(self, []) + def clone(self): + return self.strategy.clone(self) + def copy_into(self, other): self.strategy.copy_into(self, other) # ___________________________________________________ @@ -147,6 +150,9 @@ def init_from_list_w(self, w_list, list_w): raise NotImplementedError + def clone(self, w_list): + raise NotImplementedError + def copy_into(self, w_list, w_other): raise NotImplementedError @@ -205,6 +211,9 @@ cast_to_void_star = staticmethod(cast_to_void_star) cast_from_void_star = staticmethod(cast_from_void_star) + def clone(self, 
w_list): + return W_ListObject.from_storage_and_strategy(self.space, w_list.lstorage, self) + def copy_into(self, w_list, w_other): pass @@ -275,6 +284,10 @@ cast_to_void_star = staticmethod(cast_to_void_star) cast_from_void_star = staticmethod(cast_from_void_star) + def clone(self, w_list): + storage = w_list.lstorage # lstorage is tuple, no need to clone + w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, self) + def copy_into(self, w_list, w_other): w_other.strategy = self w_other.lstorage = w_list.lstorage @@ -429,6 +442,12 @@ l = [self.unwrap(w_item) for w_item in list_w] w_list.lstorage = self.cast_to_void_star(l) + def clone(self, w_list): + l = self.cast_from_void_star(w_list.lstorage) + storage = self.cast_to_void_star(l[:]) + w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, self) + return w_clone + def copy_into(self, w_list, w_other): w_other.strategy = self items = self.cast_from_void_star(w_list.lstorage)[:] @@ -761,7 +780,9 @@ return iterobject.W_FastListIterObject(w_list) def add__List_List(space, w_list1, w_list2): - return W_ListObject(space, w_list1.getitems() + w_list2.getitems()) + w_clone = w_list1.clone() + w_clone.extend(w_list2) + return w_clone def inplace_add__List_ANY(space, w_list1, w_iterable2): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -244,3 +244,18 @@ l2.append(self.space.wrap("four")) assert l2 == l1.getitems() + def test_clone(self): + l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + clone = l1.clone() + assert isinstance(clone.strategy, IntegerListStrategy) + clone.append(self.space.wrap(7)) + assert not self.space.eq_w(l1, clone) + + def test_add_does_not_use_getitems(self): + l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + 
l1.getitems = None + l2 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l2.getitems = None + l3 = self.space.add(l1, l2) + l4 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert self.space.eq_w(l3, l4) From noreply at buildbot.pypy.org Fri Sep 23 13:12:59 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:12:59 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Optimized W_ListObject.setslice to take w_list as slice instead of sequence_w Message-ID: <20110923111259.9EF32820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47491:4729d45b3dc6 Date: 2011-03-30 14:02 +0200 http://bitbucket.org/pypy/pypy/changeset/4729d45b3dc6/ Log: Optimized W_ListObject.setslice to take w_list as slice instead of sequence_w This avoids unnecessary wrapping and unwrapping (listview) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -89,6 +89,15 @@ def clone(self): return self.strategy.clone(self) + def _temporarily_as_objects(self): + if self.strategy is self.space.fromcache(ObjectListStrategy): + return self + list_w = self.getitems() + strategy = self.space.fromcache(ObjectListStrategy) + storage = strategy.cast_to_void_star(list_w) + w_objectlist = W_ListObject.from_storage_and_strategy(self.space, storage, strategy) + return w_objectlist + def copy_into(self, other): self.strategy.copy_into(self, other) # ___________________________________________________ @@ -251,8 +260,10 @@ def setitem(self, w_list, index, w_item): raise IndexError - def setslice(self, w_list, start, step, slicelength, sequence_w): - w_list.__init__(self.space, sequence_w) + def setslice(self, w_list, start, step, slicelength, w_other): + #XXX now we have wrapping/unwrapping here! 
+ #XXX BUT: shouldn't we use from_strage_and_strategy here anyway? + w_list.__init__(self.space, w_other.getitems()) def insert(self, w_list, index, w_item): assert index == 0 @@ -510,7 +521,7 @@ return elif w_other.strategy is self.space.fromcache(EmptyListStrategy): return - + #XXX use _temporarily_as_objects here list_w = w_other.getitems() strategy = self.space.fromcache(ObjectListStrategy) storage = strategy.cast_to_void_star(list_w) @@ -531,20 +542,23 @@ w_list.switch_to_object_strategy() w_list.setitem(index, w_item) - def setslice(self, w_list, start, step, slicelength, sequence_w): + def setslice(self, w_list, start, step, slicelength, w_other): #XXX inefficient assert slicelength >= 0 items = self.cast_from_void_star(w_list.lstorage) - if (self is not self.space.fromcache(ObjectListStrategy) and - not self.list_is_correct_type(W_ListObject(self.space, sequence_w)) and - len(sequence_w) != 0): + if self is self.space.fromcache(ObjectListStrategy): + w_other = w_other._temporarily_as_objects() + elif (not self.list_is_correct_type(w_other) and + w_other.length() != 0): w_list.switch_to_object_strategy() - w_list.setslice(start, step, slicelength, sequence_w) + w_other_as_object = w_other._temporarily_as_objects() + assert w_other_as_object.strategy is self.space.fromcache(ObjectListStrategy) + w_list.setslice(start, step, slicelength, w_other_as_object) return oldsize = len(items) - len2 = len(sequence_w) + len2 = w_other.length() if step == 1: # Support list resizing for non-extended slices delta = slicelength - len2 if delta < 0: @@ -566,7 +580,13 @@ "assign sequence of size %d to extended slice of size %d", len2, slicelength) - if sequence_w is items: + if w_other.strategy is self.space.fromcache(EmptyListStrategy): + other_items = [] + else: + # at this point both w_list and w_other have the same type, so + # self.cast_from_void_star is valid for both of them + other_items = self.cast_from_void_star(w_other.lstorage) + if other_items is items: if step > 
0: # Always copy starting from the right to avoid # having to make a shallow copy in the case where @@ -574,18 +594,22 @@ i = len2 - 1 start += i*step while i >= 0: - items[start] = self.unwrap(sequence_w[i]) + items[start] = other_items[i] start -= step i -= 1 return else: # Make a shallow copy to more easily handle the reversal case # XXX why is this needed ??? - sequence_w = list(sequence_w) + w_list.reverse() + return + #other_items = list(other_items) for i in range(len2): - items[start] = self.unwrap(sequence_w[i]) + items[start] = other_items[i] start += step + w_list.check_empty_strategy() + def deleteitem(self, w_list, index): l = self.cast_from_void_star(w_list.lstorage) del l[index] @@ -755,11 +779,17 @@ start, stop = normalize_simple_slice(space, length, w_start, w_stop) return w_list.getslice(start, stop, 1, stop - start) +def setslice__List_ANY_ANY_List(space, w_list, w_start, w_stop, w_other): + length = w_list.length() + start, stop = normalize_simple_slice(space, length, w_start, w_stop) + w_list.setslice(start, 1, stop-start, w_other) + def setslice__List_ANY_ANY_ANY(space, w_list, w_start, w_stop, w_iterable): length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) sequence_w = space.listview(w_iterable) - w_list.setslice(start, 1, stop-start, sequence_w) + w_other = W_ListObject(space, sequence_w) + w_list.setslice(start, 1, stop-start, w_other) def delslice__List_ANY_ANY(space, w_list, w_start, w_stop): length = w_list.length() @@ -896,11 +926,17 @@ space.wrap("list index out of range")) return space.w_None +def setitem__List_Slice_List(space, w_list, w_slice, w_other): + oldsize = w_list.length() + start, stop, step, slicelength = w_slice.indices4(space, oldsize) + w_list.setslice(start, step, slicelength, w_other) + def setitem__List_Slice_ANY(space, w_list, w_slice, w_iterable): oldsize = w_list.length() start, stop, step, slicelength = w_slice.indices4(space, oldsize) sequence_w = 
space.listview(w_iterable) - w_list.setslice(start, step, slicelength, sequence_w) + w_other = W_ListObject(space, sequence_w) + w_list.setslice(start, step, slicelength, w_other) app = gateway.applevel(""" def listrepr(currently_in_repr, l): diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -657,6 +657,7 @@ l = [1,2,3] raises(ValueError, "l[0:2:2] = [1,2,3,4]") + raises(ValueError, "l[::2] = []") def test_recursive_repr(self): l = [] diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -111,24 +111,62 @@ def test_setslice(self): l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) - l.setslice(0, 1, 2, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) - l.setslice(0, 1, 2, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)]) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5), self.space.wrap(6)])) assert isinstance(l.strategy, IntegerListStrategy) l = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap('b'), self.space.wrap(3)]) assert isinstance(l.strategy, ObjectListStrategy) - l.setslice(0, 1, 2, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, ObjectListStrategy) l = W_ListObject(self.space, [self.space.wrap(1), 
self.space.wrap(2), self.space.wrap(3)]) assert isinstance(l.strategy, IntegerListStrategy) - l.setslice(0, 1, 2, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')]) + l.setslice(0, 1, 2, W_ListObject(self.space, [self.space.wrap('a'), self.space.wrap('b'), self.space.wrap('c')])) assert isinstance(l.strategy, ObjectListStrategy) + def test_setslice_List(self): + + def wrapitems(items): + items_w = [] + for i in items: + items_w.append(self.space.wrap(i)) + return items_w + + def keep_other_strategy(w_list, start, step, length, w_other): + other_strategy = w_other.strategy + w_list.setslice(start, step, length, w_other) + assert w_other.strategy is other_strategy + + l = W_ListObject(self.space, wrapitems([1,2,3,4,5])) + other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) + keep_other_strategy(l, 0, 2, other.length(), other) + assert l.strategy is self.space.fromcache(ObjectListStrategy) + + l = W_ListObject(self.space, wrapitems([1,2,3,4,5])) + other = W_ListObject(self.space, wrapitems([6, 6, 6])) + keep_other_strategy(l, 0, 2, other.length(), other) + assert l.strategy is self.space.fromcache(IntegerListStrategy) + + l = W_ListObject(self.space, wrapitems(["a","b","c","d","e"])) + other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) + keep_other_strategy(l, 0, 2, other.length(), other) + assert l.strategy is self.space.fromcache(StringListStrategy) + + l = W_ListObject(self.space, wrapitems(["a",3,"c",4,"e"])) + other = W_ListObject(self.space, wrapitems(["a", "b", "c"])) + keep_other_strategy(l, 0, 2, other.length(), other) + assert l.strategy is self.space.fromcache(ObjectListStrategy) + + l = W_ListObject(self.space, wrapitems(["a",3,"c",4,"e"])) + other = W_ListObject(self.space, []) + keep_other_strategy(l, 0, 1, l.length(), other) + assert l.strategy is self.space.fromcache(EmptyListStrategy) + def test_extend(self): l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -230,7 +268,7 @@ def 
test_range_setslice(self): l = make_range_list(self.space, 1, 3, 5) assert isinstance(l.strategy, RangeListStrategy) - l.setslice(0, 1, 3, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l.setslice(0, 1, 3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)])) assert isinstance(l.strategy, IntegerListStrategy) def test_get_items_copy(self): From noreply at buildbot.pypy.org Fri Sep 23 13:13:00 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:00 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: added _getitem_unwrapped in RangeListStrategy to avoid wrapping/unwrapping Message-ID: <20110923111300.CC167820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47492:158c79deb4df Date: 2011-03-30 15:05 +0200 http://bitbucket.org/pypy/pypy/changeset/158c79deb4df/ Log: added _getitem_unwrapped in RangeListStrategy to avoid wrapping/unwrapping diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -306,7 +306,7 @@ def length(self, w_list): return self.cast_from_void_star(w_list.lstorage)[2] - def getitem(self, w_list, i): + def _getitem_unwrapped(self, w_list, i): v = self.cast_from_void_star(w_list.lstorage) start = v[0] step = v[1] @@ -317,7 +317,10 @@ raise IndexError elif i >= length: raise IndexError - return self.wrap(start + i * step) + return start + i * step + + def getitem(self, w_list, i): + return self.wrap(self._getitem_unwrapped(w_list, i)) def getitems(self, w_list): return self._getitems_range(w_list, True) @@ -352,7 +355,7 @@ old_step = v[1] old_length = v[2] - new_start = self.unwrap(w_list.getitem(start)) + new_start = self._getitem_unwrapped(w_list, start) new_step = old_step * step return make_range_list(self.space, new_start, new_step, length) @@ -360,8 +363,8 @@ if is_W_IntObject(w_item): l = 
self.cast_from_void_star(w_list.lstorage) step = l[1] - last_in_range = self.getitem(w_list, -1) - if self.unwrap(w_item) - step == self.unwrap(last_in_range): + last_in_range = self._getitem_unwrapped(w_list, -1) + if self.unwrap(w_item) - step == last_in_range: new = self.cast_to_void_star((l[0],l[1],l[2]+1)) w_list.lstorage = new return @@ -420,10 +423,10 @@ def reverse(self, w_list): v = self.cast_from_void_star(w_list.lstorage) - w_last = w_list.getitem(-1) #XXX wrapped + last = self._getitem_unwrapped(w_list, -1) length = v[2] skip = v[1] - new = self.cast_to_void_star((self.unwrap(w_last), -skip, length)) + new = self.cast_to_void_star((last, -skip, length)) w_list.lstorage = new class AbstractUnwrappedStrategy(object): From noreply at buildbot.pypy.org Fri Sep 23 13:13:02 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:02 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Fixed setting slice of EmptyList (getitems of ObjectList doesn't copy anymore) Message-ID: <20110923111302.036F3820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47493:abbd13b48791 Date: 2011-03-30 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/abbd13b48791/ Log: Fixed setting slice of EmptyList (getitems of ObjectList doesn't copy anymore) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -261,9 +261,11 @@ raise IndexError def setslice(self, w_list, start, step, slicelength, w_other): - #XXX now we have wrapping/unwrapping here! - #XXX BUT: shouldn't we use from_strage_and_strategy here anyway? 
- w_list.__init__(self.space, w_other.getitems()) + items = w_other.getitems_copy() + strategy = w_other.strategy + storage = strategy.cast_to_void_star(items) + w_list.strategy = strategy + w_list.lstorage = storage def insert(self, w_list, index, w_item): assert index == 0 diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -167,6 +167,14 @@ keep_other_strategy(l, 0, 1, l.length(), other) assert l.strategy is self.space.fromcache(EmptyListStrategy) + def test_empty_setslice_with_objectlist(self): + l = W_ListObject(self.space, []) + o = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap("2"), self.space.wrap(3)]) + l.setslice(0, 1, o.length(), o) + assert l.getitems() == o.getitems() + l.append(self.space.wrap(17)) + assert l.getitems() != o.getitems() + def test_extend(self): l = W_ListObject(self.space, []) assert isinstance(l.strategy, EmptyListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:13:03 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:03 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Use new method _temporarily_as_objects in extend, too Message-ID: <20110923111303.2E281820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47494:71c4ed6fd9da Date: 2011-03-30 16:21 +0200 http://bitbucket.org/pypy/pypy/changeset/71c4ed6fd9da/ Log: Use new method _temporarily_as_objects in extend, too diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -526,14 +526,8 @@ return elif w_other.strategy is self.space.fromcache(EmptyListStrategy): return - #XXX use _temporarily_as_objects here - list_w = w_other.getitems() - strategy = self.space.fromcache(ObjectListStrategy) - storage = 
strategy.cast_to_void_star(list_w) - # NB: w_other shares its storage with the original w_other. the new - # w_other does not survive long, so this is not a problem - w_other = W_ListObject.from_storage_and_strategy(self.space, storage, strategy) + w_other = w_other._temporarily_as_objects() w_list.switch_to_object_strategy() w_list.extend(w_other) From noreply at buildbot.pypy.org Fri Sep 23 13:13:04 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:04 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Implemented mul on strategies Message-ID: <20110923111304.5C557820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47495:df95e88b13b3 Date: 2011-04-11 14:32 +0200 http://bitbucket.org/pypy/pypy/changeset/df95e88b13b3/ Log: Implemented mul on strategies diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -121,6 +121,10 @@ return self.strategy.getitems_copy(self) # ___________________________________________________ + + def mul(self, times): + return self.strategy.mul(self, times) + def inplace_mul(self, times): self.strategy.inplace_mul(self, times) @@ -183,6 +187,9 @@ def append(self, w_list, w_item): raise NotImplementedError + def mul(self, w_list, times): + raise NotImplementedError + def inplace_mul(self, w_list, times): raise NotImplementedError @@ -245,6 +252,9 @@ def append(self, w_list, w_item): w_list.__init__(self.space, [w_item]) + def mul(self, w_list, times): + return w_list.clone() + def inplace_mul(self, w_list, times): return @@ -376,6 +386,12 @@ w_list.switch_to_object_strategy() w_list.append(w_item) + def mul(self, w_list, times): + #XXX maybe faster to get unwrapped items and create new integer list? 
+ w_newlist = w_list.clone() + w_newlist.inplace_mul(times) + return w_newlist + def inplace_mul(self, w_list, times): self.switch_to_integer_strategy(w_list) w_list.inplace_mul(times) @@ -655,6 +671,11 @@ w_list.check_empty_strategy() return w_item + def mul(self, w_list, times): + w_newlist = w_list.clone() + w_newlist.inplace_mul(times) + return w_newlist + def inplace_mul(self, w_list, times): l = self.cast_from_void_star(w_list.lstorage) l *= times @@ -834,6 +855,7 @@ if e.match(space, space.w_TypeError): raise FailedToImplement raise + return w_list.mul(times) return W_ListObject(space, w_list.getitems() * times) def mul__List_ANY(space, w_list, w_times): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -305,3 +305,17 @@ l3 = self.space.add(l1, l2) l4 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert self.space.eq_w(l3, l4) + + def test_mul(self): + l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l2 = l1.mul(2) + l3 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + assert self.space.eq_w(l2, l3) + + def test_mul_same_strategy_but_different_object(self): + l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) + l2 = l1.mul(1) + assert self.space.eq_w(l1, l2) + l1.setitem(0, self.space.wrap(5)) + assert not self.space.eq_w(l1, l2) + From noreply at buildbot.pypy.org Fri Sep 23 13:13:05 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:05 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Avoid duplicate copy in RangeList.mul() + tests Message-ID: 
<20110923111305.8A9BA820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47496:be8327c01c6a Date: 2011-04-12 12:02 +0200 http://bitbucket.org/pypy/pypy/changeset/be8327c01c6a/ Log: Avoid duplicate copy in RangeList.mul() + tests diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -387,9 +387,11 @@ w_list.append(w_item) def mul(self, w_list, times): - #XXX maybe faster to get unwrapped items and create new integer list? - w_newlist = w_list.clone() - w_newlist.inplace_mul(times) + l = self._getitems_range(w_list, False) + l *= times + strategy = self.space.fromcache(IntegerListStrategy) + storage = strategy.cast_to_void_star(l) + w_newlist = W_ListObject.from_storage_and_strategy(self.space, storage, strategy) return w_newlist def inplace_mul(self, w_list, times): @@ -672,6 +674,7 @@ return w_item def mul(self, w_list, times): + # clone w_newlist = w_list.clone() w_newlist.inplace_mul(times) return w_newlist diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -312,6 +312,12 @@ l3 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) assert self.space.eq_w(l2, l3) + l4 = make_range_list(self.space, 1, 1, 3) + assert self.space.eq_w(l4, l1) + + l5 = l4.mul(2) + assert self.space.eq_w(l5, l3) + def test_mul_same_strategy_but_different_object(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) l2 = l1.mul(1) From noreply at buildbot.pypy.org Fri Sep 23 13:13:11 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Sep 2011 13:13:11 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: merge Message-ID: 
<20110923111311.2E87B820D1@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: list-strategies Changeset: r47497:aac2064d18e3 Date: 2011-04-12 15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/aac2064d18e3/ Log: merge diff too long, truncating to 10000 out of 21908 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ pypy/doc/*.html pypy/doc/config/*.html pypy/doc/discussion/*.html +pypy/module/cpyext/src/*.o +pypy/module/cpyext/test/*.o pypy/module/test_lib_pypy/ctypes_tests/*.o pypy/translator/c/src/dtoa.o pypy/translator/goal/pypy-c diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -17,7 +17,6 @@ ^pypy/doc/.+\.html$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ -^pypy/translator/c/src/dtoa.o$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ ^pypy/translator/c/src/libffi_msvc/.+\.dll$ ^pypy/translator/c/src/libffi_msvc/.+\.lib$ @@ -52,6 +51,7 @@ ^pypy/doc/discussion/.+\.html$ ^include/.+\.h$ ^include/.+\.inl$ +^pypy/doc/_build/.*$ ^pypy/doc/config/.+\.html$ ^pypy/doc/config/style\.css$ ^pypy/doc/jit/.+\.html$ @@ -63,4 +63,4 @@ ^pypy/doc/image/parsing_example.+\.png$ ^compiled ^.git/ -^release/ \ No newline at end of file +^release/ diff --git a/.hgsub b/.hgsub deleted file mode 100644 --- a/.hgsub +++ /dev/null @@ -1,3 +0,0 @@ -greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c -testrunner = [svn]http://codespeak.net/svn/pypy/build/testrunner -lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl diff --git a/.hgsubstate b/.hgsubstate deleted file mode 100644 --- a/.hgsubstate +++ /dev/null @@ -1,3 +0,0 @@ -80037 greenlet -80348 lib_pypy/pyrepl -80409 testrunner diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -108,6 +108,7 @@ Anders Qvist Alan McIntyre Bert Freudenberg + Tav Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ 
b/_pytest/__init__.py @@ -1,1 +1,2 @@ # +__version__ = '2.0.3.dev3' diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -252,6 +252,16 @@ self.hook = self.pluginmanager.hook self._inicache = {} + @classmethod + def fromdictargs(cls, option_dict, args): + """ constructor useable for subprocesses. """ + config = cls() + config._preparse(args, addopts=False) + config.option.__dict__.update(option_dict) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + def _onimportconftest(self, conftestmodule): self.trace("loaded conftestmodule %r" %(conftestmodule,)) self.pluginmanager.consider_conftest(conftestmodule) diff --git a/_pytest/core.py b/_pytest/core.py --- a/_pytest/core.py +++ b/_pytest/core.py @@ -164,14 +164,17 @@ def consider_preparse(self, args): for opt1,opt2 in zip(args, args[1:]): if opt1 == "-p": - if opt2.startswith("no:"): - name = opt2[3:] - if self.getplugin(name) is not None: - self.unregister(None, name=name) - self._name2plugin[name] = -1 - else: - if self.getplugin(opt2) is None: - self.import_plugin(opt2) + self.consider_pluginarg(opt2) + + def consider_pluginarg(self, arg): + if arg.startswith("no:"): + name = arg[3:] + if self.getplugin(name) is not None: + self.unregister(None, name=name) + self._name2plugin[name] = -1 + else: + if self.getplugin(arg) is None: + self.import_plugin(arg) def consider_conftest(self, conftestmodule): if self.register(conftestmodule, name=conftestmodule.__file__): diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -106,7 +106,13 @@ '%s', report.keywords['xfail']) else: - self.appendlog("") + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + self.appendlog('%s', + skipreason, "%s:%s: %s" % report.longrepr, + ) self._closetestcase() self.skipped += 1 diff --git a/_pytest/python.py b/_pytest/python.py 
--- a/_pytest/python.py +++ b/_pytest/python.py @@ -70,11 +70,13 @@ res = __multicall__.execute() if res is not None: return res - if collector._istestclasscandidate(name, obj): + if inspect.isclass(obj): #if hasattr(collector.obj, 'unittest'): # return # we assume it's a mixin class for a TestCase derived one - Class = collector._getcustomclass("Class") - return Class(name, parent=collector) + if collector.classnamefilter(name): + if not hasinit(obj): + Class = collector._getcustomclass("Class") + return Class(name, parent=collector) elif collector.funcnamefilter(name) and hasattr(obj, '__call__'): if is_generator(obj): return Generator(name, parent=collector) @@ -194,14 +196,6 @@ return self.ihook.pytest_pycollect_makeitem( collector=self, name=name, obj=obj) - def _istestclasscandidate(self, name, obj): - if self.classnamefilter(name) and \ - inspect.isclass(obj): - if hasinit(obj): - # XXX WARN - return False - return True - def _genfunctions(self, name, funcobj): module = self.getparent(Module).obj clscol = self.getparent(Class) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/dotviewer/conftest.py b/dotviewer/conftest.py --- a/dotviewer/conftest.py +++ b/dotviewer/conftest.py @@ -6,4 +6,6 @@ dest="pygame", default=False, help="allow interactive tests using Pygame") -option = py.test.config.option +def pytest_configure(config): + global option + option = config.option diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy 
tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. - - Longer tasks ------------ diff --git a/lib-python/modified-2.7.0/ctypes/test/test_internals.py b/lib-python/modified-2.7.0/ctypes/test/test_internals.py --- a/lib-python/modified-2.7.0/ctypes/test/test_internals.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_internals.py @@ -2,7 +2,6 @@ import unittest from ctypes import * from sys import getrefcount as grc -from ctypes.test import xfail # XXX This test must be reviewed for correctness!!! 
@@ -29,13 +28,18 @@ self.assertEqual(refcnt, grc(i)) self.assertEqual(ci._objects, None) - @xfail def test_c_char_p(self): s = "Hello, World" refcnt = grc(s) cs = c_char_p(s) self.assertEqual(refcnt + 1, grc(s)) - self.assertSame(cs._objects, s) + try: + # Moving gcs need to allocate a nonmoving buffer + cs._objects._obj + except AttributeError: + self.assertSame(cs._objects, s) + else: + self.assertSame(cs._objects._obj, s) def test_simple_struct(self): class X(Structure): diff --git a/lib-python/modified-2.7.0/ctypes/test/test_loading.py b/lib-python/modified-2.7.0/ctypes/test/test_loading.py --- a/lib-python/modified-2.7.0/ctypes/test/test_loading.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_loading.py @@ -2,7 +2,7 @@ import sys, unittest import os from ctypes.util import find_library -from ctypes.test import is_resource_enabled +from ctypes.test import is_resource_enabled, xfail libc_name = None if os.name == "nt": @@ -75,6 +75,7 @@ self.assertRaises(AttributeError, dll.__getitem__, 1234) if os.name == "nt": + @xfail def test_1703286_A(self): from _ctypes import LoadLibrary, FreeLibrary # On winXP 64-bit, advapi32 loads at an address that does @@ -85,6 +86,7 @@ handle = LoadLibrary("advapi32") FreeLibrary(handle) + @xfail def test_1703286_B(self): # Since on winXP 64-bit advapi32 loads like described # above, the (arbitrarily selected) CloseEventLog function diff --git a/lib-python/modified-2.7.0/ctypes/test/test_parameters.py b/lib-python/modified-2.7.0/ctypes/test/test_parameters.py --- a/lib-python/modified-2.7.0/ctypes/test/test_parameters.py +++ b/lib-python/modified-2.7.0/ctypes/test/test_parameters.py @@ -89,6 +89,8 @@ pa = c_wchar_p.from_param(c_wchar_p(u"123")) self.assertEqual(type(pa), c_wchar_p) + if sys.platform == "win32": + test_cw_strings = xfail(test_cw_strings) @xfail def test_int_pointers(self): diff --git a/lib-python/modified-2.7.0/distutils/command/build_ext.py b/lib-python/modified-2.7.0/distutils/command/build_ext.py --- 
a/lib-python/modified-2.7.0/distutils/command/build_ext.py +++ b/lib-python/modified-2.7.0/distutils/command/build_ext.py @@ -184,7 +184,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. - self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -192,8 +192,13 @@ # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) - if MSVC_VERSION == 9: + if 0: + # pypy has no PC directory + self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) + if 1: + # pypy has no PCBuild directory + pass + elif MSVC_VERSION == 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' @@ -695,24 +700,14 @@ shared extension. On most platforms, this is just 'ext.libraries'; on Windows and OS/2, we add the Python library (eg. python20.dll). """ - # The python library is always needed on Windows. For MSVC, this - # is redundant, since the library is mentioned in a pragma in - # pyconfig.h that MSVC groks. The other Windows compilers all seem - # to need it mentioned explicitly, though, so that's what we do. - # Append '_d' to the python import library on debug builds. + # The python library is always needed on Windows. 
if sys.platform == "win32": - from distutils.msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): - template = "python%d%d" - if self.debug: - template = template + '_d' - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] - else: - return ext.libraries + template = "python%d%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + # don't extend ext.libraries, it may be shared with other + # extensions, it is a reference to the original list + return ext.libraries + [pythonlib] elif sys.platform == "os2emx": # EMX/GCC requires the python library explicitly, and I # believe VACPP does as well (though not confirmed) - AIM Apr01 diff --git a/lib-python/modified-2.7.0/distutils/msvc9compiler.py b/lib-python/modified-2.7.0/distutils/msvc9compiler.py --- a/lib-python/modified-2.7.0/distutils/msvc9compiler.py +++ b/lib-python/modified-2.7.0/distutils/msvc9compiler.py @@ -644,6 +644,7 @@ temp_manifest = os.path.join( build_temp, os.path.basename(output_filename) + ".manifest") + ld_args.append('/MANIFEST') ld_args.append('/MANIFESTFILE:' + temp_manifest) if extra_preargs: diff --git a/lib-python/modified-2.7.0/sqlite3/test/regression.py b/lib-python/modified-2.7.0/sqlite3/test/regression.py --- a/lib-python/modified-2.7.0/sqlite3/test/regression.py +++ b/lib-python/modified-2.7.0/sqlite3/test/regression.py @@ -264,6 +264,16 @@ """ self.assertRaises(sqlite.Warning, self.con, 1) + def CheckUpdateDescriptionNone(self): + """ + Call Cursor.update with an UPDATE query and check that it sets the + cursor's description to be None. 
+ """ + cur = self.con.cursor() + cur.execute("CREATE TABLE foo (id INTEGER)") + cur.execute("UPDATE foo SET id = 3 WHERE id = 1") + self.assertEqual(cur.description, None) + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_wait.py @@ -0,0 +1,51 @@ +from ctypes import CDLL, c_int, POINTER, byref +from ctypes.util import find_library +from resource import _struct_rusage, struct_rusage + +__all__ = ["wait3", "wait4"] + +libc = CDLL(find_library("c")) +c_wait3 = libc.wait3 + +c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] + +c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + +def wait3(options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage + +def wait4(pid, options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -31,9 +31,9 @@ from threading import _get_ident as thread_get_ident names = "sqlite3.dll libsqlite3.so.0 libsqlite3.so libsqlite3.dylib".split() -for name in names: +for 
name in names: try: - sqlite = cdll.LoadLibrary(name) + sqlite = cdll.LoadLibrary(name) break except OSError: continue @@ -232,7 +232,7 @@ return unicode(x, 'utf-8') class Connection(object): - def __init__(self, database, isolation_level="", detect_types=0, timeout=None, *args, **kwargs): + def __init__(self, database, isolation_level="", detect_types=0, timeout=None, cached_statements=None, factory=None): self.db = c_void_p() if sqlite.sqlite3_open(database, byref(self.db)) != SQLITE_OK: raise OperationalError("Could not open database") @@ -1032,6 +1032,8 @@ self.statement = None def _get_description(self): + if self.kind == "DML": + return None desc = [] for i in xrange(sqlite.sqlite3_column_count(self.statement)): name = sqlite.sqlite3_column_name(self.statement, i).split("[")[0].strip() @@ -1140,7 +1142,7 @@ def _convert_result(con, val): if val is None: - sqlite.sqlite3_result_null(con) + sqlite.sqlite3_result_null(con) elif isinstance(val, (bool, int, long)): sqlite.sqlite3_result_int64(con, int(val)) elif isinstance(val, str): diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): 
+ skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/lib_pypy/pyrepl/__init__.py b/lib_pypy/pyrepl/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/lib_pypy/pyrepl/cmdrepl.py b/lib_pypy/pyrepl/cmdrepl.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/cmdrepl.py @@ -0,0 +1,118 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""Wedge pyrepl behaviour into cmd.Cmd-derived classes. + +replize, when given a subclass of cmd.Cmd, returns a class that +behaves almost identically to the supplied class, except that it uses +pyrepl instead if raw_input. + +It was designed to let you do this: + +>>> import pdb +>>> from pyrepl import replize +>>> pdb.Pdb = replize(pdb.Pdb) + +which is in fact done by the `pythoni' script that comes with +pyrepl.""" + +from __future__ import nested_scopes + +from pyrepl import completing_reader as cr, reader, completer +from pyrepl.completing_reader import CompletingReader as CR +import cmd + +class CmdReader(CR): + def collect_keymap(self): + return super(CmdReader, self).collect_keymap() + ( + ("\\M-\\n", "invalid-key"), + ("\\n", "accept")) + + CR_init = CR.__init__ + def __init__(self, completions): + self.CR_init(self) + self.completions = completions + + def get_completions(self, stem): + if len(stem) != self.pos: + return [] + return cr.uniqify([s for s in self.completions + if s.startswith(stem)]) + +def replize(klass, history_across_invocations=1): + + """Return a subclass of the cmd.Cmd-derived klass that uses + pyrepl instead of readline. + + Raises a ValueError if klass does not derive from cmd.Cmd. 
+ + The optional history_across_invocations parameter (default 1) + controls whether instances of the returned class share + histories.""" + + completions = [s[3:] + for s in completer.get_class_members(klass) + if s.startswith("do_")] + + if not issubclass(klass, cmd.Cmd): + raise Exception +# if klass.cmdloop.im_class is not cmd.Cmd: +# print "this may not work" + + class CmdRepl(klass): + k_init = klass.__init__ + + if history_across_invocations: + _CmdRepl__history = [] + def __init__(self, *args, **kw): + self.k_init(*args, **kw) + self.__reader = CmdReader(completions) + self.__reader.history = CmdRepl._CmdRepl__history + self.__reader.historyi = len(CmdRepl._CmdRepl__history) + else: + def __init__(self, *args, **kw): + self.k_init(*args, **kw) + self.__reader = CmdReader(completions) + + def cmdloop(self, intro=None): + self.preloop() + if intro is not None: + self.intro = intro + if self.intro: + print self.intro + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue[0] + del self.cmdqueue[0] + else: + try: + self.__reader.ps1 = self.prompt + line = self.__reader.readline() + except EOFError: + line = "EOF" + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + + CmdRepl.__name__ = "replize(%s.%s)"%(klass.__module__, klass.__name__) + return CmdRepl + diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/commands.py @@ -0,0 +1,385 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import sys, os + +# Catgories of actions: +# killing +# yanking +# motion +# editing +# history +# finishing +# [completion] + +class Command(object): + finish = 0 + kills_digit_arg = 1 + def __init__(self, reader, (event_name, event)): + self.reader = reader + self.event = event + self.event_name = event_name + def do(self): + pass + +class KillCommand(Command): + def kill_range(self, start, end): + if start == end: + return + r = self.reader + b = r.buffer + text = b[start:end] + del b[start:end] + if is_kill(r.last_command): + if start < r.pos: + r.kill_ring[-1] = text + r.kill_ring[-1] + else: + r.kill_ring[-1] = r.kill_ring[-1] + text + else: + r.kill_ring.append(text) + r.pos = start + r.dirty = 1 + +class YankCommand(Command): + pass + +class MotionCommand(Command): + pass + +class EditCommand(Command): + pass + +class FinishCommand(Command): + finish = 1 + pass + +def is_kill(command): + return command and issubclass(command, KillCommand) + +def is_yank(command): + return command and issubclass(command, YankCommand) + +# etc + +class digit_arg(Command): + kills_digit_arg = 0 + def do(self): + r = self.reader + c = self.event[-1] + if c == "-": + if r.arg is not None: + r.arg = -r.arg + else: + r.arg = -1 + else: + d = int(c) + if r.arg is None: + r.arg = d + else: + if r.arg < 0: + r.arg = 10*r.arg - d + else: + r.arg = 10*r.arg + d + r.dirty = 1 + +class clear_screen(Command): + def do(self): + r = self.reader + r.console.clear() + r.dirty = 1 + +class refresh(Command): + def 
do(self): + self.reader.dirty = 1 + +class repaint(Command): + def do(self): + self.reader.dirty = 1 + self.reader.console.repaint_prep() + +class kill_line(KillCommand): + def do(self): + r = self.reader + b = r.buffer + eol = r.eol() + for c in b[r.pos:eol]: + if not c.isspace(): + self.kill_range(r.pos, eol) + return + else: + self.kill_range(r.pos, eol+1) + +class unix_line_discard(KillCommand): + def do(self): + r = self.reader + self.kill_range(r.bol(), r.pos) + +# XXX unix_word_rubout and backward_kill_word should actually +# do different things... + +class unix_word_rubout(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.bow(), r.pos) + +class kill_word(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.pos, r.eow()) + +class backward_kill_word(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.bow(), r.pos) + +class yank(YankCommand): + def do(self): + r = self.reader + if not r.kill_ring: + r.error("nothing to yank") + return + r.insert(r.kill_ring[-1]) + +class yank_pop(YankCommand): + def do(self): + r = self.reader + b = r.buffer + if not r.kill_ring: + r.error("nothing to yank") + return + if not is_yank(r.last_command): + r.error("previous command was not a yank") + return + repl = len(r.kill_ring[-1]) + r.kill_ring.insert(0, r.kill_ring.pop()) + t = r.kill_ring[-1] + b[r.pos - repl:r.pos] = t + r.pos = r.pos - repl + len(t) + r.dirty = 1 + +class interrupt(FinishCommand): + def do(self): + import signal + self.reader.console.finish() + os.kill(os.getpid(), signal.SIGINT) + +class suspend(Command): + def do(self): + import signal + r = self.reader + p = r.pos + r.console.finish() + os.kill(os.getpid(), signal.SIGSTOP) + ## this should probably be done + ## in a handler for SIGCONT? 
+ r.console.prepare() + r.pos = p + r.posxy = 0, 0 + r.dirty = 1 + r.console.screen = [] + +class up(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + bol1 = r.bol() + if bol1 == 0: + if r.historyi > 0: + r.select_item(r.historyi - 1) + return + r.pos = 0 + r.error("start of buffer") + return + bol2 = r.bol(bol1-1) + line_pos = r.pos - bol1 + if line_pos > bol1 - bol2 - 1: + r.sticky_y = line_pos + r.pos = bol1 - 1 + else: + r.pos = bol2 + line_pos + +class down(MotionCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + bol1 = r.bol() + eol1 = r.eol() + if eol1 == len(b): + if r.historyi < len(r.history): + r.select_item(r.historyi + 1) + r.pos = r.eol(0) + return + r.pos = len(b) + r.error("end of buffer") + return + eol2 = r.eol(eol1+1) + if r.pos - bol1 > eol2 - eol1 - 1: + r.pos = eol2 + else: + r.pos = eol1 + (r.pos - bol1) + 1 + +class left(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + p = r.pos - 1 + if p >= 0: + r.pos = p + else: + self.reader.error("start of buffer") + +class right(MotionCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + p = r.pos + 1 + if p <= len(b): + r.pos = p + else: + self.reader.error("end of buffer") + +class beginning_of_line(MotionCommand): + def do(self): + self.reader.pos = self.reader.bol() + +class end_of_line(MotionCommand): + def do(self): + r = self.reader + self.reader.pos = self.reader.eol() + +class home(MotionCommand): + def do(self): + self.reader.pos = 0 + +class end(MotionCommand): + def do(self): + self.reader.pos = len(self.reader.buffer) + +class forward_word(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + r.pos = r.eow() + +class backward_word(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + r.pos = r.bow() + +class self_insert(EditCommand): + def do(self): + r = self.reader + r.insert(self.event 
* r.get_arg()) + +class insert_nl(EditCommand): + def do(self): + r = self.reader + r.insert("\n" * r.get_arg()) + +class transpose_characters(EditCommand): + def do(self): + r = self.reader + b = r.buffer + s = r.pos - 1 + if s < 0: + r.error("cannot transpose at start of buffer") + else: + if s == len(b): + s -= 1 + t = min(s + r.get_arg(), len(b) - 1) + c = b[s] + del b[s] + b.insert(t, c) + r.pos = t + r.dirty = 1 + +class backspace(EditCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + if r.pos > 0: + r.pos -= 1 + del b[r.pos] + r.dirty = 1 + else: + self.reader.error("can't backspace at start") + +class delete(EditCommand): + def do(self): + r = self.reader + b = r.buffer + if ( r.pos == 0 and len(b) == 0 # this is something of a hack + and self.event[-1] == "\004"): + r.update_screen() + r.console.finish() + raise EOFError + for i in range(r.get_arg()): + if r.pos != len(b): + del b[r.pos] + r.dirty = 1 + else: + self.reader.error("end of buffer") + +class accept(FinishCommand): + def do(self): + pass + +class help(Command): + def do(self): + self.reader.msg = self.reader.help_text + self.reader.dirty = 1 + +class invalid_key(Command): + def do(self): + pending = self.reader.console.getpending() + s = ''.join(self.event) + pending.data + self.reader.error("`%r' not bound"%s) + +class invalid_command(Command): + def do(self): + s = self.event_name + self.reader.error("command `%s' not known"%s) + +class qIHelp(Command): + def do(self): + r = self.reader + r.insert((self.event + r.console.getpending().data) * r.get_arg()) + r.pop_input_trans() + +from pyrepl import input + +class QITrans(object): + def push(self, evt): + self.evt = evt + def get(self): + return ('qIHelp', self.evt.raw) + +class quoted_insert(Command): + kills_digit_arg = 0 + def do(self): + self.reader.push_input_trans(QITrans()) diff --git a/lib_pypy/pyrepl/completer.py b/lib_pypy/pyrepl/completer.py new file mode 100644 --- /dev/null +++ 
b/lib_pypy/pyrepl/completer.py @@ -0,0 +1,87 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import __builtin__ + +class Completer: + def __init__(self, ns): + self.ns = ns + + def complete(self, text): + if "." in text: + return self.attr_matches(text) + else: + return self.global_matches(text) + + def global_matches(self, text): + """Compute matches when text is a simple name. + + Return a list of all keywords, built-in functions and names + currently defines in __main__ that match. + + """ + import keyword + matches = [] + n = len(text) + for list in [keyword.kwlist, + __builtin__.__dict__.keys(), + self.ns.keys()]: + for word in list: + if word[:n] == text and word != "__builtins__": + matches.append(word) + return matches + + def attr_matches(self, text): + """Compute matches when text contains a dot. + + Assuming the text is of the form NAME.NAME....[NAME], and is + evaluatable in the globals of __main__, it will be evaluated + and its attributes (as revealed by dir()) are used as possible + completions. (For class instances, class members are are also + considered.) 
+ + WARNING: this can still invoke arbitrary C code, if an object + with a __getattr__ hook is evaluated. + + """ + import re + m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) + if not m: + return [] + expr, attr = m.group(1, 3) + object = eval(expr, self.ns) + words = dir(object) + if hasattr(object, '__class__'): + words.append('__class__') + words = words + get_class_members(object.__class__) + matches = [] + n = len(attr) + for word in words: + if word[:n] == attr and word != "__builtins__": + matches.append("%s.%s" % (expr, word)) + return matches + +def get_class_members(klass): + ret = dir(klass) + if hasattr(klass, '__bases__'): + for base in klass.__bases__: + ret = ret + get_class_members(base) + return ret + + diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/completing_reader.py @@ -0,0 +1,280 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl import commands, reader +from pyrepl.reader import Reader + +def uniqify(l): + d = {} + for i in l: + d[i] = 1 + r = d.keys() + r.sort() + return r + +def prefix(wordlist, j = 0): + d = {} + i = j + try: + while 1: + for word in wordlist: + d[word[i]] = 1 + if len(d) > 1: + return wordlist[0][j:i] + i += 1 + d = {} + except IndexError: + return wordlist[0][j:i] + +import re +def stripcolor(s): + return stripcolor.regexp.sub('', s) +stripcolor.regexp = re.compile(r"\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[m|K]") + +def real_len(s): + return len(stripcolor(s)) + +def left_align(s, maxlen): + stripped = stripcolor(s) + if len(stripped) > maxlen: + # too bad, we remove the color + return stripped[:maxlen] + padding = maxlen - len(stripped) + return s + ' '*padding + +def build_menu(cons, wordlist, start, use_brackets, sort_in_column): + if use_brackets: + item = "[ %s ]" + padding = 4 + else: + item = "%s " + padding = 2 + maxlen = min(max(map(real_len, wordlist)), cons.width - padding) + cols = cons.width / (maxlen + padding) + rows = (len(wordlist) - 1)/cols + 1 + + if sort_in_column: + # sort_in_column=False (default) sort_in_column=True + # A B C A D G + # D E F B E + # G C F + # + # "fill" the table with empty words, so we always have the same amout + # of rows for each column + missing = cols*rows - len(wordlist) + wordlist = wordlist + ['']*missing + indexes = [(i%cols)*rows + i//cols for i in range(len(wordlist))] + wordlist = [wordlist[i] for i in indexes] + menu = [] + i = start + for r in range(rows): + row = [] + for col in range(cols): + row.append(item % left_align(wordlist[i], maxlen)) + i += 1 + if i >= len(wordlist): + break + menu.append( ''.join(row) ) + if i >= len(wordlist): + i = 0 + break + if r + 5 > cons.height: + menu.append(" %d more... "%(len(wordlist) - i)) + break + return menu, i + +# this gets somewhat user interface-y, and as a result the logic gets +# very convoluted. 
+# +# To summarise the summary of the summary:- people are a problem. +# -- The Hitch-Hikers Guide to the Galaxy, Episode 12 + +#### Desired behaviour of the completions commands. +# the considerations are: +# (1) how many completions are possible +# (2) whether the last command was a completion +# (3) if we can assume that the completer is going to return the same set of +# completions: this is controlled by the ``assume_immutable_completions`` +# variable on the reader, which is True by default to match the historical +# behaviour of pyrepl, but e.g. False in the ReadlineAlikeReader to match +# more closely readline's semantics (this is needed e.g. by +# fancycompleter) +# +# if there's no possible completion, beep at the user and point this out. +# this is easy. +# +# if there's only one possible completion, stick it in. if the last thing +# user did was a completion, point out that he isn't getting anywhere, but +# only if the ``assume_immutable_completions`` is True. +# +# now it gets complicated. +# +# for the first press of a completion key: +# if there's a common prefix, stick it in. + +# irrespective of whether anything got stuck in, if the word is now +# complete, show the "complete but not unique" message + +# if there's no common prefix and if the word is not now complete, +# beep. + +# common prefix -> yes no +# word complete \/ +# yes "cbnu" "cbnu" +# no - beep + +# for the second bang on the completion key +# there will necessarily be no common prefix +# show a menu of the choices. + +# for subsequent bangs, rotate the menu around (if there are sufficient +# choices). 
+ +class complete(commands.Command): + def do(self): + r = self.reader + stem = r.get_stem() + if r.assume_immutable_completions and \ + r.last_command_is(self.__class__): + completions = r.cmpltn_menu_choices + else: + r.cmpltn_menu_choices = completions = \ + r.get_completions(stem) + if len(completions) == 0: + r.error("no matches") + elif len(completions) == 1: + if r.assume_immutable_completions and \ + len(completions[0]) == len(stem) and \ + r.last_command_is(self.__class__): + r.msg = "[ sole completion ]" + r.dirty = 1 + r.insert(completions[0][len(stem):]) + else: + p = prefix(completions, len(stem)) + if p <> '': + r.insert(p) + if r.last_command_is(self.__class__): + if not r.cmpltn_menu_vis: + r.cmpltn_menu_vis = 1 + r.cmpltn_menu, r.cmpltn_menu_end = build_menu( + r.console, completions, r.cmpltn_menu_end, + r.use_brackets, r.sort_in_column) + r.dirty = 1 + elif stem + p in completions: + r.msg = "[ complete but not unique ]" + r.dirty = 1 + else: + r.msg = "[ not unique ]" + r.dirty = 1 + +class self_insert(commands.self_insert): + def do(self): + commands.self_insert.do(self) + r = self.reader + if r.cmpltn_menu_vis: + stem = r.get_stem() + if len(stem) < 1: + r.cmpltn_reset() + else: + completions = [w for w in r.cmpltn_menu_choices + if w.startswith(stem)] + if completions: + r.cmpltn_menu, r.cmpltn_menu_end = build_menu( + r.console, completions, 0, + r.use_brackets, r.sort_in_column) + else: + r.cmpltn_reset() + +class CompletingReader(Reader): + """Adds completion support + + Adds instance variables: + * cmpltn_menu, cmpltn_menu_vis, cmpltn_menu_end, cmpltn_choices: + * + """ + # see the comment for the complete command + assume_immutable_completions = True + use_brackets = True # display completions inside [] + sort_in_column = False + + def collect_keymap(self): + return super(CompletingReader, self).collect_keymap() + ( + (r'\t', 'complete'),) + + def __init__(self, console): + super(CompletingReader, self).__init__(console) + 
self.cmpltn_menu = ["[ menu 1 ]", "[ menu 2 ]"] + self.cmpltn_menu_vis = 0 + self.cmpltn_menu_end = 0 + for c in [complete, self_insert]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + + def after_command(self, cmd): + super(CompletingReader, self).after_command(cmd) + if not isinstance(cmd, complete) and not isinstance(cmd, self_insert): + self.cmpltn_reset() + + def calc_screen(self): + screen = super(CompletingReader, self).calc_screen() + if self.cmpltn_menu_vis: + ly = self.lxy[1] + screen[ly:ly] = self.cmpltn_menu + self.screeninfo[ly:ly] = [(0, [])]*len(self.cmpltn_menu) + self.cxy = self.cxy[0], self.cxy[1] + len(self.cmpltn_menu) + return screen + + def finish(self): + super(CompletingReader, self).finish() + self.cmpltn_reset() + + def cmpltn_reset(self): + self.cmpltn_menu = [] + self.cmpltn_menu_vis = 0 + self.cmpltn_menu_end = 0 + self.cmpltn_menu_choices = [] + + def get_stem(self): + st = self.syntax_table + SW = reader.SYNTAX_WORD + b = self.buffer + p = self.pos - 1 + while p >= 0 and st.get(b[p], SW) == SW: + p -= 1 + return u''.join(b[p+1:self.pos]) + + def get_completions(self, stem): + return [] + +def test(): + class TestReader(CompletingReader): + def get_completions(self, stem): + return [s for l in map(lambda x:x.split(),self.history) + for s in l if s and s.startswith(stem)] + reader = TestReader() + reader.ps1 = "c**> " + reader.ps2 = "c/*> " + reader.ps3 = "c|*> " + reader.ps4 = "c\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/lib_pypy/pyrepl/console.py b/lib_pypy/pyrepl/console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/console.py @@ -0,0 +1,93 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that 
copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +class Event: + """An Event. `evt' is 'key' or somesuch.""" + + def __init__(self, evt, data, raw=''): + self.evt = evt + self.data = data + self.raw = raw + + def __repr__(self): + return 'Event(%r, %r)'%(self.evt, self.data) + +class Console: + """Attributes: + + screen, + height, + width, + """ + + def refresh(self, screen, xy): + pass + + def prepare(self): + pass + + def restore(self): + pass + + def move_cursor(self, x, y): + pass + + def set_cursor_vis(self, vis): + pass + + def getheightwidth(self): + """Return (height, width) where height and width are the height + and width of the terminal window in characters.""" + pass + + def get_event(self, block=1): + """Return an Event instance. Returns None if |block| is false + and there is no event pending, otherwise waits for the + completion of an event.""" + pass + + def beep(self): + pass + + def clear(self): + """Wipe the screen""" + pass + + def finish(self): + """Move the cursor to the end of the display and otherwise get + ready for end. XXX could be merged with restore? 
Hmm.""" + pass + + def flushoutput(self): + """Flush all output to the screen (assuming there's some + buffering going on somewhere).""" + pass + + def forgetinput(self): + """Forget all pending, but not yet processed input.""" + pass + + def getpending(self): + """Return the characters that have been typed but not yet + processed.""" + pass + + def wait(self): + """Wait for an event.""" + pass diff --git a/lib_pypy/pyrepl/copy_code.py b/lib_pypy/pyrepl/copy_code.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/copy_code.py @@ -0,0 +1,73 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +import new + +def copy_code_with_changes(codeobject, + argcount=None, + nlocals=None, + stacksize=None, + flags=None, + code=None, + consts=None, + names=None, + varnames=None, + filename=None, + name=None, + firstlineno=None, + lnotab=None): + if argcount is None: argcount = codeobject.co_argcount + if nlocals is None: nlocals = codeobject.co_nlocals + if stacksize is None: stacksize = codeobject.co_stacksize + if flags is None: flags = codeobject.co_flags + if code is None: code = codeobject.co_code + if consts is None: consts = codeobject.co_consts + if names is None: names = codeobject.co_names + if varnames is None: varnames = codeobject.co_varnames + if filename is None: filename = codeobject.co_filename + if name is None: name = codeobject.co_name + if firstlineno is None: firstlineno = codeobject.co_firstlineno + if lnotab is None: lnotab = codeobject.co_lnotab + return new.code(argcount, + nlocals, + stacksize, + flags, + code, + consts, + names, + varnames, + filename, + name, + firstlineno, + lnotab) + +code_attrs=['argcount', + 'nlocals', + 'stacksize', + 'flags', + 'code', + 'consts', + 'names', + 'varnames', + 'filename', + 'name', + 'firstlineno', + 'lnotab'] + + diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/curses.py @@ -0,0 +1,39 @@ + +# Copyright 2000-2010 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Some try-import logic for two purposes: avoiding to bring in the whole +# pure Python curses package if possible; and, in _curses is not actually +# present, falling back to _minimal_curses (which is either a ctypes-based +# pure Python module or a PyPy built-in module). +try: + import _curses +except ImportError: + try: + import _minimal_curses as _curses + except ImportError: + # Who knows, maybe some environment has "curses" but not "_curses". + # If not, at least the following import gives a clean ImportError. + import _curses + +setupterm = _curses.setupterm +tigetstr = _curses.tigetstr +tparm = _curses.tparm +error = _curses.error diff --git a/lib_pypy/pyrepl/fancy_termios.py b/lib_pypy/pyrepl/fancy_termios.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/fancy_termios.py @@ -0,0 +1,52 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import termios + +class TermState: + def __init__(self, tuples): + self.iflag, self.oflag, self.cflag, self.lflag, \ + self.ispeed, self.ospeed, self.cc = tuples + def as_list(self): + return [self.iflag, self.oflag, self.cflag, self.lflag, + self.ispeed, self.ospeed, self.cc] + + def copy(self): + return self.__class__(self.as_list()) + +def tcgetattr(fd): + return TermState(termios.tcgetattr(fd)) + +def tcsetattr(fd, when, attrs): + termios.tcsetattr(fd, when, attrs.as_list()) + +class Term(TermState): + TS__init__ = TermState.__init__ + def __init__(self, fd=0): + self.TS__init__(termios.tcgetattr(fd)) + self.fd = fd + self.stack = [] + def save(self): + self.stack.append( self.as_list() ) + def set(self, when=termios.TCSANOW): + termios.tcsetattr(self.fd, when, self.as_list()) + def restore(self): + self.TS__init__(self.stack.pop()) + self.set() + diff --git a/lib_pypy/pyrepl/historical_reader.py b/lib_pypy/pyrepl/historical_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/historical_reader.py @@ -0,0 +1,311 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl import reader, commands +from pyrepl.reader import Reader as R + +isearch_keymap = tuple( + [('\\%03o'%c, 'isearch-end') for c in range(256) if chr(c) != '\\'] + \ + [(c, 'isearch-add-character') + for c in map(chr, range(32, 127)) if c != '\\'] + \ + [('\\%03o'%c, 'isearch-add-character') + for c in range(256) if chr(c).isalpha() and chr(c) != '\\'] + \ + [('\\\\', 'self-insert'), + (r'\C-r', 'isearch-backwards'), + (r'\C-s', 'isearch-forwards'), + (r'\C-c', 'isearch-cancel'), + (r'\C-g', 'isearch-cancel'), + (r'\', 'isearch-backspace')]) + +del c + +ISEARCH_DIRECTION_NONE = '' +ISEARCH_DIRECTION_BACKWARDS = 'r' +ISEARCH_DIRECTION_FORWARDS = 'f' + +class next_history(commands.Command): + def do(self): + r = self.reader + if r.historyi == len(r.history): + r.error("end of history list") + return + r.select_item(r.historyi + 1) + +class previous_history(commands.Command): + def do(self): + r = self.reader + if r.historyi == 0: + r.error("start of history list") + return + r.select_item(r.historyi - 1) + +class restore_history(commands.Command): + def do(self): + r = self.reader + if r.historyi != len(r.history): + if r.get_unicode() != r.history[r.historyi]: + r.buffer = list(r.history[r.historyi]) + r.pos = len(r.buffer) + r.dirty = 1 + +class first_history(commands.Command): + def do(self): + self.reader.select_item(0) + +class last_history(commands.Command): + def do(self): + self.reader.select_item(len(self.reader.history)) + +class 
operate_and_get_next(commands.FinishCommand): + def do(self): + self.reader.next_history = self.reader.historyi + 1 + +class yank_arg(commands.Command): + def do(self): + r = self.reader + if r.last_command is self.__class__: + r.yank_arg_i += 1 + else: + r.yank_arg_i = 0 + if r.historyi < r.yank_arg_i: + r.error("beginning of history list") + return + a = r.get_arg(-1) + # XXX how to split? + words = r.get_item(r.historyi - r.yank_arg_i - 1).split() + if a < -len(words) or a >= len(words): + r.error("no such arg") + return + w = words[a] + b = r.buffer + if r.yank_arg_i > 0: + o = len(r.yank_arg_yanked) + else: + o = 0 + b[r.pos - o:r.pos] = list(w) + r.yank_arg_yanked = w + r.pos += len(w) - o + r.dirty = 1 + +class forward_history_isearch(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_FORWARDS + r.isearch_start = r.historyi, r.pos + r.isearch_term = '' + r.dirty = 1 + r.push_input_trans(r.isearch_trans) + + +class reverse_history_isearch(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS + r.dirty = 1 + r.isearch_term = '' + r.push_input_trans(r.isearch_trans) + r.isearch_start = r.historyi, r.pos + +class isearch_cancel(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_NONE + r.pop_input_trans() + r.select_item(r.isearch_start[0]) + r.pos = r.isearch_start[1] + r.dirty = 1 + +class isearch_add_character(commands.Command): + def do(self): + r = self.reader + b = r.buffer + r.isearch_term += self.event[-1] + r.dirty = 1 + p = r.pos + len(r.isearch_term) - 1 + if b[p:p+1] != [r.isearch_term[-1]]: + r.isearch_next() + +class isearch_backspace(commands.Command): + def do(self): + r = self.reader + if len(r.isearch_term) > 0: + r.isearch_term = r.isearch_term[:-1] + r.dirty = 1 + else: + r.error("nothing to rubout") + +class isearch_forwards(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = 
ISEARCH_DIRECTION_FORWARDS + r.isearch_next() + +class isearch_backwards(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS + r.isearch_next() + +class isearch_end(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_NONE + r.console.forgetinput() + r.pop_input_trans() + r.dirty = 1 + +class HistoricalReader(R): + """Adds history support (with incremental history searching) to the + Reader class. + + Adds the following instance variables: + * history: + a list of strings + * historyi: + * transient_history: + * next_history: + * isearch_direction, isearch_term, isearch_start: + * yank_arg_i, yank_arg_yanked: + used by the yank-arg command; not actually manipulated by any + HistoricalReader instance methods. + """ + + def collect_keymap(self): + return super(HistoricalReader, self).collect_keymap() + ( + (r'\C-n', 'next-history'), + (r'\C-p', 'previous-history'), + (r'\C-o', 'operate-and-get-next'), + (r'\C-r', 'reverse-history-isearch'), + (r'\C-s', 'forward-history-isearch'), + (r'\M-r', 'restore-history'), + (r'\M-.', 'yank-arg'), + (r'\', 'last-history'), + (r'\', 'first-history')) + + + def __init__(self, console): + super(HistoricalReader, self).__init__(console) + self.history = [] + self.historyi = 0 + self.transient_history = {} + self.next_history = None + self.isearch_direction = ISEARCH_DIRECTION_NONE + for c in [next_history, previous_history, restore_history, + first_history, last_history, yank_arg, + forward_history_isearch, reverse_history_isearch, + isearch_end, isearch_add_character, isearch_cancel, + isearch_add_character, isearch_backspace, + isearch_forwards, isearch_backwards, operate_and_get_next]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + from pyrepl import input + self.isearch_trans = input.KeymapTranslator( + isearch_keymap, invalid_cls=isearch_end, + character_cls=isearch_add_character) + + def 
select_item(self, i): + self.transient_history[self.historyi] = self.get_unicode() + buf = self.transient_history.get(i) + if buf is None: + buf = self.history[i] + self.buffer = list(buf) + self.historyi = i + self.pos = len(self.buffer) + self.dirty = 1 + + def get_item(self, i): + if i <> len(self.history): + return self.transient_history.get(i, self.history[i]) + else: + return self.transient_history.get(i, self.get_unicode()) + + def prepare(self): + super(HistoricalReader, self).prepare() + try: + self.transient_history = {} + if self.next_history is not None \ + and self.next_history < len(self.history): + self.historyi = self.next_history + self.buffer[:] = list(self.history[self.next_history]) + self.pos = len(self.buffer) + self.transient_history[len(self.history)] = '' + else: + self.historyi = len(self.history) + self.next_history = None + except: + self.restore() + raise + + def get_prompt(self, lineno, cursor_on_line): + if cursor_on_line and self.isearch_direction <> ISEARCH_DIRECTION_NONE: + d = 'rf'[self.isearch_direction == ISEARCH_DIRECTION_FORWARDS] + return "(%s-search `%s') "%(d, self.isearch_term) + else: + return super(HistoricalReader, self).get_prompt(lineno, cursor_on_line) + + def isearch_next(self): + st = self.isearch_term + p = self.pos + i = self.historyi + s = self.get_unicode() + forwards = self.isearch_direction == ISEARCH_DIRECTION_FORWARDS + while 1: + if forwards: + p = s.find(st, p + 1) + else: + p = s.rfind(st, 0, p + len(st) - 1) + if p != -1: + self.select_item(i) + self.pos = p + return + elif ((forwards and i == len(self.history) - 1) + or (not forwards and i == 0)): + self.error("not found") + return + else: + if forwards: + i += 1 + s = self.get_item(i) + p = -1 + else: + i -= 1 + s = self.get_item(i) + p = len(s) + + def finish(self): + super(HistoricalReader, self).finish() + ret = self.get_unicode() + for i, t in self.transient_history.items(): + if i < len(self.history) and i != self.historyi: + self.history[i] = t 
+ if ret: + self.history.append(ret) + +def test(): + from pyrepl.unix_console import UnixConsole + reader = HistoricalReader(UnixConsole()) + reader.ps1 = "h**> " + reader.ps2 = "h/*> " + reader.ps3 = "h|*> " + reader.ps4 = "h\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/lib_pypy/pyrepl/input.py b/lib_pypy/pyrepl/input.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/input.py @@ -0,0 +1,97 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# (naming modules after builtin functions is not such a hot idea...) + +# an KeyTrans instance translates Event objects into Command objects + +# hmm, at what level do we want [C-i] and [tab] to be equivalent? +# [meta-a] and [esc a]? obviously, these are going to be equivalent +# for the UnixConsole, but should they be for PygameConsole? + +# it would in any situation seem to be a bad idea to bind, say, [tab] +# and [C-i] to *different* things... but should binding one bind the +# other? + +# executive, temporary decision: [tab] and [C-i] are distinct, but +# [meta-key] is identified with [esc key]. 
We demand that any console +# class does quite a lot towards emulating a unix terminal. + +from pyrepl import unicodedata_ + +class InputTranslator(object): + def push(self, evt): + pass + def get(self): + pass + def empty(self): + pass + +class KeymapTranslator(InputTranslator): + def __init__(self, keymap, verbose=0, + invalid_cls=None, character_cls=None): + self.verbose = verbose + from pyrepl.keymap import compile_keymap, parse_keys + self.keymap = keymap + self.invalid_cls = invalid_cls + self.character_cls = character_cls + d = {} + for keyspec, command in keymap: + keyseq = tuple(parse_keys(keyspec)) + d[keyseq] = command + if self.verbose: + print d + self.k = self.ck = compile_keymap(d, ()) + self.results = [] + self.stack = [] + def push(self, evt): + if self.verbose: + print "pushed", evt.data, + key = evt.data + d = self.k.get(key) + if isinstance(d, dict): + if self.verbose: + print "transition" + self.stack.append(key) + self.k = d + else: + if d is None: + if self.verbose: + print "invalid" + if self.stack or len(key) > 1 or unicodedata_.category(key) == 'C': + self.results.append( + (self.invalid_cls, self.stack + [key])) + else: + # small optimization: + self.k[key] = self.character_cls + self.results.append( + (self.character_cls, [key])) + else: + if self.verbose: + print "matched", d + self.results.append((d, self.stack + [key])) + self.stack = [] + self.k = self.ck + def get(self): + if self.results: + return self.results.pop(0) + else: + return None + def empty(self): + return not self.results diff --git a/lib_pypy/pyrepl/keymap.py b/lib_pypy/pyrepl/keymap.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/keymap.py @@ -0,0 +1,186 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that 
both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +functions for parsing keyspecs + +Support for turning keyspecs into appropriate sequences. + +pyrepl uses it's own bastardized keyspec format, which is meant to be +a strict superset of readline's \"KEYSEQ\" format (which is to say +that if you can come up with a spec readline accepts that this +doesn't, you've found a bug and should tell me about it). + +Note that this is the `\\C-o' style of readline keyspec, not the +`Control-o' sort. + +A keyspec is a string representing a sequence of keypresses that can +be bound to a command. + +All characters other than the backslash represent themselves. In the +traditional manner, a backslash introduces a escape sequence. + +The extension to readline is that the sequence \\ denotes the +sequence of charaters produced by hitting KEY. + +Examples: + +`a' - what you get when you hit the `a' key +`\\EOA' - Escape - O - A (up, on my terminal) +`\\' - the up arrow key +`\\' - ditto (keynames are case insensitive) +`\\C-o', `\\c-o' - control-o +`\\M-.' - meta-period +`\\E.' - ditto (that's how meta works for pyrepl) +`\\', `\\', `\\t', `\\011', '\\x09', '\\X09', '\\C-i', '\\C-I' + - all of these are the tab character. Can you think of any more? 
+""" + +_escapes = { + '\\':'\\', + "'":"'", + '"':'"', + 'a':'\a', + 'b':'\h', + 'e':'\033', + 'f':'\f', + 'n':'\n', + 'r':'\r', + 't':'\t', + 'v':'\v' + } + +_keynames = { + 'backspace': 'backspace', + 'delete': 'delete', + 'down': 'down', + 'end': 'end', + 'enter': '\r', + 'escape': '\033', + 'f1' : 'f1', 'f2' : 'f2', 'f3' : 'f3', 'f4' : 'f4', + 'f5' : 'f5', 'f6' : 'f6', 'f7' : 'f7', 'f8' : 'f8', + 'f9' : 'f9', 'f10': 'f10', 'f11': 'f11', 'f12': 'f12', + 'f13': 'f13', 'f14': 'f14', 'f15': 'f15', 'f16': 'f16', + 'f17': 'f17', 'f18': 'f18', 'f19': 'f19', 'f20': 'f20', + 'home': 'home', + 'insert': 'insert', + 'left': 'left', + 'page down': 'page down', + 'page up': 'page up', + 'return': '\r', + 'right': 'right', + 'space': ' ', + 'tab': '\t', + 'up': 'up', + } + +class KeySpecError(Exception): + pass + +def _parse_key1(key, s): + ctrl = 0 + meta = 0 + ret = '' + while not ret and s < len(key): + if key[s] == '\\': + c = key[s+1].lower() + if _escapes.has_key(c): + ret = _escapes[c] + s += 2 + elif c == "c": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\C must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if ctrl: + raise KeySpecError, "doubled \\C- (char %d of %s)"%( + s + 1, repr(key)) + ctrl = 1 + s += 3 + elif c == "m": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\M must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if meta: + raise KeySpecError, "doubled \\M- (char %d of %s)"%( + s + 1, repr(key)) + meta = 1 + s += 3 + elif c.isdigit(): + n = key[s+1:s+4] + ret = chr(int(n, 8)) + s += 4 + elif c == 'x': + n = key[s+2:s+4] + ret = chr(int(n, 16)) + s += 4 + elif c == '<': + t = key.find('>', s) + if t == -1: + raise KeySpecError, \ + "unterminated \\< starting at char %d of %s"%( + s + 1, repr(key)) + ret = key[s+2:t].lower() + if ret not in _keynames: + raise KeySpecError, \ + "unrecognised keyname `%s' at char %d of %s"%( + ret, s + 2, repr(key)) + ret = _keynames[ret] + s = t + 1 + else: + raise KeySpecError, \ + 
"unknown backslash escape %s at char %d of %s"%( + `c`, s + 2, repr(key)) + else: + ret = key[s] + s += 1 + if ctrl: + if len(ret) > 1: + raise KeySpecError, "\\C- must be followed by a character" + ret = chr(ord(ret) & 0x1f) # curses.ascii.ctrl() + if meta: + ret = ['\033', ret] + else: + ret = [ret] + return ret, s + +def parse_keys(key): + s = 0 + r = [] + while s < len(key): + k, s = _parse_key1(key, s) + r.extend(k) + return r + +def compile_keymap(keymap, empty=''): + r = {} + for key, value in keymap.items(): + r.setdefault(key[0], {})[key[1:]] = value + for key, value in r.items(): + if empty in value: + if len(value) <> 1: + raise KeySpecError, \ + "key definitions for %s clash"%(value.values(),) + else: + r[key] = value[empty] + else: + r[key] = compile_keymap(value, empty) + return r diff --git a/lib_pypy/pyrepl/keymaps.py b/lib_pypy/pyrepl/keymaps.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/keymaps.py @@ -0,0 +1,140 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +reader_emacs_keymap = tuple( + [(r'\C-a', 'beginning-of-line'), + (r'\C-b', 'left'), + (r'\C-c', 'interrupt'), + (r'\C-d', 'delete'), + (r'\C-e', 'end-of-line'), + (r'\C-f', 'right'), + (r'\C-g', 'cancel'), + (r'\C-h', 'backspace'), + (r'\C-j', 'self-insert'), + (r'\', 'accept'), + (r'\C-k', 'kill-line'), + (r'\C-l', 'clear-screen'), +# (r'\C-m', 'accept'), + (r'\C-q', 'quoted-insert'), + (r'\C-t', 'transpose-characters'), + (r'\C-u', 'unix-line-discard'), + (r'\C-v', 'quoted-insert'), + (r'\C-w', 'unix-word-rubout'), + (r'\C-x\C-u', 'upcase-region'), + (r'\C-y', 'yank'), + (r'\C-z', 'suspend'), + + (r'\M-b', 'backward-word'), + (r'\M-c', 'capitalize-word'), + (r'\M-d', 'kill-word'), + (r'\M-f', 'forward-word'), + (r'\M-l', 'downcase-word'), + (r'\M-t', 'transpose-words'), + (r'\M-u', 'upcase-word'), + (r'\M-y', 'yank-pop'), + (r'\M--', 'digit-arg'), + (r'\M-0', 'digit-arg'), + (r'\M-1', 'digit-arg'), + (r'\M-2', 'digit-arg'), + (r'\M-3', 'digit-arg'), + (r'\M-4', 'digit-arg'), + (r'\M-5', 'digit-arg'), + (r'\M-6', 'digit-arg'), + (r'\M-7', 'digit-arg'), + (r'\M-8', 'digit-arg'), + (r'\M-9', 'digit-arg'), + (r'\M-\n', 'self-insert'), + (r'\', 'self-insert')] + \ + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\', 'up'), + (r'\', 'down'), + (r'\', 'left'), + (r'\', 'right'), + (r'\', 'quoted-insert'), + (r'\', 'delete'), + (r'\', 'backspace'), + (r'\M-\', 'backward-kill-word'), + (r'\', 'end'), + (r'\', 'home'), + (r'\', 'help'), + (r'\EOF', 'end'), # the entries in the terminfo database for xterms + (r'\EOH', 'home'), # seem to be wrong. 
this is a less than ideal + # workaround + ]) + +hist_emacs_keymap = reader_emacs_keymap + ( + (r'\C-n', 'next-history'), + (r'\C-p', 'previous-history'), + (r'\C-o', 'operate-and-get-next'), + (r'\C-r', 'reverse-history-isearch'), + (r'\C-s', 'forward-history-isearch'), + (r'\M-r', 'restore-history'), + (r'\M-.', 'yank-arg'), + (r'\', 'last-history'), + (r'\', 'first-history')) + +comp_emacs_keymap = hist_emacs_keymap + ( + (r'\t', 'complete'),) + +python_emacs_keymap = comp_emacs_keymap + ( + (r'\n', 'maybe-accept'), + (r'\M-\n', 'self-insert')) + +reader_vi_insert_keymap = tuple( + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\C-d', 'delete'), + (r'\', 'backspace'), + ('')]) + +reader_vi_command_keymap = tuple( + [ + ('E', 'enter-emacs-mode'), + ('R', 'enter-replace-mode'), + ('dw', 'delete-word'), + ('dd', 'delete-line'), + + ('h', 'left'), + ('i', 'enter-insert-mode'), + ('j', 'down'), + ('k', 'up'), + ('l', 'right'), + ('r', 'replace-char'), + ('w', 'forward-word'), + ('x', 'delete'), + ('.', 'repeat-edit'), # argh! + (r'\', 'enter-insert-mode'), + ] + + [(c, 'digit-arg') for c in '01234567689'] + + []) + + +reader_keymaps = { + 'emacs' : reader_emacs_keymap, + 'vi-insert' : reader_vi_insert_keymap, + 'vi-command' : reader_vi_command_keymap + } + +del c # from the listcomps + diff --git a/lib_pypy/pyrepl/module_lister.py b/lib_pypy/pyrepl/module_lister.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/module_lister.py @@ -0,0 +1,70 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl.completing_reader import uniqify +import os, sys + +# for the completion support. +# this is all quite nastily written. +_packages = {} + +def _make_module_list_dir(dir, suffs, prefix=''): + l = [] + for fname in os.listdir(dir): + file = os.path.join(dir, fname) + if os.path.isfile(file): + for suff in suffs: + if fname.endswith(suff): + l.append( prefix + fname[:-len(suff)] ) + break + elif os.path.isdir(file) \ + and os.path.exists(os.path.join(file, "__init__.py")): + l.append( prefix + fname ) + _packages[prefix + fname] = _make_module_list_dir( + file, suffs, prefix + fname + '.' ) + l = uniqify(l) + l.sort() + return l + +def _make_module_list(): + import imp + suffs = [x[0] for x in imp.get_suffixes() if x[0] != '.pyc'] + def compare(x, y): + c = -cmp(len(x), len(y)) + if c: + return c + else: + return -cmp(x, y) + suffs.sort(compare) + _packages[''] = list(sys.builtin_module_names) + for dir in sys.path: + if dir == '': + dir = '.' 
+ if os.path.isdir(dir): + _packages[''] += _make_module_list_dir(dir, suffs) + _packages[''].sort() + +def find_modules(stem): + l = stem.split('.') + pack = '.'.join(l[:-1]) + try: + mods = _packages[pack] + except KeyError: + raise ImportError, "can't find \"%s\" package"%pack + return [mod for mod in mods if mod.startswith(stem)] diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/pygame_console.py @@ -0,0 +1,353 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# the pygame console is currently thoroughly broken. + +# there's a fundamental difference from the UnixConsole: here we're +# the terminal emulator too, in effect. This means, e.g., for pythoni +# we really need a separate process (or thread) to monitor for ^C +# during command execution and zap the executor process. Making this +# work on non-Unix is expected to be even more entertaining. 
+ +from pygame.locals import * +from pyrepl.console import Console, Event +from pyrepl import pygame_keymap +import pygame +import types + +lmargin = 5 +rmargin = 5 +tmargin = 5 +bmargin = 5 + +try: + bool +except NameError: + def bool(x): + return not not x + +modcolors = {K_LCTRL:1, + K_RCTRL:1, + K_LMETA:1, + K_RMETA:1, + K_LALT:1, + K_RALT:1, + K_LSHIFT:1, + K_RSHIFT:1} + +class colors: + fg = 250,240,230 + bg = 5, 5, 5 + cursor = 230, 0, 230 + margin = 5, 5, 15 + +class FakeStdout: + def __init__(self, con): + self.con = con + def write(self, text): + self.con.write(text) + def flush(self): + pass + +class FakeStdin: + def __init__(self, con): + self.con = con + def read(self, n=None): + # argh! + raise NotImplementedError + def readline(self, n=None): + from reader import Reader + try: + # this isn't quite right: it will clobber any prompt that's + # been printed. Not sure how to get around this... + return Reader(self.con).readline() + except EOFError: + return '' + +class PyGameConsole(Console): + """Attributes: + + (keymap), + (fd), + screen, + height, + width, + """ + + def __init__(self): + self.pygame_screen = pygame.display.set_mode((800, 600)) + pygame.font.init() + pygame.key.set_repeat(500, 30) + self.font = pygame.font.Font( + "/usr/X11R6/lib/X11/fonts/TTF/luximr.ttf", 15) + self.fw, self.fh = self.fontsize = self.font.size("X") + self.cursor = pygame.Surface(self.fontsize) + self.cursor.fill(colors.cursor) + self.clear() + self.curs_vis = 1 + self.height, self.width = self.getheightwidth() + pygame.display.update() + pygame.event.set_allowed(None) + pygame.event.set_allowed(KEYDOWN) + + def install_keymap(self, keymap): + """Install a given keymap. + + keymap is a tuple of 2-element tuples; each small tuple is a + pair (keyspec, event-name). 
The format for keyspec is + modelled on that used by readline (so read that manual for + now!).""" + self.k = self.keymap = pygame_keymap.compile_keymap(keymap) + + def char_rect(self, x, y): + return self.char_pos(x, y), self.fontsize + + def char_pos(self, x, y): + return (lmargin + x*self.fw, + tmargin + y*self.fh + self.cur_top + self.scroll) + + def paint_margin(self): + s = self.pygame_screen + c = colors.margin + s.fill(c, [0, 0, 800, tmargin]) + s.fill(c, [0, 0, lmargin, 600]) + s.fill(c, [0, 600 - bmargin, 800, bmargin]) + s.fill(c, [800 - rmargin, 0, lmargin, 600]) + + def refresh(self, screen, (cx, cy)): + self.screen = screen + self.pygame_screen.fill(colors.bg, + [0, tmargin + self.cur_top + self.scroll, + 800, 600]) + self.paint_margin() + + line_top = self.cur_top + width, height = self.fontsize + self.cxy = (cx, cy) + cp = self.char_pos(cx, cy) + if cp[1] < tmargin: + self.scroll = - (cy*self.fh + self.cur_top) + self.repaint() + elif cp[1] + self.fh > 600 - bmargin: + self.scroll += (600 - bmargin) - (cp[1] + self.fh) + self.repaint() + if self.curs_vis: + self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy)) + for line in screen: + if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): + if line: + ren = self.font.render(line, 1, colors.fg) + self.pygame_screen.blit(ren, (lmargin, + tmargin + line_top + self.scroll)) + line_top += self.fh + pygame.display.update() + + def prepare(self): + self.cmd_buf = '' + self.k = self.keymap + self.height, self.width = self.getheightwidth() + self.curs_vis = 1 + self.cur_top = self.pos[0] + self.event_queue = [] + + def restore(self): + pass + + def blit_a_char(self, linen, charn): + line = self.screen[linen] + if charn < len(line): + text = self.font.render(line[charn], 1, colors.fg) + self.pygame_screen.blit(text, self.char_pos(charn, linen)) + + def move_cursor(self, x, y): + cp = self.char_pos(x, y) + if cp[1] < tmargin or cp[1] + self.fh > 600 - bmargin: + 
self.event_queue.append(Event('refresh', '', '')) + else: + if self.curs_vis: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + self.pygame_screen.blit(self.cursor, cp) + self.blit_a_char(y, x) + pygame.display.update() + self.cxy = (x, y) + + def set_cursor_vis(self, vis): + self.curs_vis = vis + if vis: + self.move_cursor(*self.cxy) + else: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + pygame.display.update() + + def getheightwidth(self): + """Return (height, width) where height and width are the height + and width of the terminal window in characters.""" + return ((600 - tmargin - bmargin)/self.fh, + (800 - lmargin - rmargin)/self.fw) + + def tr_event(self, pyg_event): + shift = bool(pyg_event.mod & KMOD_SHIFT) + ctrl = bool(pyg_event.mod & KMOD_CTRL) + meta = bool(pyg_event.mod & (KMOD_ALT|KMOD_META)) + + try: + return self.k[(pyg_event.unicode, meta, ctrl)], pyg_event.unicode + except KeyError: + try: + return self.k[(pyg_event.key, meta, ctrl)], pyg_event.unicode + except KeyError: + return "invalid-key", pyg_event.unicode + + def get_event(self, block=1): + """Return an Event instance. Returns None if |block| is false + and there is no event pending, otherwise waits for the + completion of an event.""" + while 1: + if self.event_queue: + return self.event_queue.pop(0) + elif block: + pyg_event = pygame.event.wait() + else: + pyg_event = pygame.event.poll() + if pyg_event.type == NOEVENT: + return + + if pyg_event.key in modcolors: + continue + + k, c = self.tr_event(pyg_event) + self.cmd_buf += c.encode('ascii', 'replace') + self.k = k + + if not isinstance(k, types.DictType): + e = Event(k, self.cmd_buf, []) + self.k = self.keymap + self.cmd_buf = '' + return e + + def beep(self): + # uhh, can't be bothered now. + # pygame.sound.something, I guess. 
+ pass + + def clear(self): + """Wipe the screen""" + self.pygame_screen.fill(colors.bg) + #self.screen = [] + self.pos = [0, 0] + self.grobs = [] + self.cur_top = 0 + self.scroll = 0 + + def finish(self): + """Move the cursor to the end of the display and otherwise get + ready for end. XXX could be merged with restore? Hmm.""" + if self.curs_vis: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + for line in self.screen: + self.write_line(line, 1) + if self.curs_vis: + self.pygame_screen.blit(self.cursor, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + pygame.display.update() + + def flushoutput(self): + """Flush all output to the screen (assuming there's some + buffering going on somewhere)""" + # no buffering here, ma'am (though perhaps there should be!) + pass + + def forgetinput(self): + """Forget all pending, but not yet processed input.""" + while pygame.event.poll().type <> NOEVENT: + pass + + def getpending(self): + """Return the characters that have been typed but not yet + processed.""" + events = [] + while 1: + event = pygame.event.poll() + if event.type == NOEVENT: + break + events.append(event) + + return events + + def wait(self): + """Wait for an event.""" + raise Exception, "erp!" + + def repaint(self): + # perhaps we should consolidate grobs? 
+ self.pygame_screen.fill(colors.bg) + self.paint_margin() + for (y, x), surf, text in self.grobs: + if surf and 0 < y + self.scroll: + self.pygame_screen.blit(surf, (lmargin + x, + tmargin + y + self.scroll)) + pygame.display.update() + + def write_line(self, line, ret): + charsleft = (self.width*self.fw - self.pos[1])/self.fw + while len(line) > charsleft: + self.write_line(line[:charsleft], 1) + line = line[charsleft:] + if line: + ren = self.font.render(line, 1, colors.fg, colors.bg) + self.grobs.append((self.pos[:], ren, line)) + self.pygame_screen.blit(ren, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + else: + self.grobs.append((self.pos[:], None, line)) + if ret: + self.pos[0] += self.fh + if tmargin + self.pos[0] + self.scroll + self.fh > 600 - bmargin: + self.scroll = 600 - bmargin - self.pos[0] - self.fh - tmargin + self.repaint() + self.pos[1] = 0 + else: + self.pos[1] += self.fw*len(line) + + def write(self, text): + lines = text.split("\n") + if self.curs_vis: + self.pygame_screen.fill(colors.bg, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll, + self.fw, self.fh)) + for line in lines[:-1]: + self.write_line(line, 1) + self.write_line(lines[-1], 0) + if self.curs_vis: + self.pygame_screen.blit(self.cursor, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + pygame.display.update() + + def flush(self): + pass diff --git a/lib_pypy/pyrepl/pygame_keymap.py b/lib_pypy/pyrepl/pygame_keymap.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/pygame_keymap.py @@ -0,0 +1,250 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# keyspec parsing for a pygame console. currently this is simply copy +# n' change from the unix (ie. trad terminal) variant; probably some +# refactoring will happen when I work out how it will work best. + +# A key is represented as *either* + +# a) a (keycode, meta, ctrl) sequence (used for special keys such as +# f1, the up arrow key, etc) +# b) a (unichar, meta, ctrl) sequence (used for printable chars) + +# Because we allow keystokes like '\\C-xu', I'll use the same trick as +# the unix keymap module uses. + +# '\\C-a' --> (K_a, 0, 1) + +# XXX it's actually possible to test this module, so it should have a +# XXX test suite. 
+ +from pygame.locals import * + +_escapes = { + '\\': K_BACKSLASH, + "'" : K_QUOTE, + '"' : K_QUOTEDBL, +# 'a' : '\a', + 'b' : K_BACKSLASH, + 'e' : K_ESCAPE, +# 'f' : '\f', + 'n' : K_RETURN, + 'r' : K_RETURN, + 't' : K_TAB, +# 'v' : '\v' + } + +_keynames = { + 'backspace' : K_BACKSPACE, + 'delete' : K_DELETE, + 'down' : K_DOWN, + 'end' : K_END, + 'enter' : K_KP_ENTER, + 'escape' : K_ESCAPE, + 'f1' : K_F1, 'f2' : K_F2, 'f3' : K_F3, 'f4' : K_F4, + 'f5' : K_F5, 'f6' : K_F6, 'f7' : K_F7, 'f8' : K_F8, + 'f9' : K_F9, 'f10': K_F10,'f11': K_F11,'f12': K_F12, + 'f13': K_F13,'f14': K_F14,'f15': K_F15, + 'home' : K_HOME, + 'insert' : K_INSERT, + 'left' : K_LEFT, + 'pgdown' : K_PAGEDOWN, 'page down' : K_PAGEDOWN, + 'pgup' : K_PAGEUP, 'page up' : K_PAGEUP, + 'return' : K_RETURN, + 'right' : K_RIGHT, + 'space' : K_SPACE, + 'tab' : K_TAB, + 'up' : K_UP, + } + +class KeySpecError(Exception): + pass + +def _parse_key1(key, s): + ctrl = 0 + meta = 0 + ret = '' + while not ret and s < len(key): + if key[s] == '\\': + c = key[s+1].lower() + if _escapes.has_key(c): + ret = _escapes[c] + s += 2 + elif c == "c": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\C must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if ctrl: + raise KeySpecError, "doubled \\C- (char %d of %s)"%( + s + 1, repr(key)) + ctrl = 1 + s += 3 + elif c == "m": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\M must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if meta: + raise KeySpecError, "doubled \\M- (char %d of %s)"%( + s + 1, repr(key)) + meta = 1 + s += 3 + elif c.isdigit(): + n = key[s+1:s+4] + ret = chr(int(n, 8)) + s += 4 + elif c == 'x': + n = key[s+2:s+4] + ret = chr(int(n, 16)) + s += 4 + elif c == '<': + t = key.find('>', s) + if t == -1: + raise KeySpecError, \ + "unterminated \\< starting at char %d of %s"%( + s + 1, repr(key)) + try: + ret = _keynames[key[s+2:t].lower()] + s = t + 1 + except KeyError: + raise KeySpecError, \ + "unrecognised keyname `%s' at char %d 
of %s"%( + key[s+2:t], s + 2, repr(key)) + if ret is None: + return None, s + else: + raise KeySpecError, \ + "unknown backslash escape %s at char %d of %s"%( + `c`, s + 2, repr(key)) + else: + if ctrl: + ret = chr(ord(key[s]) & 0x1f) # curses.ascii.ctrl() + ret = unicode(ret) + else: + ret = unicode(key[s]) + s += 1 + return (ret, meta, ctrl), s + +def parse_keys(key): + s = 0 + r = [] + while s < len(key): + k, s = _parse_key1(key, s) + if k is None: + return None + r.append(k) + return tuple(r) + +def _compile_keymap(keymap): + r = {} + for key, value in keymap.items(): + r.setdefault(key[0], {})[key[1:]] = value + for key, value in r.items(): + if value.has_key(()): + if len(value) <> 1: + raise KeySpecError, \ + "key definitions for %s clash"%(value.values(),) + else: + r[key] = value[()] + else: + r[key] = _compile_keymap(value) + return r + +def compile_keymap(keymap): + r = {} + for key, value in keymap: + k = parse_keys(key) + if value is None and r.has_key(k): + del r[k] + if k is not None: + r[k] = value + return _compile_keymap(r) + +def keyname(key): + longest_match = '' + longest_match_name = '' + for name, keyseq in keyset.items(): + if keyseq and key.startswith(keyseq) and \ + len(keyseq) > len(longest_match): + longest_match = keyseq + longest_match_name = name + if len(longest_match) > 0: + return longest_match_name, len(longest_match) + else: + return None, 0 + +_unescapes = {'\r':'\\r', '\n':'\\n', '\177':'^?'} + +#for k,v in _escapes.items(): +# _unescapes[v] = k + +def unparse_key(keyseq): + if not keyseq: + return '' + name, s = keyname(keyseq) + if name: + if name <> 'escape' or s == len(keyseq): + return '\\<' + name + '>' + unparse_key(keyseq[s:]) + else: + return '\\M-' + unparse_key(keyseq[1:]) + else: + c = keyseq[0] + r = keyseq[1:] + if c == '\\': + p = '\\\\' + elif _unescapes.has_key(c): + p = _unescapes[c] + elif ord(c) < ord(' '): + p = '\\C-%s'%(chr(ord(c)+96),) + elif ord(' ') <= ord(c) <= ord('~'): + p = c + else: + p = 
'\\%03o'%(ord(c),) + return p + unparse_key(r) + +def _unparse_keyf(keyseq): + if not keyseq: + return [] + name, s = keyname(keyseq) + if name: + if name <> 'escape' or s == len(keyseq): + return [name] + _unparse_keyf(keyseq[s:]) + else: + rest = _unparse_keyf(keyseq[1:]) + return ['M-'+rest[0]] + rest[1:] + else: + c = keyseq[0] + r = keyseq[1:] + if c == '\\': + p = '\\' + elif _unescapes.has_key(c): + p = _unescapes[c] + elif ord(c) < ord(' '): + p = 'C-%s'%(chr(ord(c)+96),) + elif ord(' ') <= ord(c) <= ord('~'): + p = c + else: + p = '\\%03o'%(ord(c),) + return [p] + _unparse_keyf(r) + +def unparse_keyf(keyseq): + return " ".join(_unparse_keyf(keyseq)) diff --git a/lib_pypy/pyrepl/python_reader.py b/lib_pypy/pyrepl/python_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/python_reader.py @@ -0,0 +1,392 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Bob Ippolito +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +# one impressive collections of imports: +from pyrepl.completing_reader import CompletingReader +from pyrepl.historical_reader import HistoricalReader +from pyrepl import completing_reader, reader +from pyrepl import copy_code, commands, completer +from pyrepl import module_lister +import new, sys, os, re, code, traceback +import atexit, warnings +try: + import cPickle as pickle +except ImportError: + import pickle +try: + import imp + imp.find_module("twisted") + from twisted.internet import reactor + from twisted.internet.abstract import FileDescriptor +except ImportError: + default_interactmethod = "interact" +else: + default_interactmethod = "twistedinteract" + +CommandCompiler = code.CommandCompiler + +def eat_it(*args): + """this function eats warnings, if you were wondering""" + pass + +class maybe_accept(commands.Command): + def do(self): + r = self.reader + text = r.get_unicode() + try: + # ooh, look at the hack: + code = r.compiler("#coding:utf-8\n"+text.encode('utf-8')) + except (OverflowError, SyntaxError, ValueError): + self.finish = 1 + else: + if code is None: + r.insert("\n") + else: + self.finish = 1 + +from_line_prog = re.compile( + "^from\s+(?P[A-Za-z_.0-9]*)\s+import\s+(?P[A-Za-z_.0-9]*)") +import_line_prog = re.compile( + "^(?:import|from)\s+(?P[A-Za-z_.0-9]*)\s*$") + +def mk_saver(reader): + def saver(reader=reader): + try: + file = open(os.path.expanduser("~/.pythoni.hist"), "w") + except IOError: + pass + else: + pickle.dump(reader.history, file) + file.close() + return saver + +class PythonicReader(CompletingReader, HistoricalReader): + def collect_keymap(self): + return super(PythonicReader, self).collect_keymap() + ( + (r'\n', 'maybe-accept'), + (r'\M-\n', 'insert-nl')) + + def __init__(self, console, locals, + compiler=None): + super(PythonicReader, self).__init__(console) + self.completer = completer.Completer(locals) + st = self.syntax_table + for c in "._0123456789": + st[c] = reader.SYNTAX_WORD + self.locals = locals + if compiler 
is None: + self.compiler = CommandCompiler() + else: + self.compiler = compiler + try: + file = open(os.path.expanduser("~/.pythoni.hist")) + except IOError: + pass + else: + try: + self.history = pickle.load(file) + except: + self.history = [] + self.historyi = len(self.history) + file.close() + atexit.register(mk_saver(self)) + for c in [maybe_accept]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + + def get_completions(self, stem): + b = self.get_unicode() + m = import_line_prog.match(b) + if m: + if not self._module_list_ready: + module_lister._make_module_list() + self._module_list_ready = True + + mod = m.group("mod") + try: + return module_lister.find_modules(mod) + except ImportError: + pass + m = from_line_prog.match(b) + if m: + mod, name = m.group("mod", "name") + try: + l = module_lister._packages[mod] + except KeyError: + try: + mod = __import__(mod, self.locals, self.locals, ['']) + return [x for x in dir(mod) if x.startswith(name)] + except ImportError: + pass + else: + return [x[len(mod) + 1:] + for x in l if x.startswith(mod + '.' 
+ name)] + try: + l = completing_reader.uniqify(self.completer.complete(stem)) + return l + except (NameError, AttributeError): + return [] + +class ReaderConsole(code.InteractiveInterpreter): + II_init = code.InteractiveInterpreter.__init__ + def __init__(self, console, locals=None): + if locals is None: + locals = {} + self.II_init(locals) + self.compiler = CommandCompiler() + self.compile = self.compiler.compiler + self.reader = PythonicReader(console, locals, self.compiler) + locals['Reader'] = self.reader + + def run_user_init_file(self): + for key in "PYREPLSTARTUP", "PYTHONSTARTUP": + initfile = os.environ.get(key) + if initfile is not None and os.path.exists(initfile): + break + else: + return + try: + execfile(initfile, self.locals, self.locals) + except: + etype, value, tb = sys.exc_info() + traceback.print_exception(etype, value, tb.tb_next) + + def execute(self, text): + try: + # ooh, look at the hack: + code = self.compile("# coding:utf8\n"+text.encode('utf-8'), + '', 'single') + except (OverflowError, SyntaxError, ValueError): + self.showsyntaxerror("") + else: + self.runcode(code) + sys.stdout.flush() + + def interact(self): + while 1: + try: # catches EOFError's and KeyboardInterrupts during execution + try: # catches KeyboardInterrupts during editing + try: # warning saver + # can't have warnings spewed onto terminal + sv = warnings.showwarning + warnings.showwarning = eat_it + l = unicode(self.reader.readline(), 'utf-8') + finally: + warnings.showwarning = sv + except KeyboardInterrupt: + print "KeyboardInterrupt" + else: + if l: + self.execute(l) + except EOFError: + break + except KeyboardInterrupt: + continue + + def prepare(self): + self.sv_sw = warnings.showwarning + warnings.showwarning = eat_it + self.reader.prepare() + self.reader.refresh() # we want :after methods... 
+ + def restore(self): + self.reader.restore() + warnings.showwarning = self.sv_sw + + def handle1(self, block=1): + try: + r = 1 + r = self.reader.handle1(block) + except KeyboardInterrupt: + self.restore() + print "KeyboardInterrupt" + self.prepare() + else: + if self.reader.finished: + text = self.reader.get_unicode() + self.restore() + if text: + self.execute(text) + self.prepare() + return r + + def tkfilehandler(self, file, mask): + try: + self.handle1(block=0) + except: + self.exc_info = sys.exc_info() + + # how the do you get this to work on Windows (without + # createfilehandler)? threads, I guess + def really_tkinteract(self): + import _tkinter + _tkinter.createfilehandler( + self.reader.console.input_fd, _tkinter.READABLE, + self.tkfilehandler) + + self.exc_info = None + while 1: + # dooneevent will return 0 without blocking if there are + # no Tk windows, 1 after blocking until an event otherwise + # so the following does what we want (this wasn't expected + # to be obvious). + if not _tkinter.dooneevent(_tkinter.ALL_EVENTS): + self.handle1(block=1) + if self.exc_info: + type, value, tb = self.exc_info + self.exc_info = None + raise type, value, tb + + def tkinteract(self): + """Run a Tk-aware Python interactive session. + + This function simulates the Python top-level in a way that + allows Tk's mainloop to run.""" + + # attempting to understand the control flow of this function + # without help may cause internal injuries. so, some + # explanation. + + # The outer while loop is there to restart the interaction if + # the user types control-c when execution is deep in our + # innards. I'm not sure this can't leave internals in an + # inconsistent state, but it's a good start. + + # then the inside loop keeps calling self.handle1 until + # _tkinter gets imported; then control shifts to + # self.really_tkinteract, above. 
+ + # this function can only return via an exception; we mask + # EOFErrors (but they end the interaction) and + # KeyboardInterrupts cause a restart. All other exceptions + # are likely bugs in pyrepl (well, 'cept for SystemExit, of + # course). + + while 1: + try: + try: + self.prepare() + try: + while 1: + if sys.modules.has_key("_tkinter"): + self.really_tkinteract() + # really_tkinteract is not expected to + # return except via an exception, but: + break + self.handle1() + except EOFError: + pass + finally: + self.restore() + except KeyboardInterrupt: + continue + else: + break + + def twistedinteract(self): + from twisted.internet import reactor + from twisted.internet.abstract import FileDescriptor + import signal + outerself = self + class Me(FileDescriptor): + def fileno(self): + """ We want to select on FD 0 """ + return 0 + + def doRead(self): + """called when input is ready""" + try: + outerself.handle1() + except EOFError: + reactor.stop() + + reactor.addReader(Me()) + reactor.callWhenRunning(signal.signal, + signal.SIGINT, + signal.default_int_handler) + self.prepare() + try: + reactor.run() + finally: + self.restore() + + + def cocoainteract(self, inputfilehandle=None, outputfilehandle=None): + # only call this when there's a run loop already going! 
+ # note that unlike the other *interact methods, this returns immediately + from cocoasupport import CocoaInteracter + self.cocoainteracter = CocoaInteracter.alloc().init(self, inputfilehandle, outputfilehandle) + + +def main(use_pygame_console=0, interactmethod=default_interactmethod, print_banner=True, clear_main=True): + si, se, so = sys.stdin, sys.stderr, sys.stdout + try: + if 0 and use_pygame_console: # pygame currently borked + from pyrepl.pygame_console import PyGameConsole, FakeStdin, FakeStdout + con = PyGameConsole() + sys.stderr = sys.stdout = FakeStdout(con) + sys.stdin = FakeStdin(con) + else: + from pyrepl.unix_console import UnixConsole + try: + import locale + except ImportError: + encoding = None + else: + if hasattr(locale, 'nl_langinfo') \ + and hasattr(locale, 'CODESET'): + encoding = locale.nl_langinfo(locale.CODESET) + elif os.environ.get('TERM_PROGRAM') == 'Apple_Terminal': + # /me whistles innocently... + code = int(os.popen( + "defaults read com.apple.Terminal StringEncoding" + ).read()) + if code == 4: + encoding = 'utf-8' + # More could go here -- and what's here isn't + # bulletproof. What would be? AppleScript? + # Doesn't seem to be possible. + else: + encoding = None + else: + encoding = None # so you get ASCII... + con = UnixConsole(0, 1, None, encoding) + if print_banner: + print "Python", sys.version, "on", sys.platform + print 'Type "help", "copyright", "credits" or "license" '\ + 'for more information.' 
+ sys.path.insert(0, os.getcwd()) + + if clear_main and __name__ != '__main__': + mainmod = new.module('__main__') + sys.modules['__main__'] = mainmod + else: + mainmod = sys.modules['__main__'] + + rc = ReaderConsole(con, mainmod.__dict__) + rc.reader._module_list_ready = False + rc.run_user_init_file() + getattr(rc, interactmethod)() + finally: + sys.stdin, sys.stderr, sys.stdout = si, se, so + +if __name__ == '__main__': + main() diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/reader.py @@ -0,0 +1,614 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import types +from pyrepl import unicodedata_ +from pyrepl import commands +from pyrepl import input + +def _make_unctrl_map(): + uc_map = {} + for c in map(unichr, range(256)): + if unicodedata_.category(c)[0] <> 'C': + uc_map[c] = c + for i in range(32): + c = unichr(i) + uc_map[c] = u'^' + unichr(ord('A') + i - 1) + uc_map['\t'] = ' ' # display TABs as 4 characters + uc_map['\177'] = u'^?' 
+ for i in range(256): + c = unichr(i) + if not uc_map.has_key(c): + uc_map[c] = u'\\%03o'%i + return uc_map + +# disp_str proved to be a bottleneck for large inputs, so it's been +# rewritten in C; it's not required though. +try: + raise ImportError # currently it's borked by the unicode support + + from _pyrepl_utils import disp_str, init_unctrl_map + + init_unctrl_map(_make_unctrl_map()) + + del init_unctrl_map +except ImportError: + def _my_unctrl(c, u=_make_unctrl_map()): + if c in u: + return u[c] + else: + if unicodedata_.category(c).startswith('C'): + return '\u%04x'%(ord(c),) + else: + return c + + def disp_str(buffer, join=''.join, uc=_my_unctrl): + """ disp_str(buffer:string) -> (string, [int]) + + Return the string that should be the printed represenation of + |buffer| and a list detailing where the characters of |buffer| + get used up. E.g.: + + >>> disp_str(chr(3)) + ('^C', [1, 0]) + + the list always contains 0s or 1s at present; it could conceivably + go higher as and when unicode support happens.""" + s = map(uc, buffer) + return (join(s), + map(ord, join(map(lambda x:'\001'+(len(x)-1)*'\000', s)))) + + del _my_unctrl + +del _make_unctrl_map + +# syntax classes: + +[SYNTAX_WHITESPACE, + SYNTAX_WORD, + SYNTAX_SYMBOL] = range(3) + +def make_default_syntax_table(): + # XXX perhaps should use some unicodedata here? 
+ st = {} + for c in map(unichr, range(256)): + st[c] = SYNTAX_SYMBOL + for c in [a for a in map(unichr, range(256)) if a.isalpha()]: + st[c] = SYNTAX_WORD + st[u'\n'] = st[u' '] = SYNTAX_WHITESPACE + return st + +default_keymap = tuple( + [(r'\C-a', 'beginning-of-line'), + (r'\C-b', 'left'), + (r'\C-c', 'interrupt'), + (r'\C-d', 'delete'), + (r'\C-e', 'end-of-line'), + (r'\C-f', 'right'), + (r'\C-g', 'cancel'), + (r'\C-h', 'backspace'), + (r'\C-j', 'accept'), + (r'\', 'accept'), + (r'\C-k', 'kill-line'), + (r'\C-l', 'clear-screen'), + (r'\C-m', 'accept'), + (r'\C-q', 'quoted-insert'), + (r'\C-t', 'transpose-characters'), + (r'\C-u', 'unix-line-discard'), + (r'\C-v', 'quoted-insert'), + (r'\C-w', 'unix-word-rubout'), + (r'\C-x\C-u', 'upcase-region'), + (r'\C-y', 'yank'), + (r'\C-z', 'suspend'), + + (r'\M-b', 'backward-word'), + (r'\M-c', 'capitalize-word'), + (r'\M-d', 'kill-word'), + (r'\M-f', 'forward-word'), + (r'\M-l', 'downcase-word'), + (r'\M-t', 'transpose-words'), + (r'\M-u', 'upcase-word'), + (r'\M-y', 'yank-pop'), + (r'\M--', 'digit-arg'), + (r'\M-0', 'digit-arg'), + (r'\M-1', 'digit-arg'), + (r'\M-2', 'digit-arg'), + (r'\M-3', 'digit-arg'), + (r'\M-4', 'digit-arg'), + (r'\M-5', 'digit-arg'), + (r'\M-6', 'digit-arg'), + (r'\M-7', 'digit-arg'), + (r'\M-8', 'digit-arg'), + (r'\M-9', 'digit-arg'), + #(r'\M-\n', 'insert-nl'), + ('\\\\', 'self-insert')] + \ + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\', 'up'), + (r'\', 'down'), + (r'\', 'left'), + (r'\', 'right'), + (r'\', 'quoted-insert'), + (r'\', 'delete'), + (r'\', 'backspace'), + (r'\M-\', 'backward-kill-word'), + (r'\', 'end'), + (r'\', 'home'), + (r'\', 'help'), + (r'\EOF', 'end'), # the entries in the terminfo database for xterms + (r'\EOH', 'home'), # seem to be wrong. 
this is a less than ideal + # workaround + ]) + +del c # from the listcomps + +class Reader(object): + """The Reader class implements the bare bones of a command reader, + handling such details as editing and cursor motion. What it does + not support are such things as completion or history support - + these are implemented elsewhere. + + Instance variables of note include: + + * buffer: + A *list* (*not* a string at the moment :-) containing all the + characters that have been entered. + * console: + Hopefully encapsulates the OS dependent stuff. + * pos: + A 0-based index into `buffer' for where the insertion point + is. + * screeninfo: + Ahem. This list contains some info needed to move the + insertion point around reasonably efficiently. I'd like to + get rid of it, because its contents are obtuse (to put it + mildly) but I haven't worked out if that is possible yet. + * cxy, lxy: + the position of the insertion point in screen ... XXX + * syntax_table: + Dictionary mapping characters to `syntax class'; read the + emacs docs to see what this means :-) + * commands: + Dictionary mapping command names to command classes. + * arg: + The emacs-style prefix argument. It will be None if no such + argument has been provided. + * dirty: + True if we need to refresh the display. + * kill_ring: + The emacs-style kill-ring; manipulated with yank & yank-pop + * ps1, ps2, ps3, ps4: + prompts. ps1 is the prompt for a one-line input; for a + multiline input it looks like: + ps2> first line of input goes here + ps3> second and further + ps3> lines get ps3 + ... + ps4> and the last one gets ps4 + As with the usual top-level, you can set these to instances if + you like; str() will be called on them (once) at the beginning + of each command. Don't put really long or newline containing + strings here, please! + This is just the default policy; you can change it freely by + overriding get_prompt() (and indeed some standard subclasses + do). 
+ * finished: + handle1 will set this to a true value if a command signals + that we're done. + """ + + help_text = """\ +This is pyrepl. Hear my roar. + +Helpful text may appear here at some point in the future when I'm +feeling more loquacious than I am now.""" + + msg_at_bottom = True + + def __init__(self, console): + self.buffer = [] + self.ps1 = "->> " + self.ps2 = "/>> " + self.ps3 = "|.. " + self.ps4 = "\__ " + self.kill_ring = [] + self.arg = None + self.finished = 0 + self.console = console + self.commands = {} + self.msg = '' + for v in vars(commands).values(): + if ( isinstance(v, type) + and issubclass(v, commands.Command) + and v.__name__[0].islower() ): + self.commands[v.__name__] = v + self.commands[v.__name__.replace('_', '-')] = v + self.syntax_table = make_default_syntax_table() + self.input_trans_stack = [] + self.keymap = self.collect_keymap() + self.input_trans = input.KeymapTranslator( + self.keymap, + invalid_cls='invalid-key', + character_cls='self-insert') + + def collect_keymap(self): + return default_keymap + + def calc_screen(self): + """The purpose of this method is to translate changes in + self.buffer into changes in self.screen. Currently it rips + everything down and starts from scratch, which whilst not + especially efficient is certainly simple(r). 
+ """ + lines = self.get_unicode().split("\n") + screen = [] + screeninfo = [] + w = self.console.width - 1 + p = self.pos + for ln, line in zip(range(len(lines)), lines): + ll = len(line) + if 0 <= p <= ll: + if self.msg and not self.msg_at_bottom: + for mline in self.msg.split("\n"): + screen.append(mline) + screeninfo.append((0, [])) + self.lxy = p, ln + prompt = self.get_prompt(ln, ll >= p >= 0) + while '\n' in prompt: + pre_prompt, _, prompt = prompt.partition('\n') + screen.append(pre_prompt) + screeninfo.append((0, [])) + p -= ll + 1 + prompt, lp = self.process_prompt(prompt) + l, l2 = disp_str(line) + wrapcount = (len(l) + lp) / w + if wrapcount == 0: + screen.append(prompt + l) + screeninfo.append((lp, l2+[1])) + else: + screen.append(prompt + l[:w-lp] + "\\") + screeninfo.append((lp, l2[:w-lp])) + for i in range(-lp + w, -lp + wrapcount*w, w): + screen.append(l[i:i+w] + "\\") + screeninfo.append((0, l2[i:i + w])) + screen.append(l[wrapcount*w - lp:]) + screeninfo.append((0, l2[wrapcount*w - lp:]+[1])) + self.screeninfo = screeninfo + self.cxy = self.pos2xy(self.pos) + if self.msg and self.msg_at_bottom: + for mline in self.msg.split("\n"): + screen.append(mline) + screeninfo.append((0, [])) + return screen + + def process_prompt(self, prompt): + """ Process the prompt. + + This means calculate the length of the prompt. The character \x01 + and \x02 are used to bracket ANSI control sequences and need to be + excluded from the length calculation. So also a copy of the prompt + is returned with these control characters removed. 
""" + + out_prompt = '' + l = len(prompt) + pos = 0 + while True: + s = prompt.find('\x01', pos) + if s == -1: + break + e = prompt.find('\x02', s) + if e == -1: + break + # Found start and end brackets, subtract from string length + l = l - (e-s+1) + out_prompt += prompt[pos:s] + prompt[s+1:e] + pos = e+1 + out_prompt += prompt[pos:] + return out_prompt, l + + def bow(self, p=None): + """Return the 0-based index of the word break preceding p most + immediately. + + p defaults to self.pos; word boundaries are determined using + self.syntax_table.""" + if p is None: + p = self.pos + st = self.syntax_table + b = self.buffer + p -= 1 + while p >= 0 and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD: + p -= 1 + while p >= 0 and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD: + p -= 1 + return p + 1 + + def eow(self, p=None): + """Return the 0-based index of the word break following p most + immediately. + + p defaults to self.pos; word boundaries are determined using + self.syntax_table.""" + if p is None: + p = self.pos + st = self.syntax_table + b = self.buffer + while p < len(b) and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD: + p += 1 + while p < len(b) and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD: + p += 1 + return p + + def bol(self, p=None): + """Return the 0-based index of the line break preceding p most + immediately. + + p defaults to self.pos.""" + # XXX there are problems here. + if p is None: + p = self.pos + b = self.buffer + p -= 1 + while p >= 0 and b[p] <> '\n': + p -= 1 + return p + 1 + + def eol(self, p=None): + """Return the 0-based index of the line break following p most + immediately. + + p defaults to self.pos.""" + if p is None: + p = self.pos + b = self.buffer + while p < len(b) and b[p] <> '\n': + p += 1 + return p + + def get_arg(self, default=1): + """Return any prefix argument that the user has supplied, + returning `default' if there is None. 
`default' defaults + (groan) to 1.""" + if self.arg is None: + return default + else: + return self.arg + + def get_prompt(self, lineno, cursor_on_line): + """Return what should be in the left-hand margin for line + `lineno'.""" + if self.arg is not None and cursor_on_line: + return "(arg: %s) "%self.arg + if "\n" in self.buffer: + if lineno == 0: + return self._ps2 + elif lineno == self.buffer.count("\n"): + return self._ps4 + else: + return self._ps3 + else: + return self._ps1 + + def push_input_trans(self, itrans): + self.input_trans_stack.append(self.input_trans) + self.input_trans = itrans + + def pop_input_trans(self): + self.input_trans = self.input_trans_stack.pop() + + def pos2xy(self, pos): + """Return the x, y coordinates of position 'pos'.""" + # this *is* incomprehensible, yes. + y = 0 + assert 0 <= pos <= len(self.buffer) + if pos == len(self.buffer): + y = len(self.screeninfo) - 1 + p, l2 = self.screeninfo[y] + return p + len(l2) - 1, y + else: + for p, l2 in self.screeninfo: + l = l2.count(1) + if l > pos: + break + else: + pos -= l + y += 1 + c = 0 + i = 0 + while c < pos: + c += l2[i] + i += 1 + while l2[i] == 0: + i += 1 + return p + i, y + + def insert(self, text): + """Insert 'text' at the insertion point.""" + self.buffer[self.pos:self.pos] = list(text) + self.pos += len(text) + self.dirty = 1 + + def update_cursor(self): + """Move the cursor to reflect changes in self.pos""" + self.cxy = self.pos2xy(self.pos) + self.console.move_cursor(*self.cxy) + + def after_command(self, cmd): + """This function is called to allow post command cleanup.""" + if getattr(cmd, "kills_digit_arg", 1): + if self.arg is not None: + self.dirty = 1 + self.arg = None + + def prepare(self): + """Get ready to run. Call restore when finished. 
You must not + write to the console in between the calls to prepare and + restore.""" + try: + self.console.prepare() + self.arg = None + self.screeninfo = [] + self.finished = 0 + del self.buffer[:] + self.pos = 0 + self.dirty = 1 + self.last_command = None + self._ps1, self._ps2, self._ps3, self._ps4 = \ + map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + except: + self.restore() + raise + + def last_command_is(self, klass): + if not self.last_command: + return 0 + return issubclass(klass, self.last_command) + + def restore(self): + """Clean up after a run.""" + self.console.restore() + + def finish(self): + """Called when a command signals that we're finished.""" + pass + + def error(self, msg="none"): + self.msg = "! " + msg + " " + self.dirty = 1 + self.console.beep() + + def update_screen(self): + if self.dirty: + self.refresh() + + def refresh(self): + """Recalculate and refresh the screen.""" + # this call sets up self.cxy, so call it first. + screen = self.calc_screen() + self.console.refresh(screen, self.cxy) + self.dirty = 0 # forgot this for a while (blush) + + def do_cmd(self, cmd): + #print cmd + if isinstance(cmd[0], str): + cmd = self.commands.get(cmd[0], + commands.invalid_command)(self, cmd) + elif isinstance(cmd[0], type): + cmd = cmd[0](self, cmd) + + cmd.do() + + self.after_command(cmd) + + if self.dirty: + self.refresh() + else: + self.update_cursor() + + if not isinstance(cmd, commands.digit_arg): + self.last_command = cmd.__class__ + + self.finished = cmd.finish + if self.finished: + self.console.finish() + self.finish() + + def handle1(self, block=1): + """Handle a single event. 
Wait as long as it takes if block + is true (the default), otherwise return None if no event is + pending.""" + + if self.msg: + self.msg = '' + self.dirty = 1 + + while 1: + event = self.console.get_event(block) + if not event: # can only happen if we're not blocking + return None + + if event.evt == 'key': + self.input_trans.push(event) + elif event.evt == 'scroll': + self.refresh() + elif event.evt == 'resize': + self.refresh() + else: + pass + + cmd = self.input_trans.get() + + if cmd is None: + if block: + continue + else: + return None + + self.do_cmd(cmd) + return 1 + + def push_char(self, char): + self.console.push_char(char) + self.handle1(0) + + def readline(self): + """Read a line. The implementation of this method also shows + how to drive Reader if you want more control over the event + loop.""" + self.prepare() + try: + self.refresh() + while not self.finished: + self.handle1() + return self.get_buffer() + finally: + self.restore() + + def bind(self, spec, command): + self.keymap = self.keymap + ((spec, command),) + self.input_trans = input.KeymapTranslator( + self.keymap, + invalid_cls='invalid-key', + character_cls='self-insert') + + def get_buffer(self, encoding=None): + if encoding is None: + encoding = self.console.encoding + return u''.join(self.buffer).encode(self.console.encoding) + + def get_unicode(self): + """Return the current buffer as a unicode string.""" + return u''.join(self.buffer) + +def test(): + from pyrepl.unix_console import UnixConsole + reader = Reader(UnixConsole()) + reader.ps1 = "**> " + reader.ps2 = "/*> " + reader.ps3 = "|*> " + reader.ps4 = "\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/readline.py @@ -0,0 +1,408 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Alex Gaynor +# Antonio Cuni +# Armin Rigo +# Holger Krekel +# +# All Rights Reserved +# +# +# Permission 
to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""A compatibility wrapper reimplementing the 'readline' standard module +on top of pyrepl. Not all functionalities are supported. Contains +extensions for multiline input. +""" + +import sys, os +from pyrepl import commands +from pyrepl.historical_reader import HistoricalReader +from pyrepl.completing_reader import CompletingReader +from pyrepl.unix_console import UnixConsole, _error + + +ENCODING = 'latin1' # XXX hard-coded + +__all__ = ['add_history', + 'clear_history', + 'get_begidx', + 'get_completer', + 'get_completer_delims', + 'get_current_history_length', + 'get_endidx', + 'get_history_item', + 'get_history_length', + 'get_line_buffer', + 'insert_text', + 'parse_and_bind', + 'read_history_file', + 'read_init_file', + 'redisplay', + 'remove_history_item', + 'replace_history_item', + 'set_completer', + 'set_completer_delims', + 'set_history_length', + 'set_pre_input_hook', + 'set_startup_hook', + 'write_history_file', + # ---- multiline extensions ---- + 'multiline_input', + ] + +# ____________________________________________________________ + +class ReadlineConfig(object): + readline_completer = None + completer_delims = dict.fromkeys(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?') + +class 
ReadlineAlikeReader(HistoricalReader, CompletingReader): + + assume_immutable_completions = False + use_brackets = False + sort_in_column = True + + def error(self, msg="none"): + pass # don't show error messages by default + + def get_stem(self): + b = self.buffer + p = self.pos - 1 + completer_delims = self.config.completer_delims + while p >= 0 and b[p] not in completer_delims: + p -= 1 + return ''.join(b[p+1:self.pos]) + + def get_completions(self, stem): + result = [] + function = self.config.readline_completer + if function is not None: + try: + stem = str(stem) # rlcompleter.py seems to not like unicode + except UnicodeEncodeError: + pass # but feed unicode anyway if we have no choice + state = 0 + while True: + try: + next = function(stem, state) + except: + break + if not isinstance(next, str): + break + result.append(next) + state += 1 + # emulate the behavior of the standard readline that sorts + # the completions before displaying them. + result.sort() + return result + + def get_trimmed_history(self, maxlength): + if maxlength >= 0: + cut = len(self.history) - maxlength + if cut < 0: + cut = 0 + else: + cut = 0 + return self.history[cut:] + + # --- simplified support for reading multiline Python statements --- + + # This duplicates small parts of pyrepl.python_reader. I'm not + # reusing the PythonicReader class directly for two reasons. One is + # to try to keep as close as possible to CPython's prompt. The + # other is that it is the readline module that we are ultimately + # implementing here, and I don't want the built-in raw_input() to + # start trying to read multiline inputs just because what the user + # typed look like valid but incomplete Python code. So we get the + # multiline feature only when using the multiline_input() function + # directly (see _pypy_interact.py). 
+ + more_lines = None + + def collect_keymap(self): + return super(ReadlineAlikeReader, self).collect_keymap() + ( + (r'\n', 'maybe-accept'),) + + def __init__(self, console): + super(ReadlineAlikeReader, self).__init__(console) + self.commands['maybe_accept'] = maybe_accept + self.commands['maybe-accept'] = maybe_accept + + def after_command(self, cmd): + super(ReadlineAlikeReader, self).after_command(cmd) + if self.more_lines is None: + # Force single-line input if we are in raw_input() mode. + # Although there is no direct way to add a \n in this mode, + # multiline buffers can still show up using various + # commands, e.g. navigating the history. + try: + index = self.buffer.index("\n") + except ValueError: + pass + else: + self.buffer = self.buffer[:index] + if self.pos > len(self.buffer): + self.pos = len(self.buffer) + +class maybe_accept(commands.Command): + def do(self): + r = self.reader + r.dirty = 1 # this is needed to hide the completion menu, if visible + # + # if there are already several lines and the cursor + # is not on the last one, always insert a new \n. 
+ text = r.get_unicode() + if "\n" in r.buffer[r.pos:]: + r.insert("\n") + elif r.more_lines is not None and r.more_lines(text): + r.insert("\n") + else: + self.finish = 1 + +# ____________________________________________________________ + +class _ReadlineWrapper(object): + f_in = 0 + f_out = 1 + reader = None + saved_history_length = -1 + startup_hook = None + config = ReadlineConfig() + + def get_reader(self): + if self.reader is None: + console = UnixConsole(self.f_in, self.f_out, encoding=ENCODING) + self.reader = ReadlineAlikeReader(console) + self.reader.config = self.config + return self.reader + + def raw_input(self, prompt=''): + try: + reader = self.get_reader() + except _error: + return _old_raw_input(prompt) + if self.startup_hook is not None: + self.startup_hook() + reader.ps1 = prompt + return reader.readline() + + def multiline_input(self, more_lines, ps1, ps2): + """Read an input on possibly multiple lines, asking for more + lines as long as 'more_lines(unicodetext)' returns an object whose + boolean value is true. 
+ """ + reader = self.get_reader() + saved = reader.more_lines + try: + reader.more_lines = more_lines + reader.ps1 = reader.ps2 = ps1 + reader.ps3 = reader.ps4 = ps2 + return reader.readline() + finally: + reader.more_lines = saved + + def parse_and_bind(self, string): + pass # XXX we don't support parsing GNU-readline-style init files + + def set_completer(self, function=None): + self.config.readline_completer = function + + def get_completer(self): + return self.config.readline_completer + + def set_completer_delims(self, string): + self.config.completer_delims = dict.fromkeys(string) + + def get_completer_delims(self): + chars = self.config.completer_delims.keys() + chars.sort() + return ''.join(chars) + + def _histline(self, line): + return unicode(line.rstrip('\n'), ENCODING) + + def get_history_length(self): + return self.saved_history_length + + def set_history_length(self, length): + self.saved_history_length = length + + def get_current_history_length(self): + return len(self.get_reader().history) + + def read_history_file(self, filename='~/.history'): + # multiline extension (really a hack) for the end of lines that + # are actually continuations inside a single multiline_input() + # history item: we use \r\n instead of just \n. If the history + # file is passed to GNU readline, the extra \r are just ignored. 
+ history = self.get_reader().history + f = open(os.path.expanduser(filename), 'r') + buffer = [] + for line in f: + if line.endswith('\r\n'): + buffer.append(line) + else: + line = self._histline(line) + if buffer: + line = ''.join(buffer).replace('\r', '') + line + del buffer[:] + if line: + history.append(line) + f.close() + + def write_history_file(self, filename='~/.history'): + maxlength = self.saved_history_length + history = self.get_reader().get_trimmed_history(maxlength) + f = open(os.path.expanduser(filename), 'w') + for entry in history: + if isinstance(entry, unicode): + entry = entry.encode(ENCODING) + entry = entry.replace('\n', '\r\n') # multiline history support + f.write(entry + '\n') + f.close() + + def clear_history(self): + del self.get_reader().history[:] + + def get_history_item(self, index): + history = self.get_reader().history + if 1 <= index <= len(history): + return history[index-1] + else: + return None # blame readline.c for not raising + + def remove_history_item(self, index): + history = self.get_reader().history + if 0 <= index < len(history): + del history[index] + else: + raise ValueError("No history item at position %d" % index) + # blame readline.c for raising ValueError + + def replace_history_item(self, index, line): + history = self.get_reader().history + if 0 <= index < len(history): + history[index] = self._histline(line) + else: + raise ValueError("No history item at position %d" % index) + # blame readline.c for raising ValueError + + def add_history(self, line): + self.get_reader().history.append(self._histline(line)) + + def set_startup_hook(self, function=None): + self.startup_hook = function + + def get_line_buffer(self): + return self.get_reader().get_buffer() + + def _get_idxs(self): + start = cursor = self.get_reader().pos + buf = self.get_line_buffer() + for i in xrange(cursor - 1, -1, -1): + if buf[i] in self.get_completer_delims(): + break + start = i + return start, cursor + + def get_begidx(self): + return 
self._get_idxs()[0] + + def get_endidx(self): + return self._get_idxs()[1] + + def insert_text(self, text): + return self.get_reader().insert(text) + + +_wrapper = _ReadlineWrapper() + +# ____________________________________________________________ +# Public API + +parse_and_bind = _wrapper.parse_and_bind +set_completer = _wrapper.set_completer +get_completer = _wrapper.get_completer +set_completer_delims = _wrapper.set_completer_delims +get_completer_delims = _wrapper.get_completer_delims +get_history_length = _wrapper.get_history_length +set_history_length = _wrapper.set_history_length +get_current_history_length = _wrapper.get_current_history_length +read_history_file = _wrapper.read_history_file +write_history_file = _wrapper.write_history_file +clear_history = _wrapper.clear_history +get_history_item = _wrapper.get_history_item +remove_history_item = _wrapper.remove_history_item +replace_history_item = _wrapper.replace_history_item +add_history = _wrapper.add_history +set_startup_hook = _wrapper.set_startup_hook +get_line_buffer = _wrapper.get_line_buffer +get_begidx = _wrapper.get_begidx +get_endidx = _wrapper.get_endidx +insert_text = _wrapper.insert_text + +# Extension +multiline_input = _wrapper.multiline_input + +# Internal hook +_get_reader = _wrapper.get_reader + +# ____________________________________________________________ +# Stubs + +def _make_stub(_name, _ret): + def stub(*args, **kwds): + import warnings + warnings.warn("readline.%s() not implemented" % _name, stacklevel=2) + stub.func_name = _name + globals()[_name] = stub + +for _name, _ret in [ + ('read_init_file', None), + ('redisplay', None), + ('set_pre_input_hook', None), + ]: + assert _name not in globals(), _name + _make_stub(_name, _ret) + +# ____________________________________________________________ + +def _setup(): + global _old_raw_input + if _old_raw_input is not None: + return # don't run _setup twice + + try: + f_in = sys.stdin.fileno() + f_out = sys.stdout.fileno() + except 
(AttributeError, ValueError): + return + if not os.isatty(f_in) or not os.isatty(f_out): + return + + _wrapper.f_in = f_in + _wrapper.f_out = f_out + + if hasattr(sys, '__raw_input__'): # PyPy + _old_raw_input = sys.__raw_input__ + sys.__raw_input__ = _wrapper.raw_input + else: + # this is not really what readline.c does. Better than nothing I guess + import __builtin__ + _old_raw_input = __builtin__.raw_input + __builtin__.raw_input = _wrapper.raw_input + +_old_raw_input = None +_setup() diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/simple_interact.py @@ -0,0 +1,64 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""This is an alternative to python_reader which tries to emulate +the CPython prompt as closely as possible, with the exception of +allowing multiline input and multiline history entries. 
+""" + +import sys +from pyrepl.readline import multiline_input, _error, _get_reader + +def check(): # returns False if there is a problem initializing the state + try: + _get_reader() + except _error: + return False + return True + +def run_multiline_interactive_console(mainmodule=None): + import code + if mainmodule is None: + import __main__ as mainmodule + console = code.InteractiveConsole(mainmodule.__dict__) + + def more_lines(unicodetext): + # ooh, look at the hack: + src = "#coding:utf-8\n"+unicodetext.encode('utf-8') + try: + code = console.compile(src, '', 'single') + except (OverflowError, SyntaxError, ValueError): + return False + else: + return code is None + + while 1: + try: + ps1 = getattr(sys, 'ps1', '>>> ') + ps2 = getattr(sys, 'ps2', '... ') + try: + statement = multiline_input(more_lines, ps1, ps2) + except EOFError: + break + more = console.push(statement) + assert not more + except KeyboardInterrupt: + console.write("\nKeyboardInterrupt\n") + console.resetbuffer() diff --git a/lib_pypy/pyrepl/test/test_functional.py b/lib_pypy/pyrepl/test/test_functional.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/test/test_functional.py @@ -0,0 +1,50 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# some functional tests, to see if this is really working + +import py +import sys + +class TestTerminal(object): + def _spawn(self, *args, **kwds): + try: + import pexpect + except ImportError, e: + py.test.skip(str(e)) + kwds.setdefault('timeout', 10) + child = pexpect.spawn(*args, **kwds) + child.logfile = sys.stdout + return child + + def spawn(self, argv=[]): + # avoid running start.py, cause it might contain + # things like readline or rlcompleter(2) included + child = self._spawn(sys.executable, ['-S'] + argv) + child.sendline('from pyrepl.python_reader import main') + child.sendline('main()') + return child + + def test_basic(self): + child = self.spawn() + child.sendline('a = 3') + child.sendline('a') + child.expect('3') + diff --git a/lib_pypy/pyrepl/tests/__init__.py b/lib_pypy/pyrepl/tests/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# moo diff --git a/lib_pypy/pyrepl/tests/basic.py b/lib_pypy/pyrepl/tests/basic.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/basic.py @@ -0,0 +1,115 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +class SimpleTestCase(ReaderTestCase): + + def test_basic(self): + self.run_test([(('self-insert', 'a'), ['a']), + ( 'accept', ['a'])]) + + def test_repeat(self): + self.run_test([(('digit-arg', '3'), ['']), + (('self-insert', 'a'), ['aaa']), + ( 'accept', ['aaa'])]) + + def test_kill_line(self): + self.run_test([(('self-insert', 'abc'), ['abc']), + ( 'left', None), + ( 'kill-line', ['ab']), + ( 'accept', ['ab'])]) + + def test_unix_line_discard(self): + self.run_test([(('self-insert', 'abc'), ['abc']), + ( 'left', None), + ( 'unix-word-rubout', ['c']), + ( 'accept', ['c'])]) + + def test_kill_word(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'beginning-of-line', ['ab cd']), + ( 'kill-word', [' cd']), + ( 'accept', [' cd'])]) + + def test_backward_kill_word(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'accept', ['ab '])]) + + def test_yank(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'beginning-of-line', ['ab ']), + ( 'yank', ['cdab ']), + ( 'accept', ['cdab '])]) + + def test_yank_pop(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'left', ['ab ']), + ( 'backward-kill-word', [' ']), + ( 'yank', ['ab ']), + ( 'yank-pop', ['cd ']), + ( 'accept', ['cd '])]) + + def test_interrupt(self): + try: + self.run_test([( 'interrupt', [''])]) + except KeyboardInterrupt: + pass + else: + self.fail('KeyboardInterrupt got lost') + + # test_suspend -- hah + + def test_up(self): + self.run_test([(('self-insert', 'ab\ncd'), ['ab', 'cd']), + ( 'up', ['ab', 'cd']), + (('self-insert', 'e'), ['abe', 'cd']), + ( 'accept', ['abe', 'cd'])]) + + def test_down(self): + self.run_test([(('self-insert', 'ab\ncd'), ['ab', 'cd']), + ( 'up', ['ab', 'cd']), + (('self-insert', 'e'), ['abe', 'cd']), + 
( 'down', ['abe', 'cd']), + (('self-insert', 'f'), ['abe', 'cdf']), + ( 'accept', ['abe', 'cdf'])]) + + def test_left(self): + self.run_test([(('self-insert', 'ab'), ['ab']), + ( 'left', ['ab']), + (('self-insert', 'c'), ['acb']), + ( 'accept', ['acb'])]) + + def test_right(self): + self.run_test([(('self-insert', 'ab'), ['ab']), + ( 'left', ['ab']), + (('self-insert', 'c'), ['acb']), + ( 'right', ['acb']), + (('self-insert', 'd'), ['acbd']), + ( 'accept', ['acbd'])]) + +def test(): + run_testcase(SimpleTestCase) + +if __name__ == '__main__': + test() diff --git a/lib_pypy/pyrepl/tests/bugs.py b/lib_pypy/pyrepl/tests/bugs.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/bugs.py @@ -0,0 +1,36 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +# this test case should contain as-verbatim-as-possible versions of +# (applicable) bug reports + +class BugsTestCase(ReaderTestCase): + + def test_transpose_at_start(self): + self.run_test([( 'transpose', [EA, '']), + ( 'accept', [''])]) + +def test(): + run_testcase(BugsTestCase) + +if __name__ == '__main__': + test() diff --git a/lib_pypy/pyrepl/tests/infrastructure.py b/lib_pypy/pyrepl/tests/infrastructure.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/infrastructure.py @@ -0,0 +1,82 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.reader import Reader +from pyrepl.console import Console, Event +import unittest +import sys + +class EqualsAnything(object): + def __eq__(self, other): + return True +EA = EqualsAnything() + +class TestConsole(Console): + height = 24 + width = 80 + encoding = 'utf-8' + + def __init__(self, events, testcase, verbose=False): + self.events = events + self.next_screen = None + self.verbose = verbose + self.testcase = testcase + + def refresh(self, screen, xy): + if self.next_screen is not None: + self.testcase.assertEqual( + screen, self.next_screen, + "[ %s != %s after %r ]"%(screen, self.next_screen, + self.last_event_name)) + + def get_event(self, block=1): + ev, sc = self.events.pop(0) + self.next_screen = sc + if not isinstance(ev, tuple): + ev = (ev,) + self.last_event_name = ev[0] + if self.verbose: + print "event", ev + return Event(*ev) + +class TestReader(Reader): + def get_prompt(self, lineno, cursor_on_line): + return '' + def refresh(self): + Reader.refresh(self) + self.dirty = True + +class ReaderTestCase(unittest.TestCase): + def run_test(self, test_spec, reader_class=TestReader): + # remember to finish your test_spec with 'accept' or similar! 
+ con = TestConsole(test_spec, self) + reader = reader_class(con) + reader.readline() + +class BasicTestRunner: + def run(self, test): + result = unittest.TestResult() + test(result) + return result + +def run_testcase(testclass): + suite = unittest.makeSuite(testclass) + runner = unittest.TextTestRunner(sys.stdout, verbosity=1) + result = runner.run(suite) + diff --git a/lib_pypy/pyrepl/tests/wishes.py b/lib_pypy/pyrepl/tests/wishes.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/wishes.py @@ -0,0 +1,38 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +# this test case should contain as-verbatim-as-possible versions of +# (applicable) feature requests + +class WishesTestCase(ReaderTestCase): + + def test_quoted_insert_repeat(self): + self.run_test([(('digit-arg', '3'), ['']), + ( 'quoted-insert', ['']), + (('self-insert', '\033'), ['^[^[^[']), + ( 'accept', None)]) + +def test(): + run_testcase(WishesTestCase) + +if __name__ == '__main__': + test() diff --git a/lib_pypy/pyrepl/unicodedata_.py b/lib_pypy/pyrepl/unicodedata_.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/unicodedata_.py @@ -0,0 +1,59 @@ +try: + from unicodedata import * +except ImportError: + + def category(ch): + """ + ASCII only implementation + """ + if type(ch) is not unicode: + raise TypeError + if len(ch) != 1: + raise TypeError + return _categories.get(ord(ch), 'Co') # "Other, private use" + + _categories = { + 0: 'Cc', 1: 'Cc', 2: 'Cc', 3: 'Cc', 4: 'Cc', 5: 'Cc', + 6: 'Cc', 7: 'Cc', 8: 'Cc', 9: 'Cc', 10: 'Cc', 11: 'Cc', + 12: 'Cc', 13: 'Cc', 14: 'Cc', 15: 'Cc', 16: 'Cc', 17: 'Cc', + 18: 'Cc', 19: 'Cc', 20: 'Cc', 21: 'Cc', 22: 'Cc', 23: 'Cc', + 24: 'Cc', 25: 'Cc', 26: 'Cc', 27: 'Cc', 28: 'Cc', 29: 'Cc', + 30: 'Cc', 31: 'Cc', 32: 'Zs', 33: 'Po', 34: 'Po', 35: 'Po', + 36: 'Sc', 37: 'Po', 38: 'Po', 39: 'Po', 40: 'Ps', 41: 'Pe', + 42: 'Po', 43: 'Sm', 44: 'Po', 45: 'Pd', 46: 'Po', 47: 'Po', + 48: 'Nd', 49: 'Nd', 50: 'Nd', 51: 'Nd', 52: 'Nd', 53: 'Nd', + 54: 'Nd', 55: 'Nd', 56: 'Nd', 57: 'Nd', 58: 'Po', 59: 'Po', + 60: 'Sm', 61: 'Sm', 62: 'Sm', 63: 'Po', 64: 'Po', 65: 'Lu', + 66: 'Lu', 67: 'Lu', 68: 'Lu', 69: 'Lu', 70: 'Lu', 71: 'Lu', + 72: 'Lu', 73: 'Lu', 74: 'Lu', 75: 'Lu', 76: 'Lu', 77: 'Lu', + 78: 'Lu', 79: 'Lu', 80: 'Lu', 81: 'Lu', 82: 'Lu', 83: 'Lu', + 84: 'Lu', 85: 'Lu', 86: 'Lu', 87: 'Lu', 88: 'Lu', 89: 'Lu', + 90: 'Lu', 91: 'Ps', 92: 'Po', 93: 'Pe', 94: 'Sk', 95: 'Pc', + 96: 'Sk', 97: 'Ll', 98: 'Ll', 99: 'Ll', 100: 
'Ll', 101: 'Ll', + 102: 'Ll', 103: 'Ll', 104: 'Ll', 105: 'Ll', 106: 'Ll', 107: 'Ll', + 108: 'Ll', 109: 'Ll', 110: 'Ll', 111: 'Ll', 112: 'Ll', 113: 'Ll', + 114: 'Ll', 115: 'Ll', 116: 'Ll', 117: 'Ll', 118: 'Ll', 119: 'Ll', + 120: 'Ll', 121: 'Ll', 122: 'Ll', 123: 'Ps', 124: 'Sm', 125: 'Pe', + 126: 'Sm', 127: 'Cc', 128: 'Cc', 129: 'Cc', 130: 'Cc', 131: 'Cc', + 132: 'Cc', 133: 'Cc', 134: 'Cc', 135: 'Cc', 136: 'Cc', 137: 'Cc', + 138: 'Cc', 139: 'Cc', 140: 'Cc', 141: 'Cc', 142: 'Cc', 143: 'Cc', + 144: 'Cc', 145: 'Cc', 146: 'Cc', 147: 'Cc', 148: 'Cc', 149: 'Cc', + 150: 'Cc', 151: 'Cc', 152: 'Cc', 153: 'Cc', 154: 'Cc', 155: 'Cc', + 156: 'Cc', 157: 'Cc', 158: 'Cc', 159: 'Cc', 160: 'Zs', 161: 'Po', + 162: 'Sc', 163: 'Sc', 164: 'Sc', 165: 'Sc', 166: 'So', 167: 'So', + 168: 'Sk', 169: 'So', 170: 'Ll', 171: 'Pi', 172: 'Sm', 173: 'Cf', + 174: 'So', 175: 'Sk', 176: 'So', 177: 'Sm', 178: 'No', 179: 'No', + 180: 'Sk', 181: 'Ll', 182: 'So', 183: 'Po', 184: 'Sk', 185: 'No', + 186: 'Ll', 187: 'Pf', 188: 'No', 189: 'No', 190: 'No', 191: 'Po', + 192: 'Lu', 193: 'Lu', 194: 'Lu', 195: 'Lu', 196: 'Lu', 197: 'Lu', + 198: 'Lu', 199: 'Lu', 200: 'Lu', 201: 'Lu', 202: 'Lu', 203: 'Lu', + 204: 'Lu', 205: 'Lu', 206: 'Lu', 207: 'Lu', 208: 'Lu', 209: 'Lu', + 210: 'Lu', 211: 'Lu', 212: 'Lu', 213: 'Lu', 214: 'Lu', 215: 'Sm', + 216: 'Lu', 217: 'Lu', 218: 'Lu', 219: 'Lu', 220: 'Lu', 221: 'Lu', + 222: 'Lu', 223: 'Ll', 224: 'Ll', 225: 'Ll', 226: 'Ll', 227: 'Ll', + 228: 'Ll', 229: 'Ll', 230: 'Ll', 231: 'Ll', 232: 'Ll', 233: 'Ll', + 234: 'Ll', 235: 'Ll', 236: 'Ll', 237: 'Ll', 238: 'Ll', 239: 'Ll', + 240: 'Ll', 241: 'Ll', 242: 'Ll', 243: 'Ll', 244: 'Ll', 245: 'Ll', + 246: 'Ll', 247: 'Sm', 248: 'Ll', 249: 'Ll', 250: 'Ll', 251: 'Ll', + 252: 'Ll', 253: 'Ll', 254: 'Ll' + } diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/unix_console.py @@ -0,0 +1,567 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo 
+# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import termios, select, os, struct, errno +import signal, re, time, sys +from fcntl import ioctl +from pyrepl import curses +from pyrepl.fancy_termios import tcgetattr, tcsetattr +from pyrepl.console import Console, Event +from pyrepl import unix_eventqueue + +class InvalidTerminal(RuntimeError): + pass + +_error = (termios.error, curses.error, InvalidTerminal) + +# there are arguments for changing this to "refresh" +SIGWINCH_EVENT = 'repaint' + +FIONREAD = getattr(termios, "FIONREAD", None) +TIOCGWINSZ = getattr(termios, "TIOCGWINSZ", None) + +def _my_getstr(cap, optional=0): + r = curses.tigetstr(cap) + if not optional and r is None: + raise InvalidTerminal, \ + "terminal doesn't have the required '%s' capability"%cap + return r + +# at this point, can we say: AAAAAAAAAAAAAAAAAAAAAARGH! 
+def maybe_add_baudrate(dict, rate): + name = 'B%d'%rate + if hasattr(termios, name): + dict[getattr(termios, name)] = rate + +ratedict = {} +for r in [0, 110, 115200, 1200, 134, 150, 1800, 19200, 200, 230400, + 2400, 300, 38400, 460800, 4800, 50, 57600, 600, 75, 9600]: + maybe_add_baudrate(ratedict, r) + +del r, maybe_add_baudrate + +delayprog = re.compile("\\$<([0-9]+)((?:/|\\*){0,2})>") + +try: + poll = select.poll +except AttributeError: + # this is exactly the minumum necessary to support what we + # do with poll objects + class poll: + def __init__(self): + pass + def register(self, fd, flag): + self.fd = fd + def poll(self, timeout=None): + r,w,e = select.select([self.fd],[],[],timeout) + return r + +POLLIN = getattr(select, "POLLIN", None) + +class UnixConsole(Console): + def __init__(self, f_in=0, f_out=1, term=None, encoding=None): + if encoding is None: + encoding = sys.getdefaultencoding() + + self.encoding = encoding + + if isinstance(f_in, int): + self.input_fd = f_in + else: + self.input_fd = f_in.fileno() + + if isinstance(f_out, int): + self.output_fd = f_out + else: + self.output_fd = f_out.fileno() + + self.pollob = poll() + self.pollob.register(self.input_fd, POLLIN) + curses.setupterm(term, self.output_fd) + self.term = term + + self._bel = _my_getstr("bel") + self._civis = _my_getstr("civis", optional=1) + self._clear = _my_getstr("clear") + self._cnorm = _my_getstr("cnorm", optional=1) + self._cub = _my_getstr("cub", optional=1) + self._cub1 = _my_getstr("cub1", 1) + self._cud = _my_getstr("cud", 1) + self._cud1 = _my_getstr("cud1", 1) + self._cuf = _my_getstr("cuf", 1) + self._cuf1 = _my_getstr("cuf1", 1) + self._cup = _my_getstr("cup") + self._cuu = _my_getstr("cuu", 1) + self._cuu1 = _my_getstr("cuu1", 1) + self._dch1 = _my_getstr("dch1", 1) + self._dch = _my_getstr("dch", 1) + self._el = _my_getstr("el") + self._hpa = _my_getstr("hpa", 1) + self._ich = _my_getstr("ich", 1) + self._ich1 = _my_getstr("ich1", 1) + self._ind = 
_my_getstr("ind", 1) + self._pad = _my_getstr("pad", 1) + self._ri = _my_getstr("ri", 1) + self._rmkx = _my_getstr("rmkx", 1) + self._smkx = _my_getstr("smkx", 1) + + ## work out how we're going to sling the cursor around + if 0 and self._hpa: # hpa don't work in windows telnet :-( + self.__move_x = self.__move_x_hpa + elif self._cub and self._cuf: + self.__move_x = self.__move_x_cub_cuf + elif self._cub1 and self._cuf1: + self.__move_x = self.__move_x_cub1_cuf1 + else: + raise RuntimeError, "insufficient terminal (horizontal)" + + if self._cuu and self._cud: + self.__move_y = self.__move_y_cuu_cud + elif self._cuu1 and self._cud1: + self.__move_y = self.__move_y_cuu1_cud1 + else: + raise RuntimeError, "insufficient terminal (vertical)" + + if self._dch1: + self.dch1 = self._dch1 + elif self._dch: + self.dch1 = curses.tparm(self._dch, 1) + else: + self.dch1 = None + + if self._ich1: + self.ich1 = self._ich1 + elif self._ich: + self.ich1 = curses.tparm(self._ich, 1) + else: + self.ich1 = None + + self.__move = self.__move_short + + self.event_queue = unix_eventqueue.EventQueue(self.input_fd) + self.partial_char = '' + self.cursor_visible = 1 + + def change_encoding(self, encoding): + self.encoding = encoding + + def refresh(self, screen, (cx, cy)): + # this function is still too long (over 90 lines) + + if not self.__gone_tall: + while len(self.screen) < min(len(screen), self.height): + self.__hide_cursor() + self.__move(0, len(self.screen) - 1) + self.__write("\n") + self.__posxy = 0, len(self.screen) + self.screen.append("") + else: + while len(self.screen) < len(screen): + self.screen.append("") + + if len(screen) > self.height: + self.__gone_tall = 1 + self.__move = self.__move_tall + + px, py = self.__posxy + old_offset = offset = self.__offset + height = self.height + + if 0: + global counter + try: + counter + except NameError: + counter = 0 + self.__write_code(curses.tigetstr("setaf"), counter) + counter += 1 + if counter > 8: + counter = 0 + + # we make 
sure the cursor is on the screen, and that we're + # using all of the screen if we can + if cy < offset: + offset = cy + elif cy >= offset + height: + offset = cy - height + 1 + elif offset > 0 and len(screen) < offset + height: + offset = max(len(screen) - height, 0) + screen.append("") + + oldscr = self.screen[old_offset:old_offset + height] + newscr = screen[offset:offset + height] + + # use hardware scrolling if we have it. + if old_offset > offset and self._ri: + self.__hide_cursor() + self.__write_code(self._cup, 0, 0) + self.__posxy = 0, old_offset + for i in range(old_offset - offset): + self.__write_code(self._ri) + oldscr.pop(-1) + oldscr.insert(0, "") + elif old_offset < offset and self._ind: + self.__hide_cursor() + self.__write_code(self._cup, self.height - 1, 0) + self.__posxy = 0, old_offset + self.height - 1 + for i in range(offset - old_offset): + self.__write_code(self._ind) + oldscr.pop(0) + oldscr.append("") + + self.__offset = offset + + for y, oldline, newline, in zip(range(offset, offset + height), + oldscr, + newscr): + if oldline != newline: + self.__write_changed_line(y, oldline, newline, px) + + y = len(newscr) + while y < len(oldscr): + self.__hide_cursor() + self.__move(0, y) + self.__posxy = 0, y + self.__write_code(self._el) + y += 1 + + self.__show_cursor() + + self.screen = screen + self.move_cursor(cx, cy) + self.flushoutput() + + def __write_changed_line(self, y, oldline, newline, px): + # this is frustrating; there's no reason to test (say) + # self.dch1 inside the loop -- but alternative ways of + # structuring this function are equally painful (I'm trying to + # avoid writing code generators these days...) 
+ x = 0 + minlen = min(len(oldline), len(newline)) + # + # reuse the oldline as much as possible, but stop as soon as we + # encounter an ESCAPE, because it might be the start of an escape + # sequene + while x < minlen and oldline[x] == newline[x] and newline[x] != '\x1b': + x += 1 + if oldline[x:] == newline[x+1:] and self.ich1: + if ( y == self.__posxy[1] and x > self.__posxy[0] + and oldline[px:x] == newline[px+1:x+1] ): + x = px + self.__move(x, y) + self.__write_code(self.ich1) + self.__write(newline[x]) + self.__posxy = x + 1, y + elif x < minlen and oldline[x + 1:] == newline[x + 1:]: + self.__move(x, y) + self.__write(newline[x]) + self.__posxy = x + 1, y + elif (self.dch1 and self.ich1 and len(newline) == self.width + and x < len(newline) - 2 + and newline[x+1:-1] == oldline[x:-2]): + self.__hide_cursor() + self.__move(self.width - 2, y) + self.__posxy = self.width - 2, y + self.__write_code(self.dch1) + self.__move(x, y) + self.__write_code(self.ich1) + self.__write(newline[x]) + self.__posxy = x + 1, y + else: + self.__hide_cursor() + self.__move(x, y) + if len(oldline) > len(newline): + self.__write_code(self._el) + self.__write(newline[x:]) + self.__posxy = len(newline), y + + if '\x1b' in newline: + # ANSI escape characters are present, so we can't assume + # anything about the position of the cursor. Moving the cursor + # to the left margin should work to get to a known position. 
+ self.move_cursor(0, y) + + def __write(self, text): + self.__buffer.append((text, 0)) + + def __write_code(self, fmt, *args): + self.__buffer.append((curses.tparm(fmt, *args), 1)) + + def __maybe_write_code(self, fmt, *args): + if fmt: + self.__write_code(fmt, *args) + + def __move_y_cuu1_cud1(self, y): + dy = y - self.__posxy[1] + if dy > 0: + self.__write_code(dy*self._cud1) + elif dy < 0: + self.__write_code((-dy)*self._cuu1) + + def __move_y_cuu_cud(self, y): + dy = y - self.__posxy[1] + if dy > 0: + self.__write_code(self._cud, dy) + elif dy < 0: + self.__write_code(self._cuu, -dy) + + def __move_x_hpa(self, x): + if x != self.__posxy[0]: + self.__write_code(self._hpa, x) + + def __move_x_cub1_cuf1(self, x): + dx = x - self.__posxy[0] + if dx > 0: + self.__write_code(self._cuf1*dx) + elif dx < 0: + self.__write_code(self._cub1*(-dx)) + + def __move_x_cub_cuf(self, x): + dx = x - self.__posxy[0] + if dx > 0: + self.__write_code(self._cuf, dx) + elif dx < 0: + self.__write_code(self._cub, -dx) + + def __move_short(self, x, y): + self.__move_x(x) + self.__move_y(y) + + def __move_tall(self, x, y): + assert 0 <= y - self.__offset < self.height, y - self.__offset + self.__write_code(self._cup, y - self.__offset, x) + + def move_cursor(self, x, y): + if y < self.__offset or y >= self.__offset + self.height: + self.event_queue.insert(Event('scroll', None)) + else: + self.__move(x, y) + self.__posxy = x, y + self.flushoutput() + + def prepare(self): + # per-readline preparations: + self.__svtermstate = tcgetattr(self.input_fd) + raw = self.__svtermstate.copy() + raw.iflag &=~ (termios.BRKINT | termios.INPCK | + termios.ISTRIP | termios.IXON) + raw.oflag &=~ (termios.OPOST) + raw.cflag &=~ (termios.CSIZE|termios.PARENB) + raw.cflag |= (termios.CS8) + raw.lflag &=~ (termios.ICANON|termios.ECHO| + termios.IEXTEN|(termios.ISIG*1)) + raw.cc[termios.VMIN] = 1 + raw.cc[termios.VTIME] = 0 + tcsetattr(self.input_fd, termios.TCSADRAIN, raw) + + self.screen = [] + self.height, 
self.width = self.getheightwidth() + + self.__buffer = [] + + self.__posxy = 0, 0 + self.__gone_tall = 0 + self.__move = self.__move_short + self.__offset = 0 + + self.__maybe_write_code(self._smkx) + + self.old_sigwinch = signal.signal( + signal.SIGWINCH, self.__sigwinch) + + def restore(self): + self.__maybe_write_code(self._rmkx) + self.flushoutput() + tcsetattr(self.input_fd, termios.TCSADRAIN, self.__svtermstate) + + signal.signal(signal.SIGWINCH, self.old_sigwinch) + + def __sigwinch(self, signum, frame): + self.height, self.width = self.getheightwidth() + self.event_queue.insert(Event('resize', None)) + + def push_char(self, char): + self.partial_char += char + try: + c = unicode(self.partial_char, self.encoding) + except UnicodeError, e: + if len(e.args) > 4 and \ + e.args[4] == 'unexpected end of data': + pass + else: + raise + else: + self.partial_char = '' + self.event_queue.push(c) + + def get_event(self, block=1): + while self.event_queue.empty(): + while 1: # All hail Unix! 
+ try: + self.push_char(os.read(self.input_fd, 1)) + except (IOError, OSError), err: + if err.errno == errno.EINTR: + if not self.event_queue.empty(): + return self.event_queue.get() + else: + continue + else: + raise + else: + break + if not block: + break + return self.event_queue.get() + + def wait(self): + self.pollob.poll() + + def set_cursor_vis(self, vis): + if vis: + self.__show_cursor() + else: + self.__hide_cursor() + + def __hide_cursor(self): + if self.cursor_visible: + self.__maybe_write_code(self._civis) + self.cursor_visible = 0 + + def __show_cursor(self): + if not self.cursor_visible: + self.__maybe_write_code(self._cnorm) + self.cursor_visible = 1 + + def repaint_prep(self): + if not self.__gone_tall: + self.__posxy = 0, self.__posxy[1] + self.__write("\r") + ns = len(self.screen)*['\000'*self.width] + self.screen = ns + else: + self.__posxy = 0, self.__offset + self.__move(0, self.__offset) + ns = self.height*['\000'*self.width] + self.screen = ns + + if TIOCGWINSZ: + def getheightwidth(self): + try: + return int(os.environ["LINES"]), int(os.environ["COLUMNS"]) + except KeyError: + height, width = struct.unpack( + "hhhh", ioctl(self.input_fd, TIOCGWINSZ, "\000"*8))[0:2] + if not height: return 25, 80 + return height, width + else: + def getheightwidth(self): + try: + return int(os.environ["LINES"]), int(os.environ["COLUMNS"]) + except KeyError: + return 25, 80 + + def forgetinput(self): + termios.tcflush(self.input_fd, termios.TCIFLUSH) + + def flushoutput(self): + for text, iscode in self.__buffer: + if iscode: + self.__tputs(text) + else: + os.write(self.output_fd, text.encode(self.encoding)) + del self.__buffer[:] + + def __tputs(self, fmt, prog=delayprog): + """A Python implementation of the curses tputs function; the + curses one can't really be wrapped in a sane manner. 
+ + I have the strong suspicion that this is complexity that + will never do anyone any good.""" + # using .get() means that things will blow up + # only if the bps is actually needed (which I'm + # betting is pretty unlkely) + bps = ratedict.get(self.__svtermstate.ospeed) + while 1: + m = prog.search(fmt) + if not m: + os.write(self.output_fd, fmt) + break + x, y = m.span() + os.write(self.output_fd, fmt[:x]) + fmt = fmt[y:] + delay = int(m.group(1)) + if '*' in m.group(2): + delay *= self.height + if self._pad: + nchars = (bps*delay)/1000 + os.write(self.output_fd, self._pad*nchars) + else: + time.sleep(float(delay)/1000.0) + + def finish(self): + y = len(self.screen) - 1 + while y >= 0 and not self.screen[y]: + y -= 1 + self.__move(0, min(y, self.height + self.__offset - 1)) + self.__write("\n\r") + self.flushoutput() + + def beep(self): + self.__maybe_write_code(self._bel) + self.flushoutput() + + if FIONREAD: + def getpending(self): + e = Event('key', '', '') + + while not self.event_queue.empty(): + e2 = self.event_queue.get() + e.data += e2.data + e.raw += e.raw + + amount = struct.unpack( + "i", ioctl(self.input_fd, FIONREAD, "\0\0\0\0"))[0] + raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace') + e.data += raw + e.raw += raw + return e + else: + def getpending(self): + e = Event('key', '', '') + + while not self.event_queue.empty(): + e2 = self.event_queue.get() + e.data += e2.data + e.raw += e.raw + + amount = 10000 + raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace') + e.data += raw + e.raw += raw + return e + + def clear(self): + self.__write_code(self._clear) + self.__gone_tall = 1 + self.__move = self.__move_tall + self.__posxy = 0, 0 + self.screen = [] + diff --git a/lib_pypy/pyrepl/unix_eventqueue.py b/lib_pypy/pyrepl/unix_eventqueue.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/unix_eventqueue.py @@ -0,0 +1,86 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved 
+# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Bah, this would be easier to test if curses/terminfo didn't have so +# much non-introspectable global state. + +from pyrepl import keymap +from pyrepl.console import Event +from pyrepl import curses +from termios import tcgetattr, VERASE +import os + +_keynames = { + "delete" : "kdch1", + "down" : "kcud1", + "end" : "kend", + "enter" : "kent", + "f1" : "kf1", "f2" : "kf2", "f3" : "kf3", "f4" : "kf4", + "f5" : "kf5", "f6" : "kf6", "f7" : "kf7", "f8" : "kf8", + "f9" : "kf9", "f10" : "kf10", "f11" : "kf11", "f12" : "kf12", + "f13" : "kf13", "f14" : "kf14", "f15" : "kf15", "f16" : "kf16", + "f17" : "kf17", "f18" : "kf18", "f19" : "kf19", "f20" : "kf20", + "home" : "khome", + "insert" : "kich1", + "left" : "kcub1", + "page down" : "knp", + "page up" : "kpp", + "right" : "kcuf1", + "up" : "kcuu1", + } + +class EventQueue(object): + def __init__(self, fd): + our_keycodes = {} + for key, tiname in _keynames.items(): + keycode = curses.tigetstr(tiname) + if keycode: + our_keycodes[keycode] = unicode(key) + if os.isatty(fd): + our_keycodes[tcgetattr(fd)[6][VERASE]] = u'backspace' + self.k = self.ck = keymap.compile_keymap(our_keycodes) + self.events = [] + self.buf = [] + def get(self): + if 
self.events: + return self.events.pop(0) + else: + return None + def empty(self): + return not self.events + def insert(self, event): + self.events.append(event) + def push(self, char): + if char in self.k: + k = self.k[char] + if isinstance(k, dict): + self.buf.append(char) + self.k = k + else: + self.events.append(Event('key', k, ''.join(self.buf) + char)) + self.buf = [] + self.k = self.ck + elif self.buf: + self.events.extend([Event('key', c, c) for c in self.buf]) + self.buf = [] + self.k = self.ck + self.push(char) + else: + self.events.append(Event('key', char, char)) diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py --- a/py/_io/terminalwriter.py +++ b/py/_io/terminalwriter.py @@ -81,6 +81,9 @@ oldcolors = GetConsoleInfo(handle).wAttributes attr |= (oldcolors & 0x0f0) SetConsoleTextAttribute(handle, attr) + while len(text) > 32768: + file.write(text[:32768]) + text = text[32768:] file.write(text) SetConsoleTextAttribute(handle, oldcolors) else: diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -3,6 +3,7 @@ from pypy.interpreter.pycode import cpython_code_signature from pypy.interpreter.argument import rawshape from pypy.interpreter.argument import ArgErr +from pypy.interpreter.function import Defaults from pypy.tool.sourcetools import valid_identifier from pypy.tool.pairtype import extendabletype @@ -15,7 +16,7 @@ overridden = False normalized = False modified = True - + def __init__(self, desc): self.descs = { desc: True } self.calltables = {} # see calltable_lookup_row() @@ -172,7 +173,7 @@ class FunctionDesc(Desc): knowntype = types.FunctionType overridden = False - + def __init__(self, bookkeeper, pyobj=None, name=None, signature=None, defaults=None, specializer=None): @@ -230,7 +231,7 @@ return '_'.join(map(nameof, thing)) else: return str(thing)[:30] - + if key is not None and alt_name is None: postfix = valid_identifier(nameof(key)) 
alt_name = "%s__%s"%(self.name, postfix) @@ -250,7 +251,7 @@ for x in defaults: defs_s.append(self.bookkeeper.immutablevalue(x)) try: - inputcells = args.match_signature(signature, defs_s) + inputcells = args.match_signature(signature, Defaults(defs_s)) except ArgErr, e: raise TypeError, "signature mismatch: %s" % e.getmsg(self.name) return inputcells @@ -291,7 +292,7 @@ def bind_under(self, classdef, name): # XXX static methods - return self.bookkeeper.getmethoddesc(self, + return self.bookkeeper.getmethoddesc(self, classdef, # originclassdef, None, # selfclassdef name) @@ -574,7 +575,7 @@ while name not in cdesc.classdict: cdesc = cdesc.basedesc if cdesc is None: - return None + return None else: return cdesc @@ -750,7 +751,7 @@ class MethodDesc(Desc): knowntype = types.MethodType - def __init__(self, bookkeeper, funcdesc, originclassdef, + def __init__(self, bookkeeper, funcdesc, originclassdef, selfclassdef, name, flags={}): super(MethodDesc, self).__init__(bookkeeper) self.funcdesc = funcdesc @@ -803,7 +804,7 @@ # FunctionDescs, not MethodDescs. The present method returns the # FunctionDesc to use as a key in that family. return self.funcdesc - + def simplify_desc_set(descs): # Some hacking needed to make contains() happy on SomePBC: if the # set of MethodDescs contains some "redundant" ones, i.e. 
ones that @@ -894,7 +895,7 @@ return s_ImpossibleValue else: return self.bookkeeper.immutablevalue(value) - + def create_new_attribute(self, name, value): try: self.read_attribute(name) @@ -946,7 +947,7 @@ s_self = SomePBC([self.frozendesc]) args = args.prepend(s_self) return self.funcdesc.pycall(schedule, args, s_previous_result) - + def consider_call_site(bookkeeper, family, descs, args, s_result): shape = rawshape(args, nextra=1) # account for the extra 'self' funcdescs = [mofdesc.funcdesc for mofdesc in descs] diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py --- a/pypy/annotation/unaryop.py +++ b/pypy/annotation/unaryop.py @@ -499,10 +499,14 @@ def getanyitem(str): return str.basecharclass() - def method_split(str, patt): # XXX + def method_split(str, patt, max=-1): getbookkeeper().count("str_split", str, patt) return getbookkeeper().newlist(str.basestringclass()) + def method_rsplit(str, patt, max=-1): + getbookkeeper().count("str_rsplit", str, patt) + return getbookkeeper().newlist(str.basestringclass()) + def method_replace(str, s1, s2): return str.basestringclass() diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -159,6 +159,11 @@ cmdline="--allworkingmodules", negation=True), + StrOption("extmodules", + "Comma-separated list of third-party builtin modules", + cmdline="--ext", + default=None), + BoolOption("translationmodules", "use only those modules that are needed to run translate.py on pypy", default=False, @@ -356,8 +361,8 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) - if not IS_64_BITS: - config.objspace.std.suggest(withsmalllong=True) + #if not IS_64_BITS: + # config.objspace.std.suggest(withsmalllong=True) # extra costly optimizations only go in level 3 if level == '3': diff --git a/pypy/config/test/test_pypyoption.py 
b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -70,6 +70,6 @@ prefix = descr._name c = Config(descr) for path in c.getpaths(include_groups=True): - fn = prefix + "." + path + ".txt" + fn = prefix + "." + path + ".rst" yield check_file_exists, fn diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile new file mode 100644 --- /dev/null +++ b/pypy/doc/Makefile @@ -0,0 +1,89 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
+ +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyPy.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/pypy/doc/__pypy__-module.txt b/pypy/doc/__pypy__-module.rst rename from pypy/doc/__pypy__-module.txt rename to pypy/doc/__pypy__-module.rst diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.rst rename from pypy/doc/_ref.txt rename to pypy/doc/_ref.rst --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.rst @@ -3,48 +3,48 @@ .. 
_`lib-python/`: ../../lib-python .. _`lib-python/2.5.2/dis.py`: ../../lib-python/2.5.2/dis.py .. _`annotation/`: -.. _`pypy/annotation`: ../../pypy/annotation -.. _`pypy/annotation/annrpython.py`: ../../pypy/annotation/annrpython.py -.. _`annotation/binaryop.py`: ../../pypy/annotation/binaryop.py -.. _`pypy/annotation/builtin.py`: ../../pypy/annotation/builtin.py -.. _`pypy/annotation/model.py`: ../../pypy/annotation/model.py -.. _`bin/`: ../../pypy/bin -.. _`config/`: ../../pypy/config -.. _`pypy/config/pypyoption.py`: ../../pypy/config/pypyoption.py -.. _`doc/`: ../../pypy/doc -.. _`doc/config/`: ../../pypy/doc/config -.. _`doc/discussion/`: ../../pypy/doc/discussion +.. _`pypy/annotation`: ../../../../pypy/annotation +.. _`pypy/annotation/annrpython.py`: ../../../../pypy/annotation/annrpython.py +.. _`annotation/binaryop.py`: ../../../../pypy/annotation/binaryop.py +.. _`pypy/annotation/builtin.py`: ../../../../pypy/annotation/builtin.py +.. _`pypy/annotation/model.py`: ../../../../pypy/annotation/model.py +.. _`bin/`: ../../../../pypy/bin +.. _`config/`: ../../../../pypy/config +.. _`pypy/config/pypyoption.py`: ../../../../pypy/config/pypyoption.py +.. _`doc/`: ../../../../pypy/doc +.. _`doc/config/`: ../../../../pypy/doc/config +.. _`doc/discussion/`: ../../../../pypy/doc/discussion .. _`interpreter/`: -.. _`pypy/interpreter`: ../../pypy/interpreter -.. _`pypy/interpreter/argument.py`: ../../pypy/interpreter/argument.py +.. _`pypy/interpreter`: ../../../../pypy/interpreter +.. _`pypy/interpreter/argument.py`: ../../../../pypy/interpreter/argument.py .. _`interpreter/astcompiler/`: -.. _`pypy/interpreter/astcompiler`: ../../pypy/interpreter/astcompiler -.. _`pypy/interpreter/executioncontext.py`: ../../pypy/interpreter/executioncontext.py -.. _`pypy/interpreter/function.py`: ../../pypy/interpreter/function.py +.. _`pypy/interpreter/astcompiler`: ../../../../pypy/interpreter/astcompiler +.. 
_`pypy/interpreter/executioncontext.py`: ../../../../pypy/interpreter/executioncontext.py +.. _`pypy/interpreter/function.py`: ../../../../pypy/interpreter/function.py .. _`interpreter/gateway.py`: -.. _`pypy/interpreter/gateway.py`: ../../pypy/interpreter/gateway.py -.. _`pypy/interpreter/generator.py`: ../../pypy/interpreter/generator.py -.. _`pypy/interpreter/mixedmodule.py`: ../../pypy/interpreter/mixedmodule.py -.. _`pypy/interpreter/module.py`: ../../pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: ../../pypy/interpreter/nestedscope.py -.. _`pypy/interpreter/pyopcode.py`: ../../pypy/interpreter/pyopcode.py +.. _`pypy/interpreter/gateway.py`: ../../../../pypy/interpreter/gateway.py +.. _`pypy/interpreter/generator.py`: ../../../../pypy/interpreter/generator.py +.. _`pypy/interpreter/mixedmodule.py`: ../../../../pypy/interpreter/mixedmodule.py +.. _`pypy/interpreter/module.py`: ../../../../pypy/interpreter/module.py +.. _`pypy/interpreter/nestedscope.py`: ../../../../pypy/interpreter/nestedscope.py +.. _`pypy/interpreter/pyopcode.py`: ../../../../pypy/interpreter/pyopcode.py .. _`interpreter/pyparser/`: -.. _`pypy/interpreter/pyparser`: ../../pypy/interpreter/pyparser -.. _`pypy/interpreter/pyparser/pytokenizer.py`: ../../pypy/interpreter/pyparser/pytokenizer.py -.. _`pypy/interpreter/pyparser/parser.py`: ../../pypy/interpreter/pyparser/parser.py -.. _`pypy/interpreter/pyparser/pyparse.py`: ../../pypy/interpreter/pyparser/pyparse.py -.. _`pypy/interpreter/pyparser/future.py`: ../../pypy/interpreter/pyparser/future.py -.. _`pypy/interpreter/pyparser/metaparser.py`: ../../pypy/interpreter/pyparser/metaparser.py -.. _`pypy/interpreter/astcompiler/astbuilder.py`: ../../pypy/interpreter/astcompiler/astbuilder.py -.. _`pypy/interpreter/astcompiler/optimize.py`: ../../pypy/interpreter/astcompiler/optimize.py -.. _`pypy/interpreter/astcompiler/codegen.py`: ../../pypy/interpreter/astcompiler/codegen.py -.. 
_`pypy/interpreter/astcompiler/tools/asdl_py.py`: ../../pypy/interpreter/astcompiler/tools/asdl_py.py -.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: ../../pypy/interpreter/astcompiler/tools/Python.asdl -.. _`pypy/interpreter/astcompiler/assemble.py`: ../../pypy/interpreter/astcompiler/assemble.py -.. _`pypy/interpreter/astcompiler/symtable.py`: ../../pypy/interpreter/astcompiler/symtable.py -.. _`pypy/interpreter/astcompiler/asthelpers.py`: ../../pypy/interpreter/astcompiler/asthelpers.py -.. _`pypy/interpreter/astcompiler/ast.py`: ../../pypy/interpreter/astcompiler/ast.py -.. _`pypy/interpreter/typedef.py`: ../../pypy/interpreter/typedef.py +.. _`pypy/interpreter/pyparser`: ../../../../pypy/interpreter/pyparser +.. _`pypy/interpreter/pyparser/pytokenizer.py`: ../../../../pypy/interpreter/pyparser/pytokenizer.py +.. _`pypy/interpreter/pyparser/parser.py`: ../../../../pypy/interpreter/pyparser/parser.py +.. _`pypy/interpreter/pyparser/pyparse.py`: ../../../../pypy/interpreter/pyparser/pyparse.py +.. _`pypy/interpreter/pyparser/future.py`: ../../../../pypy/interpreter/pyparser/future.py +.. _`pypy/interpreter/pyparser/metaparser.py`: ../../../../pypy/interpreter/pyparser/metaparser.py +.. _`pypy/interpreter/astcompiler/astbuilder.py`: ../../../../pypy/interpreter/astcompiler/astbuilder.py +.. _`pypy/interpreter/astcompiler/optimize.py`: ../../../../pypy/interpreter/astcompiler/optimize.py +.. _`pypy/interpreter/astcompiler/codegen.py`: ../../../../pypy/interpreter/astcompiler/codegen.py +.. _`pypy/interpreter/astcompiler/tools/asdl_py.py`: ../../../../pypy/interpreter/astcompiler/tools/asdl_py.py +.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: ../../../../pypy/interpreter/astcompiler/tools/Python.asdl +.. _`pypy/interpreter/astcompiler/assemble.py`: ../../../../pypy/interpreter/astcompiler/assemble.py +.. _`pypy/interpreter/astcompiler/symtable.py`: ../../../../pypy/interpreter/astcompiler/symtable.py +.. 
_`pypy/interpreter/astcompiler/asthelpers.py`: ../../../../pypy/interpreter/astcompiler/asthelpers.py +.. _`pypy/interpreter/astcompiler/ast.py`: ../../../../pypy/interpreter/astcompiler/ast.py +.. _`pypy/interpreter/typedef.py`: ../../../../pypy/interpreter/typedef.py .. _`lib/`: .. _`lib_pypy/`: ../../lib_pypy .. _`lib/distributed/`: ../../lib_pypy/distributed @@ -52,56 +52,56 @@ .. _`lib_pypy/pypy_test/`: ../../lib_pypy/pypy_test .. _`module/`: .. _`pypy/module`: -.. _`pypy/module/`: ../../pypy/module -.. _`pypy/module/__builtin__/__init__.py`: ../../pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_clonable.py`: ../../pypy/module/_stackless/test/test_clonable.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: ../../pypy/module/_stackless/test/test_composable_coroutine.py +.. _`pypy/module/`: ../../../../pypy/module +.. _`pypy/module/__builtin__/__init__.py`: ../../../../pypy/module/__builtin__/__init__.py +.. _`pypy/module/_stackless/test/test_clonable.py`: ../../../../pypy/module/_stackless/test/test_clonable.py +.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: ../../../../pypy/module/_stackless/test/test_composable_coroutine.py .. _`objspace/`: -.. _`pypy/objspace`: ../../pypy/objspace -.. _`objspace/dump.py`: ../../pypy/objspace/dump.py -.. _`objspace/flow/`: ../../pypy/objspace/flow +.. _`pypy/objspace`: ../../../../pypy/objspace +.. _`objspace/dump.py`: ../../../../pypy/objspace/dump.py +.. _`objspace/flow/`: ../../../../pypy/objspace/flow .. _`objspace/std/`: -.. _`pypy/objspace/std`: ../../pypy/objspace/std -.. _`objspace/taint.py`: ../../pypy/objspace/taint.py +.. _`pypy/objspace/std`: ../../../../pypy/objspace/std +.. _`objspace/taint.py`: ../../../../pypy/objspace/taint.py .. _`objspace/thunk.py`: -.. _`pypy/objspace/thunk.py`: ../../pypy/objspace/thunk.py +.. _`pypy/objspace/thunk.py`: ../../../../pypy/objspace/thunk.py .. _`objspace/trace.py`: -.. 
_`pypy/objspace/trace.py`: ../../pypy/objspace/trace.py +.. _`pypy/objspace/trace.py`: ../../../../pypy/objspace/trace.py .. _`pypy/rlib`: -.. _`rlib/`: ../../pypy/rlib -.. _`pypy/rlib/rarithmetic.py`: ../../pypy/rlib/rarithmetic.py -.. _`pypy/rlib/test`: ../../pypy/rlib/test +.. _`rlib/`: ../../../../pypy/rlib +.. _`pypy/rlib/rarithmetic.py`: ../../../../pypy/rlib/rarithmetic.py +.. _`pypy/rlib/test`: ../../../../pypy/rlib/test .. _`pypy/rpython`: .. _`pypy/rpython/`: -.. _`rpython/`: ../../pypy/rpython -.. _`rpython/lltypesystem/`: ../../pypy/rpython/lltypesystem +.. _`rpython/`: ../../../../pypy/rpython +.. _`rpython/lltypesystem/`: ../../../../pypy/rpython/lltypesystem .. _`pypy/rpython/lltypesystem/lltype.py`: -.. _`rpython/lltypesystem/lltype.py`: ../../pypy/rpython/lltypesystem/lltype.py -.. _`rpython/memory/`: ../../pypy/rpython/memory -.. _`rpython/memory/gc/generation.py`: ../../pypy/rpython/memory/gc/generation.py -.. _`rpython/memory/gc/hybrid.py`: ../../pypy/rpython/memory/gc/hybrid.py -.. _`rpython/memory/gc/markcompact.py`: ../../pypy/rpython/memory/gc/markcompact.py -.. _`rpython/memory/gc/marksweep.py`: ../../pypy/rpython/memory/gc/marksweep.py -.. _`rpython/memory/gc/semispace.py`: ../../pypy/rpython/memory/gc/semispace.py -.. _`rpython/ootypesystem/`: ../../pypy/rpython/ootypesystem -.. _`rpython/ootypesystem/ootype.py`: ../../pypy/rpython/ootypesystem/ootype.py -.. _`rpython/rint.py`: ../../pypy/rpython/rint.py -.. _`rpython/rlist.py`: ../../pypy/rpython/rlist.py -.. _`rpython/rmodel.py`: ../../pypy/rpython/rmodel.py -.. _`pypy/rpython/rtyper.py`: ../../pypy/rpython/rtyper.py -.. _`pypy/rpython/test/test_llinterp.py`: ../../pypy/rpython/test/test_llinterp.py -.. _`pypy/test_all.py`: ../../pypy/test_all.py -.. _`tool/`: ../../pypy/tool -.. _`tool/algo/`: ../../pypy/tool/algo -.. _`tool/pytest/`: ../../pypy/tool/pytest +.. _`rpython/lltypesystem/lltype.py`: ../../../../pypy/rpython/lltypesystem/lltype.py +.. 
_`rpython/memory/`: ../../../../pypy/rpython/memory +.. _`rpython/memory/gc/generation.py`: ../../../../pypy/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: ../../../../pypy/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/markcompact.py`: ../../../../pypy/rpython/memory/gc/markcompact.py +.. _`rpython/memory/gc/marksweep.py`: ../../../../pypy/rpython/memory/gc/marksweep.py +.. _`rpython/memory/gc/semispace.py`: ../../../../pypy/rpython/memory/gc/semispace.py +.. _`rpython/ootypesystem/`: ../../../../pypy/rpython/ootypesystem +.. _`rpython/ootypesystem/ootype.py`: ../../../../pypy/rpython/ootypesystem/ootype.py +.. _`rpython/rint.py`: ../../../../pypy/rpython/rint.py +.. _`rpython/rlist.py`: ../../../../pypy/rpython/rlist.py +.. _`rpython/rmodel.py`: ../../../../pypy/rpython/rmodel.py +.. _`pypy/rpython/rtyper.py`: ../../../../pypy/rpython/rtyper.py +.. _`pypy/rpython/test/test_llinterp.py`: ../../../../pypy/rpython/test/test_llinterp.py +.. _`pypy/test_all.py`: ../../../../pypy/test_all.py +.. _`tool/`: ../../../../pypy/tool +.. _`tool/algo/`: ../../../../pypy/tool/algo +.. _`tool/pytest/`: ../../../../pypy/tool/pytest .. _`pypy/translator`: -.. _`translator/`: ../../pypy/translator -.. _`translator/backendopt/`: ../../pypy/translator/backendopt -.. _`translator/c/`: ../../pypy/translator/c -.. _`translator/cli/`: ../../pypy/translator/cli -.. _`translator/goal/`: ../../pypy/translator/goal -.. _`pypy/translator/goal/targetnopstandalone.py`: ../../pypy/translator/goal/targetnopstandalone.py -.. _`translator/jvm/`: ../../pypy/translator/jvm -.. _`translator/stackless/`: ../../pypy/translator/stackless -.. _`translator/tool/`: ../../pypy/translator/tool +.. _`translator/`: ../../../../pypy/translator +.. _`translator/backendopt/`: ../../../../pypy/translator/backendopt +.. _`translator/c/`: ../../../../pypy/translator/c +.. _`translator/cli/`: ../../../../pypy/translator/cli +.. _`translator/goal/`: ../../../../pypy/translator/goal +.. 
_`pypy/translator/goal/targetnopstandalone.py`: ../../../../pypy/translator/goal/targetnopstandalone.py +.. _`translator/jvm/`: ../../../../pypy/translator/jvm +.. _`translator/stackless/`: ../../../../pypy/translator/stackless +.. _`translator/tool/`: ../../../../pypy/translator/tool .. _`translator/js/`: http://codespeak.net/svn/pypy/branch/oo-jit/pypy/translator/js/ diff --git a/pypy/doc/architecture.txt b/pypy/doc/architecture.rst rename from pypy/doc/architecture.txt rename to pypy/doc/architecture.rst --- a/pypy/doc/architecture.txt +++ b/pypy/doc/architecture.rst @@ -1,9 +1,9 @@ ================================================== -PyPy - Goals and Architecture Overview +Goals and Architecture Overview ================================================== .. contents:: -.. sectnum:: + This document gives an overview of the goals and architecture of PyPy. See `getting started`_ for a practical introduction and starting points. @@ -260,5 +260,5 @@ .. _`generate Just-In-Time Compilers`: jit/index.html .. _`JIT Generation in PyPy`: jit/index.html -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/buildtool.txt b/pypy/doc/buildtool.rst rename from pypy/doc/buildtool.txt rename to pypy/doc/buildtool.rst --- a/pypy/doc/buildtool.txt +++ b/pypy/doc/buildtool.rst @@ -2,6 +2,8 @@ PyPyBuilder ============ +.. include:: crufty.rst + What is this? ============= diff --git a/pypy/doc/carbonpython.txt b/pypy/doc/carbonpython.rst rename from pypy/doc/carbonpython.txt rename to pypy/doc/carbonpython.rst diff --git a/pypy/doc/cleanup-todo.txt b/pypy/doc/cleanup-todo.rst rename from pypy/doc/cleanup-todo.txt rename to pypy/doc/cleanup-todo.rst diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/cleanup.rst @@ -0,0 +1,46 @@ +Old documentation that needs review +----------------------------------- + +.. The following stuff is old (and crufty?), and needs further investigation: + +.. 
doc-index: This needs merging somehow + +.. svn-help.rst: Needs merging/replacing with hg stuff: + + +.. toctree:: + + buildtool.rst + distribution.rst + + externaltools.rst + + geninterp.rst + + objspace-proxies.rst + + old_news.rst + + project-ideas.rst + + rffi.rst + + sandbox.rst + + statistic/index.rst + + theory.rst + + translation-aspects.rst + + docindex.rst + + svn-help.rst + + dot-net.rst + + maemo.rst + + + + diff --git a/pypy/doc/cli-backend.txt b/pypy/doc/cli-backend.rst rename from pypy/doc/cli-backend.txt rename to pypy/doc/cli-backend.rst diff --git a/pypy/doc/clr-module.txt b/pypy/doc/clr-module.rst rename from pypy/doc/clr-module.txt rename to pypy/doc/clr-module.rst diff --git a/pypy/doc/coding-guide.txt b/pypy/doc/coding-guide.rst rename from pypy/doc/coding-guide.txt rename to pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.txt +++ b/pypy/doc/coding-guide.rst @@ -1,9 +1,9 @@ ===================================== -PyPy - Coding Guide +Coding Guide ===================================== .. contents:: -.. sectnum:: + This document describes coding requirements and conventions for @@ -354,7 +354,7 @@ silent wrap-around. Whenever we need more control, we use the following helpers (which live the `pypy/rlib/rarithmetic.py`_): -.. _`pypy/rlib/rarithmetic.py`: ../../pypy/rlib/rarithmetic.py +.. _`pypy/rlib/rarithmetic.py`: ../../../../pypy/rlib/rarithmetic.py **ovfcheck()** @@ -1085,4 +1085,4 @@ which will check that remote URLs are reachable. -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py new file mode 100644 --- /dev/null +++ b/pypy/doc/conf.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# +# PyPy documentation build configuration file, created by +# sphinx-quickstart on Mon Mar 14 10:44:41 2011. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. 
+# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.append(os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'PyPy' +copyright = u'2011, The PyPy Project' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '1.5' +# The full version, including alpha/beta/rc tags. +release = '1.5-alpha' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. 
+exclude_trees = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. 
They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'PyPydoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('temp_index', 'PyPy.tex', u'PyPy Documentation', + u'The PyPy Project', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. 
+#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/pypy/doc/config/commandline.txt b/pypy/doc/config/commandline.rst rename from pypy/doc/config/commandline.txt rename to pypy/doc/config/commandline.rst diff --git a/pypy/doc/config/confrest.py b/pypy/doc/config/confrest.py --- a/pypy/doc/config/confrest.py +++ b/pypy/doc/config/confrest.py @@ -7,7 +7,6 @@ all_optiondescrs = [pypyoption.pypy_optiondescription, translationoption.translation_optiondescription, ] - start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) class PyPyPage(PyPyPage): @@ -29,7 +28,7 @@ Page = PyPyPage def get_content(self, txtpath, encoding): - if txtpath.basename == "commandline.txt": + if txtpath.basename == "commandline.rst": result = [] for line in txtpath.read().splitlines(): if line.startswith('.. GENERATE:'): diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -8,8 +8,8 @@ for descr in all_optiondescrs: prefix = descr._name c = config.Config(descr) - thisdir.join(prefix + ".txt").ensure() + thisdir.join(prefix + ".rst").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".txt" + basename = prefix + "." 
+ p + ".rst" f = thisdir.join(basename) f.ensure() diff --git a/pypy/doc/config/index.txt b/pypy/doc/config/index.rst rename from pypy/doc/config/index.txt rename to pypy/doc/config/index.rst diff --git a/pypy/doc/config/objspace.allworkingmodules.txt b/pypy/doc/config/objspace.allworkingmodules.rst rename from pypy/doc/config/objspace.allworkingmodules.txt rename to pypy/doc/config/objspace.allworkingmodules.rst diff --git a/pypy/doc/config/objspace.disable_call_speedhacks.txt b/pypy/doc/config/objspace.disable_call_speedhacks.rst rename from pypy/doc/config/objspace.disable_call_speedhacks.txt rename to pypy/doc/config/objspace.disable_call_speedhacks.rst diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.extmodules.rst @@ -0,0 +1,12 @@ +You can pass a comma-separated list of third-party builtin modules +which should be translated along with the standard modules within +``pypy.module``. + +The module names need to be fully qualified (i.e. have a ``.`` in them), +be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g. +``mypkg.somemod``. 
+ +Once translated, the module will be accessible with a simple:: + + import somemod + diff --git a/pypy/doc/config/objspace.geninterp.txt b/pypy/doc/config/objspace.geninterp.rst rename from pypy/doc/config/objspace.geninterp.txt rename to pypy/doc/config/objspace.geninterp.rst diff --git a/pypy/doc/config/objspace.honor__builtins__.txt b/pypy/doc/config/objspace.honor__builtins__.rst rename from pypy/doc/config/objspace.honor__builtins__.txt rename to pypy/doc/config/objspace.honor__builtins__.rst diff --git a/pypy/doc/config/objspace.logbytecodes.txt b/pypy/doc/config/objspace.logbytecodes.rst rename from pypy/doc/config/objspace.logbytecodes.txt rename to pypy/doc/config/objspace.logbytecodes.rst diff --git a/pypy/doc/config/objspace.lonepycfiles.txt b/pypy/doc/config/objspace.lonepycfiles.rst rename from pypy/doc/config/objspace.lonepycfiles.txt rename to pypy/doc/config/objspace.lonepycfiles.rst diff --git a/pypy/doc/config/objspace.name.txt b/pypy/doc/config/objspace.name.rst rename from pypy/doc/config/objspace.name.txt rename to pypy/doc/config/objspace.name.rst diff --git a/pypy/doc/config/objspace.nofaking.txt b/pypy/doc/config/objspace.nofaking.rst rename from pypy/doc/config/objspace.nofaking.txt rename to pypy/doc/config/objspace.nofaking.rst diff --git a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst rename from pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt rename to pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst diff --git a/pypy/doc/config/objspace.opcodes.CALL_METHOD.txt b/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst rename from pypy/doc/config/objspace.opcodes.CALL_METHOD.txt rename to pypy/doc/config/objspace.opcodes.CALL_METHOD.rst diff --git a/pypy/doc/config/objspace.opcodes.txt b/pypy/doc/config/objspace.opcodes.rst rename from pypy/doc/config/objspace.opcodes.txt rename to pypy/doc/config/objspace.opcodes.rst diff --git 
a/pypy/doc/config/objspace.txt b/pypy/doc/config/objspace.rst rename from pypy/doc/config/objspace.txt rename to pypy/doc/config/objspace.rst diff --git a/pypy/doc/config/objspace.soabi.txt b/pypy/doc/config/objspace.soabi.rst rename from pypy/doc/config/objspace.soabi.txt rename to pypy/doc/config/objspace.soabi.rst diff --git a/pypy/doc/config/objspace.std.builtinshortcut.txt b/pypy/doc/config/objspace.std.builtinshortcut.rst rename from pypy/doc/config/objspace.std.builtinshortcut.txt rename to pypy/doc/config/objspace.std.builtinshortcut.rst diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.rst rename from pypy/doc/config/objspace.std.getattributeshortcut.txt rename to pypy/doc/config/objspace.std.getattributeshortcut.rst diff --git a/pypy/doc/config/objspace.std.logspaceoptypes.txt b/pypy/doc/config/objspace.std.logspaceoptypes.rst rename from pypy/doc/config/objspace.std.logspaceoptypes.txt rename to pypy/doc/config/objspace.std.logspaceoptypes.rst diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.rst rename from pypy/doc/config/objspace.std.methodcachesizeexp.txt rename to pypy/doc/config/objspace.std.methodcachesizeexp.rst diff --git a/pypy/doc/config/objspace.std.multimethods.txt b/pypy/doc/config/objspace.std.multimethods.rst rename from pypy/doc/config/objspace.std.multimethods.txt rename to pypy/doc/config/objspace.std.multimethods.rst diff --git a/pypy/doc/config/objspace.std.mutable_builtintypes.txt b/pypy/doc/config/objspace.std.mutable_builtintypes.rst rename from pypy/doc/config/objspace.std.mutable_builtintypes.txt rename to pypy/doc/config/objspace.std.mutable_builtintypes.rst diff --git a/pypy/doc/config/objspace.std.newshortcut.txt b/pypy/doc/config/objspace.std.newshortcut.rst rename from pypy/doc/config/objspace.std.newshortcut.txt rename to pypy/doc/config/objspace.std.newshortcut.rst diff --git 
a/pypy/doc/config/objspace.std.optimized_comparison_op.txt b/pypy/doc/config/objspace.std.optimized_comparison_op.rst rename from pypy/doc/config/objspace.std.optimized_comparison_op.txt rename to pypy/doc/config/objspace.std.optimized_comparison_op.rst diff --git a/pypy/doc/config/objspace.std.optimized_int_add.txt b/pypy/doc/config/objspace.std.optimized_int_add.rst rename from pypy/doc/config/objspace.std.optimized_int_add.txt rename to pypy/doc/config/objspace.std.optimized_int_add.rst diff --git a/pypy/doc/config/objspace.std.optimized_list_getitem.txt b/pypy/doc/config/objspace.std.optimized_list_getitem.rst rename from pypy/doc/config/objspace.std.optimized_list_getitem.txt rename to pypy/doc/config/objspace.std.optimized_list_getitem.rst diff --git a/pypy/doc/config/objspace.std.prebuiltintfrom.txt b/pypy/doc/config/objspace.std.prebuiltintfrom.rst rename from pypy/doc/config/objspace.std.prebuiltintfrom.txt rename to pypy/doc/config/objspace.std.prebuiltintfrom.rst diff --git a/pypy/doc/config/objspace.std.prebuiltintto.txt b/pypy/doc/config/objspace.std.prebuiltintto.rst rename from pypy/doc/config/objspace.std.prebuiltintto.txt rename to pypy/doc/config/objspace.std.prebuiltintto.rst diff --git a/pypy/doc/config/objspace.std.txt b/pypy/doc/config/objspace.std.rst rename from pypy/doc/config/objspace.std.txt rename to pypy/doc/config/objspace.std.rst diff --git a/pypy/doc/config/objspace.std.sharesmallstr.txt b/pypy/doc/config/objspace.std.sharesmallstr.rst rename from pypy/doc/config/objspace.std.sharesmallstr.txt rename to pypy/doc/config/objspace.std.sharesmallstr.rst diff --git a/pypy/doc/config/objspace.std.withcelldict.txt b/pypy/doc/config/objspace.std.withcelldict.rst rename from pypy/doc/config/objspace.std.withcelldict.txt rename to pypy/doc/config/objspace.std.withcelldict.rst diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.txt b/pypy/doc/config/objspace.std.withdictmeasurement.rst rename from 
pypy/doc/config/objspace.std.withdictmeasurement.txt rename to pypy/doc/config/objspace.std.withdictmeasurement.rst diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.rst rename from pypy/doc/config/objspace.std.withmapdict.txt rename to pypy/doc/config/objspace.std.withmapdict.rst diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.rst rename from pypy/doc/config/objspace.std.withmethodcache.txt rename to pypy/doc/config/objspace.std.withmethodcache.rst diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.rst rename from pypy/doc/config/objspace.std.withmethodcachecounter.txt rename to pypy/doc/config/objspace.std.withmethodcachecounter.rst diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.rst rename from pypy/doc/config/objspace.std.withprebuiltchar.txt rename to pypy/doc/config/objspace.std.withprebuiltchar.rst diff --git a/pypy/doc/config/objspace.std.withprebuiltint.txt b/pypy/doc/config/objspace.std.withprebuiltint.rst rename from pypy/doc/config/objspace.std.withprebuiltint.txt rename to pypy/doc/config/objspace.std.withprebuiltint.rst diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.rst rename from pypy/doc/config/objspace.std.withrangelist.txt rename to pypy/doc/config/objspace.std.withrangelist.rst diff --git a/pypy/doc/config/objspace.std.withrope.txt b/pypy/doc/config/objspace.std.withrope.rst rename from pypy/doc/config/objspace.std.withrope.txt rename to pypy/doc/config/objspace.std.withrope.rst diff --git a/pypy/doc/config/objspace.std.withropeunicode.txt b/pypy/doc/config/objspace.std.withropeunicode.rst rename from pypy/doc/config/objspace.std.withropeunicode.txt rename to pypy/doc/config/objspace.std.withropeunicode.rst diff --git 
a/pypy/doc/config/objspace.std.withsmallint.txt b/pypy/doc/config/objspace.std.withsmallint.rst rename from pypy/doc/config/objspace.std.withsmallint.txt rename to pypy/doc/config/objspace.std.withsmallint.rst diff --git a/pypy/doc/config/objspace.std.withsmalllong.txt b/pypy/doc/config/objspace.std.withsmalllong.rst rename from pypy/doc/config/objspace.std.withsmalllong.txt rename to pypy/doc/config/objspace.std.withsmalllong.rst diff --git a/pypy/doc/config/objspace.std.withstrbuf.txt b/pypy/doc/config/objspace.std.withstrbuf.rst rename from pypy/doc/config/objspace.std.withstrbuf.txt rename to pypy/doc/config/objspace.std.withstrbuf.rst diff --git a/pypy/doc/config/objspace.std.withstrjoin.txt b/pypy/doc/config/objspace.std.withstrjoin.rst rename from pypy/doc/config/objspace.std.withstrjoin.txt rename to pypy/doc/config/objspace.std.withstrjoin.rst diff --git a/pypy/doc/config/objspace.std.withstrslice.txt b/pypy/doc/config/objspace.std.withstrslice.rst rename from pypy/doc/config/objspace.std.withstrslice.txt rename to pypy/doc/config/objspace.std.withstrslice.rst diff --git a/pypy/doc/config/objspace.std.withtproxy.txt b/pypy/doc/config/objspace.std.withtproxy.rst rename from pypy/doc/config/objspace.std.withtproxy.txt rename to pypy/doc/config/objspace.std.withtproxy.rst diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.rst rename from pypy/doc/config/objspace.std.withtypeversion.txt rename to pypy/doc/config/objspace.std.withtypeversion.rst diff --git a/pypy/doc/config/objspace.timing.txt b/pypy/doc/config/objspace.timing.rst rename from pypy/doc/config/objspace.timing.txt rename to pypy/doc/config/objspace.timing.rst diff --git a/pypy/doc/config/objspace.translationmodules.txt b/pypy/doc/config/objspace.translationmodules.rst rename from pypy/doc/config/objspace.translationmodules.txt rename to pypy/doc/config/objspace.translationmodules.rst diff --git 
a/pypy/doc/config/objspace.usemodules.__builtin__.txt b/pypy/doc/config/objspace.usemodules.__builtin__.rst rename from pypy/doc/config/objspace.usemodules.__builtin__.txt rename to pypy/doc/config/objspace.usemodules.__builtin__.rst diff --git a/pypy/doc/config/objspace.usemodules.__pypy__.txt b/pypy/doc/config/objspace.usemodules.__pypy__.rst rename from pypy/doc/config/objspace.usemodules.__pypy__.txt rename to pypy/doc/config/objspace.usemodules.__pypy__.rst diff --git a/pypy/doc/config/objspace.usemodules._ast.txt b/pypy/doc/config/objspace.usemodules._ast.rst rename from pypy/doc/config/objspace.usemodules._ast.txt rename to pypy/doc/config/objspace.usemodules._ast.rst diff --git a/pypy/doc/config/objspace.usemodules._bisect.txt b/pypy/doc/config/objspace.usemodules._bisect.rst rename from pypy/doc/config/objspace.usemodules._bisect.txt rename to pypy/doc/config/objspace.usemodules._bisect.rst diff --git a/pypy/doc/config/objspace.usemodules._codecs.txt b/pypy/doc/config/objspace.usemodules._codecs.rst rename from pypy/doc/config/objspace.usemodules._codecs.txt rename to pypy/doc/config/objspace.usemodules._codecs.rst diff --git a/pypy/doc/config/objspace.usemodules._collections.txt b/pypy/doc/config/objspace.usemodules._collections.rst rename from pypy/doc/config/objspace.usemodules._collections.txt rename to pypy/doc/config/objspace.usemodules._collections.rst diff --git a/pypy/doc/config/objspace.usemodules._demo.txt b/pypy/doc/config/objspace.usemodules._demo.rst rename from pypy/doc/config/objspace.usemodules._demo.txt rename to pypy/doc/config/objspace.usemodules._demo.rst diff --git a/pypy/doc/config/objspace.usemodules._ffi.txt b/pypy/doc/config/objspace.usemodules._ffi.rst rename from pypy/doc/config/objspace.usemodules._ffi.txt rename to pypy/doc/config/objspace.usemodules._ffi.rst diff --git a/pypy/doc/config/objspace.usemodules._file.txt b/pypy/doc/config/objspace.usemodules._file.rst rename from pypy/doc/config/objspace.usemodules._file.txt 
rename to pypy/doc/config/objspace.usemodules._file.rst diff --git a/pypy/doc/config/objspace.usemodules._hashlib.txt b/pypy/doc/config/objspace.usemodules._hashlib.rst rename from pypy/doc/config/objspace.usemodules._hashlib.txt rename to pypy/doc/config/objspace.usemodules._hashlib.rst diff --git a/pypy/doc/config/objspace.usemodules._io.txt b/pypy/doc/config/objspace.usemodules._io.rst rename from pypy/doc/config/objspace.usemodules._io.txt rename to pypy/doc/config/objspace.usemodules._io.rst diff --git a/pypy/doc/config/objspace.usemodules._locale.txt b/pypy/doc/config/objspace.usemodules._locale.rst rename from pypy/doc/config/objspace.usemodules._locale.txt rename to pypy/doc/config/objspace.usemodules._locale.rst diff --git a/pypy/doc/config/objspace.usemodules._lsprof.txt b/pypy/doc/config/objspace.usemodules._lsprof.rst rename from pypy/doc/config/objspace.usemodules._lsprof.txt rename to pypy/doc/config/objspace.usemodules._lsprof.rst diff --git a/pypy/doc/config/objspace.usemodules._md5.txt b/pypy/doc/config/objspace.usemodules._md5.rst rename from pypy/doc/config/objspace.usemodules._md5.txt rename to pypy/doc/config/objspace.usemodules._md5.rst diff --git a/pypy/doc/config/objspace.usemodules._minimal_curses.txt b/pypy/doc/config/objspace.usemodules._minimal_curses.rst rename from pypy/doc/config/objspace.usemodules._minimal_curses.txt rename to pypy/doc/config/objspace.usemodules._minimal_curses.rst diff --git a/pypy/doc/config/objspace.usemodules._multiprocessing.txt b/pypy/doc/config/objspace.usemodules._multiprocessing.rst rename from pypy/doc/config/objspace.usemodules._multiprocessing.txt rename to pypy/doc/config/objspace.usemodules._multiprocessing.rst diff --git a/pypy/doc/config/objspace.usemodules._pickle_support.txt b/pypy/doc/config/objspace.usemodules._pickle_support.rst rename from pypy/doc/config/objspace.usemodules._pickle_support.txt rename to pypy/doc/config/objspace.usemodules._pickle_support.rst diff --git 
a/pypy/doc/config/objspace.usemodules._random.txt b/pypy/doc/config/objspace.usemodules._random.rst rename from pypy/doc/config/objspace.usemodules._random.txt rename to pypy/doc/config/objspace.usemodules._random.rst diff --git a/pypy/doc/config/objspace.usemodules._rawffi.txt b/pypy/doc/config/objspace.usemodules._rawffi.rst rename from pypy/doc/config/objspace.usemodules._rawffi.txt rename to pypy/doc/config/objspace.usemodules._rawffi.rst diff --git a/pypy/doc/config/objspace.usemodules._sha.txt b/pypy/doc/config/objspace.usemodules._sha.rst rename from pypy/doc/config/objspace.usemodules._sha.txt rename to pypy/doc/config/objspace.usemodules._sha.rst diff --git a/pypy/doc/config/objspace.usemodules._socket.txt b/pypy/doc/config/objspace.usemodules._socket.rst rename from pypy/doc/config/objspace.usemodules._socket.txt rename to pypy/doc/config/objspace.usemodules._socket.rst diff --git a/pypy/doc/config/objspace.usemodules._sre.txt b/pypy/doc/config/objspace.usemodules._sre.rst rename from pypy/doc/config/objspace.usemodules._sre.txt rename to pypy/doc/config/objspace.usemodules._sre.rst diff --git a/pypy/doc/config/objspace.usemodules._ssl.txt b/pypy/doc/config/objspace.usemodules._ssl.rst rename from pypy/doc/config/objspace.usemodules._ssl.txt rename to pypy/doc/config/objspace.usemodules._ssl.rst diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._stackless.rst rename from pypy/doc/config/objspace.usemodules._stackless.txt rename to pypy/doc/config/objspace.usemodules._stackless.rst diff --git a/pypy/doc/config/objspace.usemodules._testing.txt b/pypy/doc/config/objspace.usemodules._testing.rst rename from pypy/doc/config/objspace.usemodules._testing.txt rename to pypy/doc/config/objspace.usemodules._testing.rst diff --git a/pypy/doc/config/objspace.usemodules._warnings.txt b/pypy/doc/config/objspace.usemodules._warnings.rst rename from pypy/doc/config/objspace.usemodules._warnings.txt rename to 
pypy/doc/config/objspace.usemodules._warnings.rst diff --git a/pypy/doc/config/objspace.usemodules._weakref.txt b/pypy/doc/config/objspace.usemodules._weakref.rst rename from pypy/doc/config/objspace.usemodules._weakref.txt rename to pypy/doc/config/objspace.usemodules._weakref.rst diff --git a/pypy/doc/config/objspace.usemodules._winreg.txt b/pypy/doc/config/objspace.usemodules._winreg.rst rename from pypy/doc/config/objspace.usemodules._winreg.txt rename to pypy/doc/config/objspace.usemodules._winreg.rst diff --git a/pypy/doc/config/objspace.usemodules.array.txt b/pypy/doc/config/objspace.usemodules.array.rst rename from pypy/doc/config/objspace.usemodules.array.txt rename to pypy/doc/config/objspace.usemodules.array.rst diff --git a/pypy/doc/config/objspace.usemodules.binascii.txt b/pypy/doc/config/objspace.usemodules.binascii.rst rename from pypy/doc/config/objspace.usemodules.binascii.txt rename to pypy/doc/config/objspace.usemodules.binascii.rst diff --git a/pypy/doc/config/objspace.usemodules.bz2.txt b/pypy/doc/config/objspace.usemodules.bz2.rst rename from pypy/doc/config/objspace.usemodules.bz2.txt rename to pypy/doc/config/objspace.usemodules.bz2.rst diff --git a/pypy/doc/config/objspace.usemodules.cStringIO.txt b/pypy/doc/config/objspace.usemodules.cStringIO.rst rename from pypy/doc/config/objspace.usemodules.cStringIO.txt rename to pypy/doc/config/objspace.usemodules.cStringIO.rst diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.rst rename from pypy/doc/config/objspace.usemodules.clr.txt rename to pypy/doc/config/objspace.usemodules.clr.rst diff --git a/pypy/doc/config/objspace.usemodules.cmath.txt b/pypy/doc/config/objspace.usemodules.cmath.rst rename from pypy/doc/config/objspace.usemodules.cmath.txt rename to pypy/doc/config/objspace.usemodules.cmath.rst diff --git a/pypy/doc/config/objspace.usemodules.cpyext.txt b/pypy/doc/config/objspace.usemodules.cpyext.rst rename from 
pypy/doc/config/objspace.usemodules.cpyext.txt rename to pypy/doc/config/objspace.usemodules.cpyext.rst diff --git a/pypy/doc/config/objspace.usemodules.crypt.txt b/pypy/doc/config/objspace.usemodules.crypt.rst rename from pypy/doc/config/objspace.usemodules.crypt.txt rename to pypy/doc/config/objspace.usemodules.crypt.rst diff --git a/pypy/doc/config/objspace.usemodules.errno.txt b/pypy/doc/config/objspace.usemodules.errno.rst rename from pypy/doc/config/objspace.usemodules.errno.txt rename to pypy/doc/config/objspace.usemodules.errno.rst diff --git a/pypy/doc/config/objspace.usemodules.exceptions.txt b/pypy/doc/config/objspace.usemodules.exceptions.rst rename from pypy/doc/config/objspace.usemodules.exceptions.txt rename to pypy/doc/config/objspace.usemodules.exceptions.rst diff --git a/pypy/doc/config/objspace.usemodules.fcntl.txt b/pypy/doc/config/objspace.usemodules.fcntl.rst rename from pypy/doc/config/objspace.usemodules.fcntl.txt rename to pypy/doc/config/objspace.usemodules.fcntl.rst diff --git a/pypy/doc/config/objspace.usemodules.gc.txt b/pypy/doc/config/objspace.usemodules.gc.rst rename from pypy/doc/config/objspace.usemodules.gc.txt rename to pypy/doc/config/objspace.usemodules.gc.rst diff --git a/pypy/doc/config/objspace.usemodules.imp.txt b/pypy/doc/config/objspace.usemodules.imp.rst rename from pypy/doc/config/objspace.usemodules.imp.txt rename to pypy/doc/config/objspace.usemodules.imp.rst diff --git a/pypy/doc/config/objspace.usemodules.itertools.txt b/pypy/doc/config/objspace.usemodules.itertools.rst rename from pypy/doc/config/objspace.usemodules.itertools.txt rename to pypy/doc/config/objspace.usemodules.itertools.rst diff --git a/pypy/doc/config/objspace.usemodules.marshal.txt b/pypy/doc/config/objspace.usemodules.marshal.rst rename from pypy/doc/config/objspace.usemodules.marshal.txt rename to pypy/doc/config/objspace.usemodules.marshal.rst diff --git a/pypy/doc/config/objspace.usemodules.math.txt 
b/pypy/doc/config/objspace.usemodules.math.rst rename from pypy/doc/config/objspace.usemodules.math.txt rename to pypy/doc/config/objspace.usemodules.math.rst diff --git a/pypy/doc/config/objspace.usemodules.micronumpy.txt b/pypy/doc/config/objspace.usemodules.micronumpy.rst rename from pypy/doc/config/objspace.usemodules.micronumpy.txt rename to pypy/doc/config/objspace.usemodules.micronumpy.rst diff --git a/pypy/doc/config/objspace.usemodules.mmap.txt b/pypy/doc/config/objspace.usemodules.mmap.rst rename from pypy/doc/config/objspace.usemodules.mmap.txt rename to pypy/doc/config/objspace.usemodules.mmap.rst diff --git a/pypy/doc/config/objspace.usemodules.operator.txt b/pypy/doc/config/objspace.usemodules.operator.rst rename from pypy/doc/config/objspace.usemodules.operator.txt rename to pypy/doc/config/objspace.usemodules.operator.rst diff --git a/pypy/doc/config/objspace.usemodules.oracle.txt b/pypy/doc/config/objspace.usemodules.oracle.rst rename from pypy/doc/config/objspace.usemodules.oracle.txt rename to pypy/doc/config/objspace.usemodules.oracle.rst diff --git a/pypy/doc/config/objspace.usemodules.parser.txt b/pypy/doc/config/objspace.usemodules.parser.rst rename from pypy/doc/config/objspace.usemodules.parser.txt rename to pypy/doc/config/objspace.usemodules.parser.rst diff --git a/pypy/doc/config/objspace.usemodules.posix.txt b/pypy/doc/config/objspace.usemodules.posix.rst rename from pypy/doc/config/objspace.usemodules.posix.txt rename to pypy/doc/config/objspace.usemodules.posix.rst diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.txt b/pypy/doc/config/objspace.usemodules.pyexpat.rst rename from pypy/doc/config/objspace.usemodules.pyexpat.txt rename to pypy/doc/config/objspace.usemodules.pyexpat.rst diff --git a/pypy/doc/config/objspace.usemodules.pypyjit.txt b/pypy/doc/config/objspace.usemodules.pypyjit.rst rename from pypy/doc/config/objspace.usemodules.pypyjit.txt rename to pypy/doc/config/objspace.usemodules.pypyjit.rst diff --git 
a/pypy/doc/config/objspace.usemodules.rbench.txt b/pypy/doc/config/objspace.usemodules.rbench.rst rename from pypy/doc/config/objspace.usemodules.rbench.txt rename to pypy/doc/config/objspace.usemodules.rbench.rst diff --git a/pypy/doc/config/objspace.usemodules.rctime.txt b/pypy/doc/config/objspace.usemodules.rctime.rst rename from pypy/doc/config/objspace.usemodules.rctime.txt rename to pypy/doc/config/objspace.usemodules.rctime.rst diff --git a/pypy/doc/config/objspace.usemodules.txt b/pypy/doc/config/objspace.usemodules.rst rename from pypy/doc/config/objspace.usemodules.txt rename to pypy/doc/config/objspace.usemodules.rst diff --git a/pypy/doc/config/objspace.usemodules.select.txt b/pypy/doc/config/objspace.usemodules.select.rst rename from pypy/doc/config/objspace.usemodules.select.txt rename to pypy/doc/config/objspace.usemodules.select.rst diff --git a/pypy/doc/config/objspace.usemodules.signal.txt b/pypy/doc/config/objspace.usemodules.signal.rst rename from pypy/doc/config/objspace.usemodules.signal.txt rename to pypy/doc/config/objspace.usemodules.signal.rst diff --git a/pypy/doc/config/objspace.usemodules.struct.txt b/pypy/doc/config/objspace.usemodules.struct.rst rename from pypy/doc/config/objspace.usemodules.struct.txt rename to pypy/doc/config/objspace.usemodules.struct.rst diff --git a/pypy/doc/config/objspace.usemodules.symbol.txt b/pypy/doc/config/objspace.usemodules.symbol.rst rename from pypy/doc/config/objspace.usemodules.symbol.txt rename to pypy/doc/config/objspace.usemodules.symbol.rst diff --git a/pypy/doc/config/objspace.usemodules.sys.txt b/pypy/doc/config/objspace.usemodules.sys.rst rename from pypy/doc/config/objspace.usemodules.sys.txt rename to pypy/doc/config/objspace.usemodules.sys.rst diff --git a/pypy/doc/config/objspace.usemodules.termios.txt b/pypy/doc/config/objspace.usemodules.termios.rst rename from pypy/doc/config/objspace.usemodules.termios.txt rename to pypy/doc/config/objspace.usemodules.termios.rst diff --git 
a/pypy/doc/config/objspace.usemodules.thread.txt b/pypy/doc/config/objspace.usemodules.thread.rst rename from pypy/doc/config/objspace.usemodules.thread.txt rename to pypy/doc/config/objspace.usemodules.thread.rst diff --git a/pypy/doc/config/objspace.usemodules.time.txt b/pypy/doc/config/objspace.usemodules.time.rst rename from pypy/doc/config/objspace.usemodules.time.txt rename to pypy/doc/config/objspace.usemodules.time.rst diff --git a/pypy/doc/config/objspace.usemodules.token.txt b/pypy/doc/config/objspace.usemodules.token.rst rename from pypy/doc/config/objspace.usemodules.token.txt rename to pypy/doc/config/objspace.usemodules.token.rst diff --git a/pypy/doc/config/objspace.usemodules.unicodedata.txt b/pypy/doc/config/objspace.usemodules.unicodedata.rst rename from pypy/doc/config/objspace.usemodules.unicodedata.txt rename to pypy/doc/config/objspace.usemodules.unicodedata.rst diff --git a/pypy/doc/config/objspace.usemodules.zipimport.txt b/pypy/doc/config/objspace.usemodules.zipimport.rst rename from pypy/doc/config/objspace.usemodules.zipimport.txt rename to pypy/doc/config/objspace.usemodules.zipimport.rst diff --git a/pypy/doc/config/objspace.usemodules.zlib.txt b/pypy/doc/config/objspace.usemodules.zlib.rst rename from pypy/doc/config/objspace.usemodules.zlib.txt rename to pypy/doc/config/objspace.usemodules.zlib.rst diff --git a/pypy/doc/config/objspace.usepycfiles.txt b/pypy/doc/config/objspace.usepycfiles.rst rename from pypy/doc/config/objspace.usepycfiles.txt rename to pypy/doc/config/objspace.usepycfiles.rst diff --git a/pypy/doc/config/opt.txt b/pypy/doc/config/opt.rst rename from pypy/doc/config/opt.txt rename to pypy/doc/config/opt.rst diff --git a/pypy/doc/config/translation.backend.txt b/pypy/doc/config/translation.backend.rst rename from pypy/doc/config/translation.backend.txt rename to pypy/doc/config/translation.backend.rst diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt 
b/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst rename from pypy/doc/config/translation.backendopt.clever_malloc_removal.txt rename to pypy/doc/config/translation.backendopt.clever_malloc_removal.rst diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt b/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst rename from pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt rename to pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt b/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst rename from pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt rename to pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst diff --git a/pypy/doc/config/translation.backendopt.constfold.txt b/pypy/doc/config/translation.backendopt.constfold.rst rename from pypy/doc/config/translation.backendopt.constfold.txt rename to pypy/doc/config/translation.backendopt.constfold.rst diff --git a/pypy/doc/config/translation.backendopt.inline.txt b/pypy/doc/config/translation.backendopt.inline.rst rename from pypy/doc/config/translation.backendopt.inline.txt rename to pypy/doc/config/translation.backendopt.inline.rst diff --git a/pypy/doc/config/translation.backendopt.inline_heuristic.txt b/pypy/doc/config/translation.backendopt.inline_heuristic.rst rename from pypy/doc/config/translation.backendopt.inline_heuristic.txt rename to pypy/doc/config/translation.backendopt.inline_heuristic.rst diff --git a/pypy/doc/config/translation.backendopt.inline_threshold.txt b/pypy/doc/config/translation.backendopt.inline_threshold.rst rename from pypy/doc/config/translation.backendopt.inline_threshold.txt rename to pypy/doc/config/translation.backendopt.inline_threshold.rst diff --git a/pypy/doc/config/translation.backendopt.mallocs.txt 
b/pypy/doc/config/translation.backendopt.mallocs.rst rename from pypy/doc/config/translation.backendopt.mallocs.txt rename to pypy/doc/config/translation.backendopt.mallocs.rst diff --git a/pypy/doc/config/translation.backendopt.merge_if_blocks.txt b/pypy/doc/config/translation.backendopt.merge_if_blocks.rst rename from pypy/doc/config/translation.backendopt.merge_if_blocks.txt rename to pypy/doc/config/translation.backendopt.merge_if_blocks.rst diff --git a/pypy/doc/config/translation.backendopt.none.txt b/pypy/doc/config/translation.backendopt.none.rst rename from pypy/doc/config/translation.backendopt.none.txt rename to pypy/doc/config/translation.backendopt.none.rst diff --git a/pypy/doc/config/translation.backendopt.print_statistics.txt b/pypy/doc/config/translation.backendopt.print_statistics.rst rename from pypy/doc/config/translation.backendopt.print_statistics.txt rename to pypy/doc/config/translation.backendopt.print_statistics.rst diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.txt b/pypy/doc/config/translation.backendopt.profile_based_inline.rst rename from pypy/doc/config/translation.backendopt.profile_based_inline.txt rename to pypy/doc/config/translation.backendopt.profile_based_inline.rst diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt b/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst rename from pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt rename to pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt b/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst rename from pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt rename to pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst diff --git a/pypy/doc/config/translation.backendopt.raisingop2direct_call.txt 
b/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst rename from pypy/doc/config/translation.backendopt.raisingop2direct_call.txt rename to pypy/doc/config/translation.backendopt.raisingop2direct_call.rst diff --git a/pypy/doc/config/translation.backendopt.really_remove_asserts.txt b/pypy/doc/config/translation.backendopt.really_remove_asserts.rst rename from pypy/doc/config/translation.backendopt.really_remove_asserts.txt rename to pypy/doc/config/translation.backendopt.really_remove_asserts.rst diff --git a/pypy/doc/config/translation.backendopt.remove_asserts.txt b/pypy/doc/config/translation.backendopt.remove_asserts.rst rename from pypy/doc/config/translation.backendopt.remove_asserts.txt rename to pypy/doc/config/translation.backendopt.remove_asserts.rst diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.rst rename from pypy/doc/config/translation.backendopt.txt rename to pypy/doc/config/translation.backendopt.rst diff --git a/pypy/doc/config/translation.backendopt.stack_optimization.txt b/pypy/doc/config/translation.backendopt.stack_optimization.rst rename from pypy/doc/config/translation.backendopt.stack_optimization.txt rename to pypy/doc/config/translation.backendopt.stack_optimization.rst diff --git a/pypy/doc/config/translation.backendopt.storesink.txt b/pypy/doc/config/translation.backendopt.storesink.rst rename from pypy/doc/config/translation.backendopt.storesink.txt rename to pypy/doc/config/translation.backendopt.storesink.rst diff --git a/pypy/doc/config/translation.builtins_can_raise_exceptions.txt b/pypy/doc/config/translation.builtins_can_raise_exceptions.rst rename from pypy/doc/config/translation.builtins_can_raise_exceptions.txt rename to pypy/doc/config/translation.builtins_can_raise_exceptions.rst diff --git a/pypy/doc/config/translation.cc.txt b/pypy/doc/config/translation.cc.rst rename from pypy/doc/config/translation.cc.txt rename to pypy/doc/config/translation.cc.rst diff 
--git a/pypy/doc/config/translation.cli.exception_transformer.txt b/pypy/doc/config/translation.cli.exception_transformer.rst rename from pypy/doc/config/translation.cli.exception_transformer.txt rename to pypy/doc/config/translation.cli.exception_transformer.rst diff --git a/pypy/doc/config/translation.cli.txt b/pypy/doc/config/translation.cli.rst rename from pypy/doc/config/translation.cli.txt rename to pypy/doc/config/translation.cli.rst diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.rst rename from pypy/doc/config/translation.cli.trace_calls.txt rename to pypy/doc/config/translation.cli.trace_calls.rst diff --git a/pypy/doc/config/translation.compilerflags.txt b/pypy/doc/config/translation.compilerflags.rst rename from pypy/doc/config/translation.compilerflags.txt rename to pypy/doc/config/translation.compilerflags.rst diff --git a/pypy/doc/config/translation.countmallocs.txt b/pypy/doc/config/translation.countmallocs.rst rename from pypy/doc/config/translation.countmallocs.txt rename to pypy/doc/config/translation.countmallocs.rst diff --git a/pypy/doc/config/translation.debug.txt b/pypy/doc/config/translation.debug.rst rename from pypy/doc/config/translation.debug.txt rename to pypy/doc/config/translation.debug.rst diff --git a/pypy/doc/config/translation.dump_static_data_info.txt b/pypy/doc/config/translation.dump_static_data_info.rst rename from pypy/doc/config/translation.dump_static_data_info.txt rename to pypy/doc/config/translation.dump_static_data_info.rst diff --git a/pypy/doc/config/translation.force_make.txt b/pypy/doc/config/translation.force_make.rst rename from pypy/doc/config/translation.force_make.txt rename to pypy/doc/config/translation.force_make.rst diff --git a/pypy/doc/config/translation.fork_before.txt b/pypy/doc/config/translation.fork_before.rst rename from pypy/doc/config/translation.fork_before.txt rename to pypy/doc/config/translation.fork_before.rst diff --git 
a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.rst rename from pypy/doc/config/translation.gc.txt rename to pypy/doc/config/translation.gc.rst diff --git a/pypy/doc/config/translation.gcremovetypeptr.txt b/pypy/doc/config/translation.gcremovetypeptr.rst rename from pypy/doc/config/translation.gcremovetypeptr.txt rename to pypy/doc/config/translation.gcremovetypeptr.rst diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.rst rename from pypy/doc/config/translation.gcrootfinder.txt rename to pypy/doc/config/translation.gcrootfinder.rst diff --git a/pypy/doc/config/translation.gctransformer.txt b/pypy/doc/config/translation.gctransformer.rst rename from pypy/doc/config/translation.gctransformer.txt rename to pypy/doc/config/translation.gctransformer.rst diff --git a/pypy/doc/config/translation.insist.txt b/pypy/doc/config/translation.insist.rst rename from pypy/doc/config/translation.insist.txt rename to pypy/doc/config/translation.insist.rst diff --git a/pypy/doc/config/translation.instrument.txt b/pypy/doc/config/translation.instrument.rst rename from pypy/doc/config/translation.instrument.txt rename to pypy/doc/config/translation.instrument.rst diff --git a/pypy/doc/config/translation.instrumentctl.txt b/pypy/doc/config/translation.instrumentctl.rst rename from pypy/doc/config/translation.instrumentctl.txt rename to pypy/doc/config/translation.instrumentctl.rst diff --git a/pypy/doc/config/translation.jit.txt b/pypy/doc/config/translation.jit.rst rename from pypy/doc/config/translation.jit.txt rename to pypy/doc/config/translation.jit.rst diff --git a/pypy/doc/config/translation.jit_backend.txt b/pypy/doc/config/translation.jit_backend.rst rename from pypy/doc/config/translation.jit_backend.txt rename to pypy/doc/config/translation.jit_backend.rst diff --git a/pypy/doc/config/translation.jit_ffi.txt b/pypy/doc/config/translation.jit_ffi.rst rename from pypy/doc/config/translation.jit_ffi.txt 
rename to pypy/doc/config/translation.jit_ffi.rst diff --git a/pypy/doc/config/translation.jit_profiler.txt b/pypy/doc/config/translation.jit_profiler.rst rename from pypy/doc/config/translation.jit_profiler.txt rename to pypy/doc/config/translation.jit_profiler.rst diff --git a/pypy/doc/config/translation.linkerflags.txt b/pypy/doc/config/translation.linkerflags.rst rename from pypy/doc/config/translation.linkerflags.txt rename to pypy/doc/config/translation.linkerflags.rst diff --git a/pypy/doc/config/translation.list_comprehension_operations.txt b/pypy/doc/config/translation.list_comprehension_operations.rst rename from pypy/doc/config/translation.list_comprehension_operations.txt rename to pypy/doc/config/translation.list_comprehension_operations.rst diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.rst rename from pypy/doc/config/translation.log.txt rename to pypy/doc/config/translation.log.rst diff --git a/pypy/doc/config/translation.make_jobs.txt b/pypy/doc/config/translation.make_jobs.rst rename from pypy/doc/config/translation.make_jobs.txt rename to pypy/doc/config/translation.make_jobs.rst diff --git a/pypy/doc/config/translation.no__thread.txt b/pypy/doc/config/translation.no__thread.rst rename from pypy/doc/config/translation.no__thread.txt rename to pypy/doc/config/translation.no__thread.rst diff --git a/pypy/doc/config/translation.noprofopt.txt b/pypy/doc/config/translation.noprofopt.rst rename from pypy/doc/config/translation.noprofopt.txt rename to pypy/doc/config/translation.noprofopt.rst diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.rst rename from pypy/doc/config/translation.ootype.mangle.txt rename to pypy/doc/config/translation.ootype.mangle.rst diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.rst rename from pypy/doc/config/translation.ootype.txt rename to pypy/doc/config/translation.ootype.rst diff --git 
a/pypy/doc/config/translation.output.txt b/pypy/doc/config/translation.output.rst rename from pypy/doc/config/translation.output.txt rename to pypy/doc/config/translation.output.rst diff --git a/pypy/doc/config/translation.platform.txt b/pypy/doc/config/translation.platform.rst rename from pypy/doc/config/translation.platform.txt rename to pypy/doc/config/translation.platform.rst diff --git a/pypy/doc/config/translation.profopt.txt b/pypy/doc/config/translation.profopt.rst rename from pypy/doc/config/translation.profopt.txt rename to pypy/doc/config/translation.profopt.rst diff --git a/pypy/doc/config/translation.txt b/pypy/doc/config/translation.rst rename from pypy/doc/config/translation.txt rename to pypy/doc/config/translation.rst diff --git a/pypy/doc/config/translation.rweakref.txt b/pypy/doc/config/translation.rweakref.rst rename from pypy/doc/config/translation.rweakref.txt rename to pypy/doc/config/translation.rweakref.rst diff --git a/pypy/doc/config/translation.sandbox.txt b/pypy/doc/config/translation.sandbox.rst rename from pypy/doc/config/translation.sandbox.txt rename to pypy/doc/config/translation.sandbox.rst diff --git a/pypy/doc/config/translation.secondaryentrypoints.txt b/pypy/doc/config/translation.secondaryentrypoints.rst rename from pypy/doc/config/translation.secondaryentrypoints.txt rename to pypy/doc/config/translation.secondaryentrypoints.rst diff --git a/pypy/doc/config/translation.shared.txt b/pypy/doc/config/translation.shared.rst rename from pypy/doc/config/translation.shared.txt rename to pypy/doc/config/translation.shared.rst diff --git a/pypy/doc/config/translation.simplifying.txt b/pypy/doc/config/translation.simplifying.rst rename from pypy/doc/config/translation.simplifying.txt rename to pypy/doc/config/translation.simplifying.rst diff --git a/pypy/doc/config/translation.stackless.txt b/pypy/doc/config/translation.stackless.rst rename from pypy/doc/config/translation.stackless.txt rename to 
pypy/doc/config/translation.stackless.rst diff --git a/pypy/doc/config/translation.taggedpointers.txt b/pypy/doc/config/translation.taggedpointers.rst rename from pypy/doc/config/translation.taggedpointers.txt rename to pypy/doc/config/translation.taggedpointers.rst diff --git a/pypy/doc/config/translation.thread.txt b/pypy/doc/config/translation.thread.rst rename from pypy/doc/config/translation.thread.txt rename to pypy/doc/config/translation.thread.rst diff --git a/pypy/doc/config/translation.type_system.txt b/pypy/doc/config/translation.type_system.rst rename from pypy/doc/config/translation.type_system.txt rename to pypy/doc/config/translation.type_system.rst diff --git a/pypy/doc/config/translation.vanilla.txt b/pypy/doc/config/translation.vanilla.rst rename from pypy/doc/config/translation.vanilla.txt rename to pypy/doc/config/translation.vanilla.rst diff --git a/pypy/doc/config/translation.verbose.txt b/pypy/doc/config/translation.verbose.rst rename from pypy/doc/config/translation.verbose.txt rename to pypy/doc/config/translation.verbose.rst diff --git a/pypy/doc/config/translation.withsmallfuncsets.txt b/pypy/doc/config/translation.withsmallfuncsets.rst rename from pypy/doc/config/translation.withsmallfuncsets.txt rename to pypy/doc/config/translation.withsmallfuncsets.rst diff --git a/pypy/doc/configuration.txt b/pypy/doc/configuration.rst rename from pypy/doc/configuration.txt rename to pypy/doc/configuration.rst diff --git a/pypy/doc/conftest.py b/pypy/doc/conftest.py deleted file mode 100644 --- a/pypy/doc/conftest.py +++ /dev/null @@ -1,29 +0,0 @@ -import py - -from pypy.config.makerestdoc import register_config_role -docdir = py.path.local(__file__).dirpath() - -pytest_plugins = "pypy.doc.pytest_restdoc" - -def pytest_addoption(parser): - group = parser.getgroup("pypy-doc options") - group.addoption('--pypy-doctests', action="store_true", - dest="pypy_doctests", default=False, - help="enable doctests in .txt files") - 
group.addoption('--generate-redirections', - action="store_true", dest="generateredirections", - default=True, help="Generate redirecting HTML files") - -def pytest_configure(config): - register_config_role(docdir) - -def pytest_doctest_prepare_content(content): - if not py.test.config.getvalue("pypy_doctests"): - py.test.skip("specify --pypy-doctests to run doctests") - l = [] - for line in content.split("\n"): - if line.find('>>>>') != -1: - line = "" - l.append(line) - return "\n".join(l) - diff --git a/pypy/doc/contributor.txt b/pypy/doc/contributor.rst rename from pypy/doc/contributor.txt rename to pypy/doc/contributor.rst diff --git a/pypy/doc/cpython_differences.txt b/pypy/doc/cpython_differences.rst rename from pypy/doc/cpython_differences.txt rename to pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.txt +++ b/pypy/doc/cpython_differences.rst @@ -222,4 +222,4 @@ *more* case on PyPy than on CPython 2.6/2.7.) -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/crufty.rst b/pypy/doc/crufty.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/crufty.rst @@ -0,0 +1,3 @@ +.. warning:: + + This documentation may be out-of-date or obsolete (identified on 2011-03-14 at the PyCon US sprint) diff --git a/pypy/doc/ctypes-implementation.txt b/pypy/doc/ctypes-implementation.rst rename from pypy/doc/ctypes-implementation.txt rename to pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.txt +++ b/pypy/doc/ctypes-implementation.rst @@ -1,3 +1,4 @@ +.. 
include:: crufty.rst ============================= PyPy's ctypes implementation diff --git a/pypy/doc/dev_method.txt b/pypy/doc/dev_method.rst rename from pypy/doc/dev_method.txt rename to pypy/doc/dev_method.rst diff --git a/pypy/doc/discussion/GC-performance.txt b/pypy/doc/discussion/GC-performance.rst rename from pypy/doc/discussion/GC-performance.txt rename to pypy/doc/discussion/GC-performance.rst diff --git a/pypy/doc/discussion/VM-integration.txt b/pypy/doc/discussion/VM-integration.rst rename from pypy/doc/discussion/VM-integration.txt rename to pypy/doc/discussion/VM-integration.rst diff --git a/pypy/doc/discussion/chained_getattr.txt b/pypy/doc/discussion/chained_getattr.rst rename from pypy/doc/discussion/chained_getattr.txt rename to pypy/doc/discussion/chained_getattr.rst diff --git a/pypy/doc/discussion/cli-optimizations.txt b/pypy/doc/discussion/cli-optimizations.rst rename from pypy/doc/discussion/cli-optimizations.txt rename to pypy/doc/discussion/cli-optimizations.rst diff --git a/pypy/doc/discussion/cmd-prompt-translation.txt b/pypy/doc/discussion/cmd-prompt-translation.rst rename from pypy/doc/discussion/cmd-prompt-translation.txt rename to pypy/doc/discussion/cmd-prompt-translation.rst diff --git a/pypy/doc/discussion/compiled-swamp.txt b/pypy/doc/discussion/compiled-swamp.rst rename from pypy/doc/discussion/compiled-swamp.txt rename to pypy/doc/discussion/compiled-swamp.rst diff --git a/pypy/doc/discussion/ctypes_modules.txt b/pypy/doc/discussion/ctypes_modules.rst rename from pypy/doc/discussion/ctypes_modules.txt rename to pypy/doc/discussion/ctypes_modules.rst diff --git a/pypy/doc/discussion/ctypes_todo.txt b/pypy/doc/discussion/ctypes_todo.rst rename from pypy/doc/discussion/ctypes_todo.txt rename to pypy/doc/discussion/ctypes_todo.rst diff --git a/pypy/doc/discussion/distribution-implementation.txt b/pypy/doc/discussion/distribution-implementation.rst rename from pypy/doc/discussion/distribution-implementation.txt rename to 
pypy/doc/discussion/distribution-implementation.rst diff --git a/pypy/doc/discussion/distribution-newattempt.txt b/pypy/doc/discussion/distribution-newattempt.rst rename from pypy/doc/discussion/distribution-newattempt.txt rename to pypy/doc/discussion/distribution-newattempt.rst diff --git a/pypy/doc/discussion/distribution-roadmap.txt b/pypy/doc/discussion/distribution-roadmap.rst rename from pypy/doc/discussion/distribution-roadmap.txt rename to pypy/doc/discussion/distribution-roadmap.rst diff --git a/pypy/doc/discussion/distribution.txt b/pypy/doc/discussion/distribution.rst rename from pypy/doc/discussion/distribution.txt rename to pypy/doc/discussion/distribution.rst diff --git a/pypy/doc/discussion/emptying-the-malloc-zoo.txt b/pypy/doc/discussion/emptying-the-malloc-zoo.rst rename from pypy/doc/discussion/emptying-the-malloc-zoo.txt rename to pypy/doc/discussion/emptying-the-malloc-zoo.rst diff --git a/pypy/doc/discussion/finalizer-order.txt b/pypy/doc/discussion/finalizer-order.rst rename from pypy/doc/discussion/finalizer-order.txt rename to pypy/doc/discussion/finalizer-order.rst diff --git a/pypy/doc/discussion/gc.txt b/pypy/doc/discussion/gc.rst rename from pypy/doc/discussion/gc.txt rename to pypy/doc/discussion/gc.rst diff --git a/pypy/doc/discussion/howtoimplementpickling.txt b/pypy/doc/discussion/howtoimplementpickling.rst rename from pypy/doc/discussion/howtoimplementpickling.txt rename to pypy/doc/discussion/howtoimplementpickling.rst diff --git a/pypy/doc/discussion/improve-rpython.txt b/pypy/doc/discussion/improve-rpython.rst rename from pypy/doc/discussion/improve-rpython.txt rename to pypy/doc/discussion/improve-rpython.rst diff --git a/pypy/doc/discussion/outline-external-ootype.txt b/pypy/doc/discussion/outline-external-ootype.rst rename from pypy/doc/discussion/outline-external-ootype.txt rename to pypy/doc/discussion/outline-external-ootype.rst diff --git a/pypy/doc/discussion/oz-thread-api.txt b/pypy/doc/discussion/oz-thread-api.rst 
rename from pypy/doc/discussion/oz-thread-api.txt rename to pypy/doc/discussion/oz-thread-api.rst diff --git a/pypy/doc/discussion/paper-wishlist.txt b/pypy/doc/discussion/paper-wishlist.rst rename from pypy/doc/discussion/paper-wishlist.txt rename to pypy/doc/discussion/paper-wishlist.rst diff --git a/pypy/doc/discussion/parsing-ideas.txt b/pypy/doc/discussion/parsing-ideas.rst rename from pypy/doc/discussion/parsing-ideas.txt rename to pypy/doc/discussion/parsing-ideas.rst diff --git a/pypy/doc/discussion/pypy_metaclasses_in_cl.txt b/pypy/doc/discussion/pypy_metaclasses_in_cl.rst rename from pypy/doc/discussion/pypy_metaclasses_in_cl.txt rename to pypy/doc/discussion/pypy_metaclasses_in_cl.rst diff --git a/pypy/doc/discussion/removing-stable-compiler.txt b/pypy/doc/discussion/removing-stable-compiler.rst rename from pypy/doc/discussion/removing-stable-compiler.txt rename to pypy/doc/discussion/removing-stable-compiler.rst diff --git a/pypy/doc/discussion/security-ideas.txt b/pypy/doc/discussion/security-ideas.rst rename from pypy/doc/discussion/security-ideas.txt rename to pypy/doc/discussion/security-ideas.rst diff --git a/pypy/doc/discussion/somepbc-refactoring-plan.txt b/pypy/doc/discussion/somepbc-refactoring-plan.rst rename from pypy/doc/discussion/somepbc-refactoring-plan.txt rename to pypy/doc/discussion/somepbc-refactoring-plan.rst diff --git a/pypy/doc/discussion/summer-of-pypy-pytest.txt b/pypy/doc/discussion/summer-of-pypy-pytest.rst rename from pypy/doc/discussion/summer-of-pypy-pytest.txt rename to pypy/doc/discussion/summer-of-pypy-pytest.rst diff --git a/pypy/doc/discussion/testing-zope.txt b/pypy/doc/discussion/testing-zope.rst rename from pypy/doc/discussion/testing-zope.txt rename to pypy/doc/discussion/testing-zope.rst diff --git a/pypy/doc/discussion/thoughts_string_interning.txt b/pypy/doc/discussion/thoughts_string_interning.rst rename from pypy/doc/discussion/thoughts_string_interning.txt rename to 
pypy/doc/discussion/thoughts_string_interning.rst diff --git a/pypy/doc/discussion/translation-swamp.txt b/pypy/doc/discussion/translation-swamp.rst rename from pypy/doc/discussion/translation-swamp.txt rename to pypy/doc/discussion/translation-swamp.rst diff --git a/pypy/doc/discussion/use_case_of_logic.txt b/pypy/doc/discussion/use_case_of_logic.rst rename from pypy/doc/discussion/use_case_of_logic.txt rename to pypy/doc/discussion/use_case_of_logic.rst diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/discussions.rst @@ -0,0 +1,41 @@ +Old discussion notes needing categorization +------------------------------------------- + +The following are old discussion notes which may or may not reflect the current reality. + +Help from domain experts would be welcome, since some of these documents probably ought to be moved to a more prominent location, some should be deleted, and some left here. + + +.. toctree:: + + discussion/GC-performance.rst + discussion/VM-integration.rst + discussion/chained_getattr.rst + discussion/cli-optimizations.rst + discussion/cmd-prompt-translation.rst + discussion/compiled-swamp.rst + discussion/ctypes_modules.rst + discussion/ctypes_todo.rst + discussion/distribution.rst + discussion/distribution-implementation.rst + discussion/distribution-newattempt.rst + discussion/distribution-roadmap.rst + discussion/emptying-the-malloc-zoo.rst + discussion/finalizer-order.rst + discussion/gc.rst + discussion/howtoimplementpickling.rst + discussion/improve-rpython.rst + discussion/outline-external-ootype.rst + discussion/oz-thread-api.rst + discussion/paper-wishlist.rst + discussion/parsing-ideas.rst + discussion/pypy_metaclasses_in_cl.rst + discussion/removing-stable-compiler.rst + discussion/security-ideas.rst + discussion/somepbc-refactoring-plan.rst + discussion/summer-of-pypy-pytest.rst + discussion/testing-zope.rst + discussion/thoughts_string_interning.rst + 
discussion/translation-swamp.rst + discussion/use_case_of_logic.rst + diff --git a/pypy/doc/distribution.txt b/pypy/doc/distribution.rst rename from pypy/doc/distribution.txt rename to pypy/doc/distribution.rst --- a/pypy/doc/distribution.txt +++ b/pypy/doc/distribution.rst @@ -1,3 +1,6 @@ +.. include:: crufty.rst + + .. ^^ Incomplete, superceded elsewhere ======================== lib/distributed features diff --git a/pypy/doc/docindex.txt b/pypy/doc/docindex.rst rename from pypy/doc/docindex.txt rename to pypy/doc/docindex.rst --- a/pypy/doc/docindex.txt +++ b/pypy/doc/docindex.rst @@ -4,7 +4,7 @@ .. _Python: http://www.python.org/doc/2.5.2/ -.. sectnum:: + .. contents:: :depth: 1 @@ -310,5 +310,5 @@ .. _`graph viewer`: getting-started-dev.html#try-out-the-translator .. _`compatibility matrix`: image/compat-matrix.png -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/dot-net.rst @@ -0,0 +1,12 @@ +.NET support +============ + + .. warning:: + + The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. + +.. toctree:: + + cli-backend.rst + clr-module.rst + carbonpython.rst diff --git a/pypy/doc/download.txt b/pypy/doc/download.txt deleted file mode 100644 --- a/pypy/doc/download.txt +++ /dev/null @@ -1,7 +0,0 @@ - -Download one of the following release files: -============================================= - -Download page has moved to `pypy.org`_. - -.. 
_`pypy.org`: http://pypy.org/download.html diff --git a/pypy/doc/eventhistory.txt b/pypy/doc/eventhistory.rst rename from pypy/doc/eventhistory.txt rename to pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.txt +++ b/pypy/doc/eventhistory.rst @@ -1,4 +1,6 @@ - +------------ +More sprints +------------ The PyPy project is a worldwide collaborative effort and its members are organizing sprints and presenting results at conferences diff --git a/pypy/doc/extending.txt b/pypy/doc/extending.rst rename from pypy/doc/extending.txt rename to pypy/doc/extending.rst diff --git a/pypy/doc/externaltools.txt b/pypy/doc/externaltools.rst rename from pypy/doc/externaltools.txt rename to pypy/doc/externaltools.rst --- a/pypy/doc/externaltools.txt +++ b/pypy/doc/externaltools.rst @@ -1,3 +1,7 @@ +.. include:: crufty.rst + + .. ^^ Incomplete and wrong, superceded elsewhere + External tools&programs needed by PyPy ====================================== @@ -16,6 +20,8 @@ - gcc + - make + - Some libraries (these are Debian package names, adapt as needed): * ``python-dev`` diff --git a/pypy/doc/extradoc.txt b/pypy/doc/extradoc.rst rename from pypy/doc/extradoc.txt rename to pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.txt +++ b/pypy/doc/extradoc.rst @@ -1,5 +1,5 @@ ================================================= -PyPy - papers, talks and related projects +Papers, talks and related projects ================================================= Papers diff --git a/pypy/doc/faq.txt b/pypy/doc/faq.rst rename from pypy/doc/faq.txt rename to pypy/doc/faq.rst --- a/pypy/doc/faq.txt +++ b/pypy/doc/faq.rst @@ -416,7 +416,7 @@ .. _`RPython`: coding-guide.html#rpython .. _`getting-started`: getting-started.html -.. include:: _ref.txt +.. include:: _ref.rst ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? 
diff --git a/pypy/doc/garbage_collection.txt b/pypy/doc/garbage_collection.rst rename from pypy/doc/garbage_collection.txt rename to pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.txt +++ b/pypy/doc/garbage_collection.rst @@ -3,7 +3,7 @@ ========================== .. contents:: -.. sectnum:: + Introduction ============ @@ -124,4 +124,4 @@ More details are available as comments at the start of the source in `rpython/memory/gc/markcompact.py`_. -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/geninterp.txt b/pypy/doc/geninterp.rst rename from pypy/doc/geninterp.txt rename to pypy/doc/geninterp.rst --- a/pypy/doc/geninterp.txt +++ b/pypy/doc/geninterp.rst @@ -1,3 +1,7 @@ +.. include:: crufty.rst + + .. ^^ apparently dead + The Interpreter-Level backend ----------------------------- @@ -42,7 +46,7 @@ Example +++++++ -.. _implementation: ../../pypy/translator/geninterplevel.py +.. _implementation: ../../../../pypy/translator/geninterplevel.py Let's try a little example. You might want to look at the flowgraph that it produces. Here, we directly run the Python translation and look at the diff --git a/pypy/doc/getting-started-dev.txt b/pypy/doc/getting-started-dev.rst rename from pypy/doc/getting-started-dev.txt rename to pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.txt +++ b/pypy/doc/getting-started-dev.rst @@ -1,9 +1,8 @@ =============================================================================== -PyPy - Getting Started with the Translation Toolchain and Development Process +Getting Started with the Translation Toolchain and Development Process =============================================================================== .. contents:: -.. sectnum:: .. _`try out the translator`: @@ -18,9 +17,7 @@ * Download and install Pygame_. 
- * Download and install `Dot Graphviz`_ (optional if you have an internet - connection: the flowgraph viewer then connects to - codespeak.net and lets it convert the flowgraph by a graphviz server). + * Download and install `Dot Graphviz`_ To start the interactive translator shell do:: @@ -410,16 +407,16 @@ .. _mixedmodule.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/mixedmodule.py .. _typedef.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/typedef.py .. _Standard object space: objspace.html#the-standard-object-space -.. _objspace.py: ../../pypy/objspace/std/objspace.py -.. _thunk: ../../pypy/objspace/thunk.py -.. _trace: ../../pypy/objspace/trace.py -.. _flow: ../../pypy/objspace/flow/ -.. _translator.py: ../../pypy/translator/translator.py +.. _objspace.py: ../../../../pypy/objspace/std/objspace.py +.. _thunk: ../../../../pypy/objspace/thunk.py +.. _trace: ../../../../pypy/objspace/trace.py +.. _flow: ../../../../pypy/objspace/flow/ +.. _translator.py: ../../../../pypy/translator/translator.py .. _mailing lists: index.html .. _documentation: docindex.html .. _unit tests: coding-guide.html#test-design .. _`directory reference`: docindex.html#directory-reference -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/getting-started-python.txt b/pypy/doc/getting-started-python.rst rename from pypy/doc/getting-started-python.txt rename to pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.txt +++ b/pypy/doc/getting-started-python.rst @@ -3,7 +3,7 @@ ============================================== .. contents:: -.. sectnum:: + PyPy's Python interpreter is a very compliant Python interpreter implemented in Python. When translated to C, it passes most of @@ -35,20 +35,31 @@ You can translate the whole of PyPy's Python interpreter to low level C code, `CLI code`_, or `JVM code`_. -1. Install dependencies. You need (these are Debian package names, - adapt as needed): +1. Install build-time dependencies. 
On a Debian box these are:: - * ``gcc`` - * ``python-dev`` - * ``python-ctypes`` if you are still using Python2.4 - * ``libffi-dev`` + [user at debian-box ~]$ sudo apt-get install \ + gcc make python-dev libffi-dev pkg-config \ + libz-dev libbz2-dev libncurses-dev libexpat1-dev \ + libssl-dev libgc-dev python-sphinx python-greenlet + + On a Fedora box these are:: + + [user at fedora-or-rh-box ~]$ sudo yum install \ + gcc make python-devel libffi-devel pkg-config \ + zlib-devel bzip2-devel ncurses-devel expat-devel \ + openssl-devel gc-devel python-sphinx python-greenlet + + The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. + * ``pkg-config`` (to help us locate libffi files) * ``libz-dev`` (for the optional ``zlib`` module) * ``libbz2-dev`` (for the optional ``bz2`` module) * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) * ``libexpat1-dev`` (for the optional ``pyexpat`` module) * ``libssl-dev`` (for the optional ``_ssl`` module) - * ``libgc-dev`` (Boehm: only when translating with `--opt=0, 1` or `size`) + * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) + * ``python-sphinx`` (for the optional documentation build) + * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) 2. Translation is somewhat time-consuming (30 min to over one hour) and RAM-hungry. If you have less than 1.5 GB of @@ -68,8 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. (As of March 2010, the default level is ``--opt=2``, and - ``--opt=jit`` requires an Intel **32-bit** environment.) + compiler. As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html @@ -222,6 +233,12 @@ ../../.. etc. 
+If the executable fails to find suitable libraries, it will report +``debug: WARNING: library path not found, using compiled-in sys.path`` +and then attempt to continue normally. If the default path is usable, +most code will be fine. However, the ``sys.prefix`` will be unset +and some existing libraries assume that this is never the case. + In order to use ``distutils`` or ``setuptools`` a directory ``PREFIX/site-packages`` needs to be created. Here's an example session setting up and using ``easy_install``:: $ cd PREFIX @@ -299,4 +316,4 @@ .. _clr: clr-module.html .. _`CPythons core language regression tests`: http://codespeak.net:8099/summary?category=applevel&branch=%3Ctrunk%3E -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/getting-started.txt b/pypy/doc/getting-started.rst rename from pypy/doc/getting-started.txt rename to pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.txt +++ b/pypy/doc/getting-started.rst @@ -1,9 +1,8 @@ ================================== -PyPy - Getting Started +Getting Started ================================== .. contents:: -.. sectnum:: .. _howtopypy: @@ -34,9 +33,11 @@ repository using mercurial. We suggest using mercurial if one wants to access the current development. -.. _`downloading them from the download page`: download.html +.. _`downloading them from the download page`: http://pypy.org/download.html -If you choose to use mercurial, you must issue the following command on your +If you choose to use mercurial, +first make sure you have ``subversion`` installed. +You must issue the following command on your command line, DOS box, or terminal:: hg clone http://bitbucket.org/pypy/pypy pypy @@ -120,4 +121,4 @@ .. _bug reports: https://codespeak.net/issue/pypy-dev/ -.. include:: _ref.txt +.. 
include:: _ref.rst diff --git a/pypy/doc/glossary.txt b/pypy/doc/glossary.rst rename from pypy/doc/glossary.txt rename to pypy/doc/glossary.rst --- a/pypy/doc/glossary.txt +++ b/pypy/doc/glossary.rst @@ -1,7 +1,17 @@ +.. _glossary: + +******** +Glossary +******** + PyPy, like any large project, has developed a jargon of its own. This document gives brief definition of some of these terms and provides links to more information. +.. if you add new entries, keep the alphabetical sorting! + +.. glossary:: + **abstract interpretation** The technique of interpreting the bytecode of a user program with an interpreter that handles abstract objects instead of concrete ones. @@ -234,4 +244,4 @@ .. _`subsystem implementing the Python language`: architecture.html#standard-interpreter .. _Theory: theory.html -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/how-to-release.txt b/pypy/doc/how-to-release.rst rename from pypy/doc/how-to-release.txt rename to pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.txt +++ b/pypy/doc/how-to-release.rst @@ -1,3 +1,5 @@ +.. include:: crufty.rst + Making a PyPy Release ======================= diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/index-of-release-notes.rst @@ -0,0 +1,17 @@ +Historical release notes +------------------------ + +.. toctree:: + + release-0.6 + release-0.7.0.rst + release-0.8.0.rst + release-0.9.0.rst + release-0.99.0.rst + release-1.0.0.rst + release-1.1.0.rst + release-1.2.0.rst + release-1.3.0.rst + release-1.4.0.rst + release-1.4.0beta.rst + release-1.4.1.rst diff --git a/pypy/doc/index-report.txt b/pypy/doc/index-report.rst rename from pypy/doc/index-report.txt rename to pypy/doc/index-report.rst --- a/pypy/doc/index-report.txt +++ b/pypy/doc/index-report.rst @@ -1,3 +1,5 @@ +.. 
include:: crufty.rst + ============================================ PyPy - Overview over the EU-reports ============================================ diff --git a/pypy/doc/index.txt b/pypy/doc/index.rst rename from pypy/doc/index.txt rename to pypy/doc/index.rst --- a/pypy/doc/index.txt +++ b/pypy/doc/index.rst @@ -1,3 +1,6 @@ + +Welcome to PyPy Development +============================================= The PyPy project aims at producing a flexible and fast Python_ implementation. The guiding idea is to translate a Python-level @@ -12,10 +15,17 @@ * `PyPy Blog`_: news and status info about PyPy -* `Documentation`_: extensive documentation and papers_ about PyPy. +* `Documentation`_: extensive documentation about PyPy. * `Getting Started`_: Getting started and playing with PyPy. +* `Papers`_: Academic papers, talks, and related projects + +* `Videos`_: Videos of PyPy talks and presentations + +* `speed.pypy.org`_: Daily benchmarks of how fast PyPy is + + Mailing lists, bug tracker, IRC channel ============================================= @@ -55,5 +65,79 @@ .. _`FAQ`: faq.html .. _`Documentation`: docindex.html .. _`Getting Started`: getting-started.html -.. _papers: extradoc.html +.. _`Papers`: extradoc.html +.. _`Videos`: video-index.html .. _`Release 1.4`: http://pypy.org/download.html +.. _`speed.pypy.org`: http://speed.pypy.org + +Detailed Documentation +====================== + +.. The following documentation is important and reasonably up-to-date: + +.. extradoc: should this be integrated one level up: dcolish? + + +.. 
toctree:: + :maxdepth: 1 + + getting-started.rst + getting-started-python.rst + getting-started-dev.rst + windows.rst + faq.rst + architecture.rst + coding-guide.rst + cpython_differences.rst + cleanup-todo.rst + garbage_collection.rst + interpreter.rst + objspace.rst + + dev_method.rst + extending.rst + + extradoc.rst + + glossary.rst + + contributor.rst + + interpreter-optimizations.rst + configuration.rst + low-level-encapsulation.rst + parser.rst + rlib.rst + rtyper.rst + translation.rst + jit/_ref.rst + jit/index.rst + jit/overview.rst + jit/pyjitpl5.rst + + index-of-release-notes.rst + + ctypes-implementation.rst + + how-to-release.rst + + index-report.rst + + stackless.rst + + discussions.rst + + cleanup.rst + + sprint-reports.rst + + eventhistory.rst + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` +* :ref:`glossary` + diff --git a/pypy/doc/interpreter-optimizations.txt b/pypy/doc/interpreter-optimizations.rst rename from pypy/doc/interpreter-optimizations.txt rename to pypy/doc/interpreter-optimizations.rst diff --git a/pypy/doc/interpreter.txt b/pypy/doc/interpreter.rst rename from pypy/doc/interpreter.txt rename to pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.txt +++ b/pypy/doc/interpreter.rst @@ -1,9 +1,9 @@ =================================== -PyPy - Bytecode Interpreter +Bytecode Interpreter =================================== .. contents:: -.. sectnum:: + Introduction and Overview @@ -407,4 +407,4 @@ as a reference for the exact attributes of interpreter classes visible at application level. -.. include:: _ref.txt +.. 
include:: _ref.rst diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.rst rename from pypy/doc/jit/_ref.txt rename to pypy/doc/jit/_ref.rst diff --git a/pypy/doc/jit/index.txt b/pypy/doc/jit/index.rst rename from pypy/doc/jit/index.txt rename to pypy/doc/jit/index.rst diff --git a/pypy/doc/jit/overview.txt b/pypy/doc/jit/overview.rst rename from pypy/doc/jit/overview.txt rename to pypy/doc/jit/overview.rst --- a/pypy/doc/jit/overview.txt +++ b/pypy/doc/jit/overview.rst @@ -3,7 +3,6 @@ ------------------------------------------------------------------------ .. contents:: -.. sectnum:: This is a non-technical introduction and motivation for PyPy's approach to Just-In-Time compiler generation. diff --git a/pypy/doc/jit/pyjitpl5.txt b/pypy/doc/jit/pyjitpl5.rst rename from pypy/doc/jit/pyjitpl5.txt rename to pypy/doc/jit/pyjitpl5.rst diff --git a/pypy/doc/low-level-encapsulation.txt b/pypy/doc/low-level-encapsulation.rst rename from pypy/doc/low-level-encapsulation.txt rename to pypy/doc/low-level-encapsulation.rst --- a/pypy/doc/low-level-encapsulation.txt +++ b/pypy/doc/low-level-encapsulation.rst @@ -3,7 +3,7 @@ ============================================================ .. contents:: -.. sectnum:: + Abstract diff --git a/pypy/doc/maemo.txt b/pypy/doc/maemo.rst rename from pypy/doc/maemo.txt rename to pypy/doc/maemo.rst diff --git a/pypy/doc/make.bat b/pypy/doc/make.bat new file mode 100644 --- /dev/null +++ b/pypy/doc/make.bat @@ -0,0 +1,113 @@ + at ECHO OFF + +REM Command file for Sphinx documentation + +set SPHINXBUILD=sphinx-build +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. pickle to make pickle files + echo. 
json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyPy.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyPy.ghc + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 
+ goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +:end diff --git a/pypy/doc/objspace-proxies.txt b/pypy/doc/objspace-proxies.rst rename from pypy/doc/objspace-proxies.txt rename to pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.txt +++ b/pypy/doc/objspace-proxies.rst @@ -3,7 +3,7 @@ ================================= .. contents:: -.. sectnum:: + Thanks to the `Object Space`_ architecture, any feature that is @@ -607,12 +607,12 @@ lists, dicts, exceptions, tracebacks and frames. .. _`standard object space`: objspace.html#the-standard-object-space -.. _`proxy_helpers.py`: ../../pypy/objspace/std/proxy_helpers.py -.. _`proxyobject.py`: ../../pypy/objspace/std/proxyobject.py -.. _`transparent.py`: ../../pypy/objspace/std/transparent.py +.. _`proxy_helpers.py`: ../../../../pypy/objspace/std/proxy_helpers.py +.. _`proxyobject.py`: ../../../../pypy/objspace/std/proxyobject.py +.. _`transparent.py`: ../../../../pypy/objspace/std/transparent.py .. _`tputil.py`: ../../lib_pypy/tputil.py .. [D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf -.. include:: _ref.txt +.. 
include:: _ref.rst diff --git a/pypy/doc/objspace.txt b/pypy/doc/objspace.rst rename from pypy/doc/objspace.txt rename to pypy/doc/objspace.rst --- a/pypy/doc/objspace.txt +++ b/pypy/doc/objspace.rst @@ -1,9 +1,9 @@ ====================== -PyPy - Object Spaces +Object Spaces ====================== .. contents:: -.. sectnum:: + .. _`objectspace`: .. _`Object Space`: @@ -341,7 +341,7 @@ using plain integers instead is the complex path, not the other way around. -.. _StdObjSpace: ../objspace/std/ +.. _StdObjSpace: ../../../../pypy/objspace/std/ Object types @@ -394,10 +394,10 @@ For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. -.. _`listtype.py`: ../objspace/std/listtype.py -.. _`stringtype.py`: ../objspace/std/stringtype.py -.. _`tupletype.py`: ../objspace/std/tupletype.py -.. _`tupleobject.py`: ../objspace/std/tupleobject.py +.. _`listtype.py`: ../../../../pypy/objspace/std/listtype.py +.. _`stringtype.py`: ../../../../pypy/objspace/std/stringtype.py +.. _`tupletype.py`: ../../../../pypy/objspace/std/tupletype.py +.. _`tupleobject.py`: ../../../../pypy/objspace/std/tupleobject.py .. _`Standard Interpreter Optimizations`: interpreter-optimizations.html @@ -412,7 +412,7 @@ alone are not enough for the Standard Object Space: the complete picture spans several levels in order to emulate the exact Python semantics. -.. __: ../objspace/std/multimethod.py +.. __: ../../../../pypy/objspace/std/multimethod.py Consider the example of the ``space.getitem(w_a, w_b)`` operation, corresponding to the application-level syntax ``a[b]``. The Standard @@ -600,7 +600,7 @@ v3 = add(v2, Constant(2)) -.. _FlowObjSpace: ../objspace/flow/ +.. _FlowObjSpace: ../../../../pypy/objspace/flow/ The Flow model @@ -650,4 +650,4 @@ .. _`What PyPy can do for your objects`: objspace-proxies.html -.. include:: _ref.txt +.. 
include:: _ref.rst diff --git a/pypy/doc/old_news.txt b/pypy/doc/old_news.rst rename from pypy/doc/old_news.txt rename to pypy/doc/old_news.rst diff --git a/pypy/doc/parser.txt b/pypy/doc/parser.rst rename from pypy/doc/parser.txt rename to pypy/doc/parser.rst --- a/pypy/doc/parser.txt +++ b/pypy/doc/parser.rst @@ -100,4 +100,4 @@ information like the line number table and stack depth are computed. Finally, everything is passed to a brand new ``PyCode`` object. -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/project-ideas.txt b/pypy/doc/project-ideas.rst rename from pypy/doc/project-ideas.txt rename to pypy/doc/project-ideas.rst diff --git a/pypy/doc/pytest_restdoc.py b/pypy/doc/pytest_restdoc.py deleted file mode 100644 --- a/pypy/doc/pytest_restdoc.py +++ /dev/null @@ -1,434 +0,0 @@ -""" -perform ReST syntax, local and remote reference tests on .rst/.txt files. -""" -import py -import sys, os, re - -def pytest_addoption(parser): - group = parser.getgroup("ReST", "ReST documentation check options") - group.addoption('-R', '--urlcheck', - action="store_true", dest="urlcheck", default=False, - help="urlopen() remote links found in ReST text files.") - group.addoption('--urltimeout', action="store", metavar="secs", - type="int", dest="urlcheck_timeout", default=5, - help="timeout in seconds for remote urlchecks") - group.addoption('--forcegen', - action="store_true", dest="forcegen", default=False, - help="force generation of html files.") - -def pytest_collect_file(path, parent): - if path.ext in (".txt", ".rst"): - project = getproject(path) - if project is not None: - return ReSTFile(path, parent=parent, project=project) - -def getproject(path): - for parent in path.parts(reverse=True): - confrest = parent.join("confrest.py") - if confrest.check(): - Project = confrest.pyimport().Project - return Project(parent) - -class ReSTFile(py.test.collect.File): - def __init__(self, fspath, parent, project): - super(ReSTFile, 
self).__init__(fspath=fspath, parent=parent) - self.project = project - - def collect(self): - return [ - ReSTSyntaxTest("ReSTSyntax", parent=self, project=self.project), - LinkCheckerMaker("checklinks", parent=self), - DoctestText("doctest", parent=self), - ] - -def deindent(s, sep='\n'): - leastspaces = -1 - lines = s.split(sep) - for line in lines: - if not line.strip(): - continue - spaces = len(line) - len(line.lstrip()) - if leastspaces == -1 or spaces < leastspaces: - leastspaces = spaces - if leastspaces == -1: - return s - for i, line in enumerate(lines): - if not line.strip(): - lines[i] = '' - else: - lines[i] = line[leastspaces:] - return sep.join(lines) - -class ReSTSyntaxTest(py.test.collect.Item): - def __init__(self, name, parent, project): - super(ReSTSyntaxTest, self).__init__(name=name, parent=parent) - self.project = project - - def reportinfo(self): - return self.fspath, None, "syntax check" - - def runtest(self): - self.restcheck(py.path.svnwc(self.fspath)) - - def restcheck(self, path): - py.test.importorskip("docutils") - self.register_linkrole() - from docutils.utils import SystemMessage - try: - self._checkskip(path, self.project.get_htmloutputpath(path)) - self.project.process(path) - except KeyboardInterrupt: - raise - except SystemExit, error: - if error.message == "ERROR: dot not found": - py.test.skip("system doesn't have graphviz installed") - return - raise - except SystemMessage: - # we assume docutils printed info on stdout - py.test.fail("docutils processing failed, see captured stderr") - - def register_linkrole(self): - #directive.register_linkrole('api', self.resolve_linkrole) - #directive.register_linkrole('source', self.resolve_linkrole) -# -# # XXX fake sphinx' "toctree" and refs -# directive.register_linkrole('ref', self.resolve_linkrole) - - from docutils.parsers.rst import directives - def toctree_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return [] - 
toctree_directive.content = 1 - toctree_directive.options = {'maxdepth': int, 'glob': directives.flag, - 'hidden': directives.flag} - directives.register_directive('toctree', toctree_directive) - self.register_pygments() - - def register_pygments(self): - # taken from pygments-main/external/rst-directive.py - from docutils.parsers.rst import directives - try: - from pygments.formatters import HtmlFormatter - except ImportError: - def pygments_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return [] - pygments_directive.options = {} - else: - # The default formatter - DEFAULT = HtmlFormatter(noclasses=True) - # Add name -> formatter pairs for every variant you want to use - VARIANTS = { - # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True), - } - - from docutils import nodes - - from pygments import highlight - from pygments.lexers import get_lexer_by_name, TextLexer - - def pygments_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - try: - lexer = get_lexer_by_name(arguments[0]) - except ValueError: - # no lexer found - use the text one instead of an exception - lexer = TextLexer() - # take an arbitrary option if more than one is given - formatter = options and VARIANTS[options.keys()[0]] or DEFAULT - parsed = highlight('\n'.join(content), lexer, formatter) - return [nodes.raw('', parsed, format='html')] - - pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS]) - - pygments_directive.arguments = (1, 0, 1) - pygments_directive.content = 1 - directives.register_directive('sourcecode', pygments_directive) - - def resolve_linkrole(self, name, text, check=True): - apigen_relpath = self.project.apigen_relpath - - if name == 'api': - if text == 'py': - return ('py', apigen_relpath + 'api/index.html') - else: - assert text.startswith('py.'), ( - 'api link "%s" does not point to the py package') % (text,) - dotted_name 
= text - if dotted_name.find('(') > -1: - dotted_name = dotted_name[:text.find('(')] - # remove pkg root - path = dotted_name.split('.')[1:] - dotted_name = '.'.join(path) - obj = py - if check: - for chunk in path: - try: - obj = getattr(obj, chunk) - except AttributeError: - raise AssertionError( - 'problem with linkrole :api:`%s`: can not resolve ' - 'dotted name %s' % (text, dotted_name,)) - return (text, apigen_relpath + 'api/%s.html' % (dotted_name,)) - elif name == 'source': - assert text.startswith('py/'), ('source link "%s" does not point ' - 'to the py package') % (text,) - relpath = '/'.join(text.split('/')[1:]) - if check: - pkgroot = py._pydir - abspath = pkgroot.join(relpath) - assert pkgroot.join(relpath).check(), ( - 'problem with linkrole :source:`%s`: ' - 'path %s does not exist' % (text, relpath)) - if relpath.endswith('/') or not relpath: - relpath += 'index.html' - else: - relpath += '.html' - return (text, apigen_relpath + 'source/%s' % (relpath,)) - elif name == 'ref': - return ("", "") - - def _checkskip(self, lpath, htmlpath=None): - if not self.config.getvalue("forcegen"): - lpath = py.path.local(lpath) - if htmlpath is not None: - htmlpath = py.path.local(htmlpath) - if lpath.ext == '.txt': - htmlpath = htmlpath or lpath.new(ext='.html') - if htmlpath.check(file=1) and htmlpath.mtime() >= lpath.mtime(): - py.test.skip("html file is up to date, use --forcegen to regenerate") - #return [] # no need to rebuild - -class DoctestText(py.test.collect.Item): - def reportinfo(self): - return self.fspath, None, "doctest" - - def runtest(self): - content = self._normalize_linesep() - newcontent = self.config.hook.pytest_doctest_prepare_content(content=content) - if newcontent is not None: - content = newcontent - s = content - l = [] - prefix = '.. 
>>> ' - mod = py.std.types.ModuleType(self.fspath.purebasename) - skipchunk = False - for line in deindent(s).split('\n'): - stripped = line.strip() - if skipchunk and line.startswith(skipchunk): - py.builtin.print_("skipping", line) - continue - skipchunk = False - if stripped.startswith(prefix): - try: - py.builtin.exec_(py.code.Source( - stripped[len(prefix):]).compile(), mod.__dict__) - except ValueError: - e = sys.exc_info()[1] - if e.args and e.args[0] == "skipchunk": - skipchunk = " " * (len(line) - len(line.lstrip())) - else: - raise - else: - l.append(line) - docstring = "\n".join(l) - mod.__doc__ = docstring - failed, tot = py.std.doctest.testmod(mod, verbose=1) - if failed: - py.test.fail("doctest %s: %s failed out of %s" %( - self.fspath, failed, tot)) - - def _normalize_linesep(self): - # XXX quite nasty... but it works (fixes win32 issues) - s = self.fspath.read() - linesep = '\n' - if '\r' in s: - if '\n' not in s: - linesep = '\r' - else: - linesep = '\r\n' - s = s.replace(linesep, '\n') - return s - -class LinkCheckerMaker(py.test.collect.Collector): - def collect(self): - return list(self.genlinkchecks()) - - def genlinkchecks(self): - path = self.fspath - # generating functions + args as single tests - timeout = self.config.getvalue("urlcheck_timeout") - for lineno, line in enumerate(path.readlines()): - line = line.strip() - if line.startswith('.. _'): - if line.startswith('.. 
_`'): - delim = '`:' - else: - delim = ':' - l = line.split(delim, 1) - if len(l) != 2: - continue - tryfn = l[1].strip() - name = "%s:%d" %(tryfn, lineno) - if tryfn.startswith('http:') or tryfn.startswith('https'): - if self.config.getvalue("urlcheck"): - yield CheckLink(name, parent=self, - args=(tryfn, path, lineno, timeout), checkfunc=urlcheck) - elif tryfn.startswith('webcal:'): - continue - else: - i = tryfn.find('#') - if i != -1: - checkfn = tryfn[:i] - else: - checkfn = tryfn - if checkfn.strip() and (1 or checkfn.endswith('.html')): - yield CheckLink(name, parent=self, - args=(tryfn, path, lineno), checkfunc=localrefcheck) - -class CheckLink(py.test.collect.Item): - def __init__(self, name, parent, args, checkfunc): - super(CheckLink, self).__init__(name, parent) - self.args = args - self.checkfunc = checkfunc - - def runtest(self): - return self.checkfunc(*self.args) - - def reportinfo(self, basedir=None): - return (self.fspath, self.args[2], "checklink: %s" % self.args[0]) - -def urlcheck(tryfn, path, lineno, TIMEOUT_URLOPEN): - old = py.std.socket.getdefaulttimeout() - py.std.socket.setdefaulttimeout(TIMEOUT_URLOPEN) - try: - try: - py.builtin.print_("trying remote", tryfn) - py.std.urllib2.urlopen(tryfn) - finally: - py.std.socket.setdefaulttimeout(old) - except (py.std.urllib2.URLError, py.std.urllib2.HTTPError): - e = sys.exc_info()[1] - if getattr(e, 'code', None) in (401, 403): # authorization required, forbidden - py.test.skip("%s: %s" %(tryfn, str(e))) - else: - py.test.fail("remote reference error %r in %s:%d\n%s" %( - tryfn, path.basename, lineno+1, e)) - -def localrefcheck(tryfn, path, lineno): - # assume it should be a file - i = tryfn.find('#') - if tryfn.startswith('javascript:'): - return # don't check JS refs - if i != -1: - anchor = tryfn[i+1:] - tryfn = tryfn[:i] - else: - anchor = '' - fn = path.dirpath(tryfn) - ishtml = fn.ext == '.html' - fn = ishtml and fn.new(ext='.txt') or fn - py.builtin.print_("filename is", fn) - if not 
fn.check(): # not ishtml or not fn.check(): - if not py.path.local(tryfn).check(): # the html could be there - py.test.fail("reference error %r in %s:%d" %( - tryfn, path.basename, lineno+1)) - if anchor: - source = unicode(fn.read(), 'latin1') - source = source.lower().replace('-', ' ') # aehem - - anchor = anchor.replace('-', ' ') - match2 = ".. _`%s`:" % anchor - match3 = ".. _%s:" % anchor - candidates = (anchor, match2, match3) - py.builtin.print_("candidates", repr(candidates)) - for line in source.split('\n'): - line = line.strip() - if line in candidates: - break - else: - py.test.fail("anchor reference error %s#%s in %s:%d" %( - tryfn, anchor, path.basename, lineno+1)) - -if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): - def log(msg): - print(msg) -else: - def log(msg): - pass - -def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'): - """ return html latin1-encoded document for the given input. - source a ReST-string - sourcepath where to look for includes (basically) - stylesheet path (to be used if any) - """ - from docutils.core import publish_string - kwargs = { - 'stylesheet' : stylesheet, - 'stylesheet_path': None, - 'traceback' : 1, - 'embed_stylesheet': 0, - 'output_encoding' : encoding, - #'halt' : 0, # 'info', - 'halt_level' : 2, - } - # docutils uses os.getcwd() :-( - source_path = os.path.abspath(str(source_path)) - prevdir = os.getcwd() - try: - #os.chdir(os.path.dirname(source_path)) - return publish_string(source, source_path, writer_name='html', - settings_overrides=kwargs) - finally: - os.chdir(prevdir) - -def process(txtpath, encoding='latin1'): - """ process a textfile """ - log("processing %s" % txtpath) - assert txtpath.check(ext='.txt') - if isinstance(txtpath, py.path.svnwc): - txtpath = txtpath.localpath - htmlpath = txtpath.new(ext='.html') - #svninfopath = txtpath.localpath.new(ext='.svninfo') - - style = txtpath.dirpath('style.css') - if style.check(): - stylesheet = style.basename - 
else: - stylesheet = None - content = unicode(txtpath.read(), encoding) - doc = convert_rest_html(content, txtpath, stylesheet=stylesheet, encoding=encoding) - htmlpath.open('wb').write(doc) - #log("wrote %r" % htmlpath) - #if txtpath.check(svnwc=1, versioned=1): - # info = txtpath.info() - # svninfopath.dump(info) - -if sys.version_info > (3, 0): - def _uni(s): return s -else: - def _uni(s): - return unicode(s) - -rex1 = re.compile(r'.*(.*).*', re.MULTILINE | re.DOTALL) -rex2 = re.compile(r'.*
    (.*)
    .*', re.MULTILINE | re.DOTALL) - -def strip_html_header(string, encoding='utf8'): - """ return the content of the body-tag """ - uni = unicode(string, encoding) - for rex in rex1,rex2: - match = rex.search(uni) - if not match: - break - uni = match.group(1) - return uni - -class Project: # used for confrest.py files - def __init__(self, sourcepath): - self.sourcepath = sourcepath - def process(self, path): - return process(path) - def get_htmloutputpath(self, path): - return path.new(ext='html') diff --git a/pypy/doc/redirections b/pypy/doc/redirections deleted file mode 100644 --- a/pypy/doc/redirections +++ /dev/null @@ -1,10 +0,0 @@ -# please make sure this is evaluable -{ - 'proxy.html': 'objspace-proxies.html#tproxy', - 'news.html': 'index.html', - 'contact.html': 'index.html', - 'home.html': 'index.html', - 'jit.html': 'jit/index.html', - 'standalone-howto.html': 'faq.html#pypy-translation-tool-chain', - 'dynamic-language-translation.html': 'http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf', -} diff --git a/pypy/doc/release-0.6.txt b/pypy/doc/release-0.6.rst rename from pypy/doc/release-0.6.txt rename to pypy/doc/release-0.6.rst diff --git a/pypy/doc/release-0.7.0.txt b/pypy/doc/release-0.7.0.rst rename from pypy/doc/release-0.7.0.txt rename to pypy/doc/release-0.7.0.rst diff --git a/pypy/doc/release-0.8.0.txt b/pypy/doc/release-0.8.0.rst rename from pypy/doc/release-0.8.0.txt rename to pypy/doc/release-0.8.0.rst diff --git a/pypy/doc/release-0.9.0.txt b/pypy/doc/release-0.9.0.rst rename from pypy/doc/release-0.9.0.txt rename to pypy/doc/release-0.9.0.rst diff --git a/pypy/doc/release-0.99.0.txt b/pypy/doc/release-0.99.0.rst rename from pypy/doc/release-0.99.0.txt rename to pypy/doc/release-0.99.0.rst diff --git a/pypy/doc/release-1.0.0.txt b/pypy/doc/release-1.0.0.rst rename from pypy/doc/release-1.0.0.txt rename to pypy/doc/release-1.0.0.rst diff --git a/pypy/doc/release-1.1.0.txt 
b/pypy/doc/release-1.1.0.rst rename from pypy/doc/release-1.1.0.txt rename to pypy/doc/release-1.1.0.rst diff --git a/pypy/doc/release-1.2.0.txt b/pypy/doc/release-1.2.0.rst rename from pypy/doc/release-1.2.0.txt rename to pypy/doc/release-1.2.0.rst diff --git a/pypy/doc/release-1.3.0.txt b/pypy/doc/release-1.3.0.rst rename from pypy/doc/release-1.3.0.txt rename to pypy/doc/release-1.3.0.rst diff --git a/pypy/doc/release-1.4.0.txt b/pypy/doc/release-1.4.0.rst rename from pypy/doc/release-1.4.0.txt rename to pypy/doc/release-1.4.0.rst diff --git a/pypy/doc/release-1.4.0beta.txt b/pypy/doc/release-1.4.0beta.rst rename from pypy/doc/release-1.4.0beta.txt rename to pypy/doc/release-1.4.0beta.rst diff --git a/pypy/doc/release-1.4.1.txt b/pypy/doc/release-1.4.1.rst rename from pypy/doc/release-1.4.1.txt rename to pypy/doc/release-1.4.1.rst diff --git a/pypy/doc/rffi.txt b/pypy/doc/rffi.rst rename from pypy/doc/rffi.txt rename to pypy/doc/rffi.rst diff --git a/pypy/doc/rlib.txt b/pypy/doc/rlib.rst rename from pypy/doc/rlib.txt rename to pypy/doc/rlib.rst --- a/pypy/doc/rlib.txt +++ b/pypy/doc/rlib.rst @@ -14,8 +14,8 @@ to change at some point. Usually it is useful to look at the tests in `pypy/rlib/test`_ to get an impression of how to use a module. -.. _`pypy/rlib`: ../../pypy/rlib -.. _`pypy/rlib/test`: ../../pypy/rlib/test +.. _`pypy/rlib`: ../../../../pypy/rlib +.. _`pypy/rlib/test`: ../../../../pypy/rlib/test ``listsort`` ============ @@ -29,7 +29,7 @@ be sorted using the ``listsort`` module in one program, otherwise the annotator will be confused. -.. _listsort: ../../pypy/rlib/listsort.py +.. _listsort: ../../../../pypy/rlib/listsort.py ``nonconst`` ============ @@ -41,7 +41,7 @@ ``NonConst`` will behave during annotation like that value, but no constant folding will happen. -.. _nonconst: ../../pypy/rlib/nonconst.py +.. _nonconst: ../../../../pypy/rlib/nonconst.py .. _`flow object space`: objspace.html#the-flow-object-space .. 
_`annotator`: translation.html#the-annotation-pass @@ -95,7 +95,7 @@ won't be allocated but represented by *tagged pointers**, that is pointers that have the lowest bit set. -.. _objectmodel: ../../pypy/rlib/objectmodel.py +.. _objectmodel: ../../../../pypy/rlib/objectmodel.py ``rarithmetic`` @@ -105,7 +105,7 @@ in the behaviour of arithmetic code in regular Python and RPython code. Most of them are already described in the `coding guide`_ -.. _rarithmetic: ../../pypy/rlib/rarithmetic.py +.. _rarithmetic: ../../../../pypy/rlib/rarithmetic.py .. _`coding guide`: coding-guide.html @@ -122,7 +122,7 @@ these underscores left out for better readability (so ``a.add(b)`` can be used to add two rbigint instances). -.. _rbigint: ../../pypy/rlib/rbigint.py +.. _rbigint: ../../../../pypy/rlib/rbigint.py ``rrandom`` @@ -133,7 +133,7 @@ ``random`` method which returns a pseudo-random floating point number between 0.0 and 1.0. -.. _rrandom: ../../pypy/rlib/rrandom.py +.. _rrandom: ../../../../pypy/rlib/rrandom.py ``rsocket`` =========== @@ -145,7 +145,7 @@ so on, which is not suitable for RPython. Instead, ``rsocket`` contains a hierarchy of Address classes, in a typical static-OO-programming style. -.. _rsocket: ../../pypy/rlib/rsocket.py +.. _rsocket: ../../../../pypy/rlib/rsocket.py ``rstack`` @@ -210,7 +210,7 @@ f() -.. _rstack: ../../pypy/rlib/rstack.py +.. _rstack: ../../../../pypy/rlib/rstack.py ``streamio`` @@ -220,7 +220,7 @@ by Guido van Rossum as `sio.py`_ in the CPython sandbox as a prototype for the upcoming new file implementation in Python 3000). -.. _streamio: ../../pypy/rlib/streamio.py +.. _streamio: ../../../../pypy/rlib/streamio.py .. _`sio.py`: http://svn.python.org/view/sandbox/trunk/sio/sio.py ``unroll`` @@ -230,7 +230,7 @@ which wraps an iterator. Looping over the iterator in RPython code will not produce a loop in the resulting flow graph but will unroll the loop instead. -.. _unroll: ../../pypy/rlib/unroll.py +.. 
_unroll: ../../../../pypy/rlib/unroll.py ``parsing`` =========== @@ -359,7 +359,7 @@ of the nonterminal and ``children`` which is a list of the children attributes. -.. _`pypy.rlib.parsing.tree`: ../../pypy/rlib/parsing/tree.py +.. _`pypy.rlib.parsing.tree`: ../../../../pypy/rlib/parsing/tree.py Visitors ++++++++ @@ -531,5 +531,5 @@ .. _`Prolog interpreter`: http://codespeak.net/svn/pypy/lang/prolog/ -.. _parsing: ../../pypy/rlib/parsing/ +.. _parsing: ../../../../pypy/rlib/parsing/ .. _`json format`: http://www.json.org diff --git a/pypy/doc/rtyper.txt b/pypy/doc/rtyper.rst rename from pypy/doc/rtyper.txt rename to pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.txt +++ b/pypy/doc/rtyper.rst @@ -2,7 +2,7 @@ ================= .. contents:: -.. sectnum:: + The RPython Typer lives in the directory `pypy/rpython/`_. @@ -791,4 +791,4 @@ assert res == ~3 .. _annotator: translation.html#the-annotation-pass -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/sandbox.txt b/pypy/doc/sandbox.rst rename from pypy/doc/sandbox.txt rename to pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.txt +++ b/pypy/doc/sandbox.rst @@ -1,3 +1,7 @@ +.. include:: crufty.rst + + .. ^^ it continues to work, but is unmaintained + PyPy's sandboxing features ========================== diff --git a/pypy/doc/sprint-reports.txt b/pypy/doc/sprint-reports.rst rename from pypy/doc/sprint-reports.txt rename to pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.txt +++ b/pypy/doc/sprint-reports.rst @@ -78,3 +78,6 @@ .. _`CERN (July 2010)`: http://morepypy.blogspot.com/2010/07/cern-sprint-report-wrapping-c-libraries.html .. 
_`Düsseldorf (October 2010)`: http://morepypy.blogspot.com/2010/10/dusseldorf-sprint-report-2010.html +Further event notes: + +* :ref:`eventhistory.rst` diff --git a/pypy/doc/stackless.txt b/pypy/doc/stackless.rst rename from pypy/doc/stackless.txt rename to pypy/doc/stackless.rst --- a/pypy/doc/stackless.txt +++ b/pypy/doc/stackless.rst @@ -2,9 +2,15 @@ Application-level Stackless features ========================================================== + + Introduction ================ +.. include:: crufty.rst + + .. apparently this still works; needs JIT integration; hasn't been maintained for years + PyPy can expose to its user language features similar to the ones present in `Stackless Python`_: **no recursion depth limit**, and the ability to write code in a **massively concurrent style**. It actually @@ -619,4 +625,4 @@ .. _`documentation of the greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt .. _`Stackless Transform`: translation.html#the-stackless-transform -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/statistic/index.txt b/pypy/doc/statistic/index.rst rename from pypy/doc/statistic/index.txt rename to pypy/doc/statistic/index.rst diff --git a/pypy/doc/svn-help.txt b/pypy/doc/svn-help.rst rename from pypy/doc/svn-help.txt rename to pypy/doc/svn-help.rst diff --git a/pypy/doc/test_redirections.py b/pypy/doc/test_redirections.py deleted file mode 100644 --- a/pypy/doc/test_redirections.py +++ /dev/null @@ -1,54 +0,0 @@ - -import py -redir = py.path.local(__file__).dirpath('redirections') - -def checkexist(path): - print "checking", path - assert path.ext == '.html' - assert path.new(ext='.txt').check(file=1) - -def checkredirection(oldname, newname): - print "checking", newname - if not newname.startswith('http://'): - newpath = redir.dirpath(newname.split('#')[0]) - checkexist(newpath) - # HACK: create the redirecting HTML file here... 
- # XXX obscure fishing - if py.test.config.option.generateredirections and '#' not in oldname: - generate_redirection(oldname, newname) - -def test_eval(): - d = eval(redir.read(mode='r')) - return d - -def test_redirections(): - d = test_eval() - for oldname, newname in d.items(): - yield checkredirection, oldname, newname - -def test_navlist(): - navlist = eval(redir.dirpath('navlist').read()) - for entry in navlist: - yield checkexist, redir.dirpath(entry) - -# ____________________________________________________________ - -def generate_redirection(oldname, newname): - print "redirecting from", oldname - oldpath = redir.dirpath(oldname) - url = newname # relative URL - oldpath.write(""" - - - - - - -

    - you should be automatically redirected to - %s -

    - - -""" % (url, url, url)) diff --git a/pypy/doc/theory.txt b/pypy/doc/theory.rst rename from pypy/doc/theory.txt rename to pypy/doc/theory.rst --- a/pypy/doc/theory.txt +++ b/pypy/doc/theory.rst @@ -1,9 +1,13 @@ +.. include:: crufty.rst + + .. ^^ old ideas; we're not doing it this way any more + =================================== Techniques used in PyPy =================================== .. contents:: -.. sectnum:: + .. _`abstract interpretation`: diff --git a/pypy/doc/translation-aspects.txt b/pypy/doc/translation-aspects.rst rename from pypy/doc/translation-aspects.txt rename to pypy/doc/translation-aspects.rst --- a/pypy/doc/translation-aspects.txt +++ b/pypy/doc/translation-aspects.rst @@ -1,9 +1,12 @@ +.. include:: crufty.rst +.. ^^ old and needs updating + ========================================================================================== Memory management and threading models as translation aspects -- solutions and challenges ========================================================================================== .. contents:: -.. sectnum:: + Introduction ============= diff --git a/pypy/doc/translation.txt b/pypy/doc/translation.rst rename from pypy/doc/translation.txt rename to pypy/doc/translation.rst --- a/pypy/doc/translation.txt +++ b/pypy/doc/translation.rst @@ -3,7 +3,7 @@ ===================== .. contents:: -.. sectnum:: + This document describes the tool chain that we have developed to analyze and "compile" RPython_ programs (like PyPy itself) to various target @@ -107,7 +107,7 @@ .. _`abstract interpretation`: theory.html#abstract-interpretation .. _`Flow Object Space`: objspace.html#the-flow-object-space .. _`interactive interface`: getting-started-dev.html#try-out-the-translator -.. _`translatorshell.py`: ../../pypy/bin/translatorshell.py +.. _`translatorshell.py`: ../../../../pypy/bin/translatorshell.py .. _`flow model`: .. _`control flow graphs`: @@ -274,7 +274,7 @@ should not attempt to actually mutate such Constants. .. 
_`document describing object spaces`: objspace.html -.. _`pypy.objspace.flow.model`: ../objspace/flow/model.py +.. _`pypy.objspace.flow.model`: ../../../../pypy/objspace/flow/model.py .. _Annotator: @@ -768,4 +768,4 @@ collection of functions (which may refer to each other in a mutually recursive fashion) and annotate and rtype them all at once. -.. include:: _ref.txt +.. include:: _ref.rst diff --git a/pypy/doc/video-index.txt b/pypy/doc/video-index.rst rename from pypy/doc/video-index.txt rename to pypy/doc/video-index.rst diff --git a/pypy/doc/windows.txt b/pypy/doc/windows.rst rename from pypy/doc/windows.txt rename to pypy/doc/windows.rst --- a/pypy/doc/windows.txt +++ b/pypy/doc/windows.rst @@ -1,6 +1,6 @@ -============= -Windows Hints -============= +=============== +PyPy on Windows +=============== Pypy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -77,13 +77,13 @@ if i == 2: return self.kwargname raise IndexError - + class Arguments(object): """ Collects the arguments of a function call. - + Instances should be considered immutable. """ @@ -146,7 +146,7 @@ self._combine_starstarargs_wrapped(w_starstararg) def _combine_starargs_wrapped(self, w_stararg): - # unpack the * arguments + # unpack the * arguments space = self.space try: args_w = space.fixedview(w_stararg) @@ -236,10 +236,10 @@ if self.arguments_w: return self.arguments_w[0] return None - + ### Parsing for function calls ### - def _match_signature(self, w_firstarg, scope_w, signature, defaults_w=[], + def _match_signature(self, w_firstarg, scope_w, signature, defaults=None, blindargs=0): """Parse args and kwargs according to the signature of a code object, or raise an ArgErr in case of failure. 
@@ -247,19 +247,19 @@ """ if jit.we_are_jitted() and self._dont_jit: return self._match_signature_jit_opaque(w_firstarg, scope_w, - signature, defaults_w, + signature, defaults, blindargs) return self._really_match_signature(w_firstarg, scope_w, signature, - defaults_w, blindargs) + defaults, blindargs) @jit.dont_look_inside def _match_signature_jit_opaque(self, w_firstarg, scope_w, signature, - defaults_w, blindargs): + defaults, blindargs): return self._really_match_signature(w_firstarg, scope_w, signature, - defaults_w, blindargs) + defaults, blindargs) @jit.unroll_safe - def _really_match_signature(self, w_firstarg, scope_w, signature, defaults_w=[], + def _really_match_signature(self, w_firstarg, scope_w, signature, defaults=None, blindargs=0): # # args_w = list of the normal actual parameters, wrapped @@ -283,10 +283,10 @@ scope_w[0] = w_firstarg input_argcount = 1 else: - extravarargs = [ w_firstarg ] + extravarargs = [w_firstarg] else: upfront = 0 - + args_w = self.arguments_w num_args = len(args_w) @@ -327,7 +327,7 @@ elif avail > co_argcount: raise ArgErrCount(avail, num_kwds, co_argcount, has_vararg, has_kwarg, - defaults_w, 0) + defaults, 0) # the code assumes that keywords can potentially be large, but that # argnames is typically not too large @@ -357,12 +357,12 @@ num_remainingkwds -= 1 missing = 0 if input_argcount < co_argcount: - def_first = co_argcount - len(defaults_w) + def_first = co_argcount - (0 if defaults is None else defaults.getlen()) for i in range(input_argcount, co_argcount): if scope_w[i] is not None: pass elif i >= def_first: - scope_w[i] = defaults_w[i-def_first] + scope_w[i] = defaults.getitem(i - def_first) else: # error: not enough arguments. 
Don't signal it immediately # because it might be related to a problem with */** or @@ -382,20 +382,20 @@ if co_argcount == 0: raise ArgErrCount(avail, num_kwds, co_argcount, has_vararg, has_kwarg, - defaults_w, missing) + defaults, missing) raise ArgErrUnknownKwds(num_remainingkwds, keywords, used_keywords) if missing: raise ArgErrCount(avail, num_kwds, co_argcount, has_vararg, has_kwarg, - defaults_w, missing) + defaults, missing) return co_argcount + has_vararg + has_kwarg - + def parse_into_scope(self, w_firstarg, - scope_w, fnname, signature, defaults_w=[]): + scope_w, fnname, signature, defaults=None): """Parse args and kwargs to initialize a frame according to the signature of code object. Store the argumentvalues into scope_w. @@ -403,32 +403,32 @@ """ try: return self._match_signature(w_firstarg, - scope_w, signature, defaults_w, 0) + scope_w, signature, defaults, 0) except ArgErr, e: raise OperationError(self.space.w_TypeError, self.space.wrap(e.getmsg(fnname))) - def _parse(self, w_firstarg, signature, defaults_w, blindargs=0): + def _parse(self, w_firstarg, signature, defaults, blindargs=0): """Parse args and kwargs according to the signature of a code object, or raise an ArgErr in case of failure. """ scopelen = signature.scope_length() scope_w = [None] * scopelen - self._match_signature(w_firstarg, scope_w, signature, defaults_w, + self._match_signature(w_firstarg, scope_w, signature, defaults, blindargs) - return scope_w + return scope_w def parse_obj(self, w_firstarg, - fnname, signature, defaults_w=[], blindargs=0): + fnname, signature, defaults=None, blindargs=0): """Parse args and kwargs to initialize a frame according to the signature of code object. 
""" try: - return self._parse(w_firstarg, signature, defaults_w, blindargs) + return self._parse(w_firstarg, signature, defaults, blindargs) except ArgErr, e: raise OperationError(self.space.w_TypeError, - self.space.wrap(e.getmsg(fnname))) + self.space.wrap(e.getmsg(fnname))) @staticmethod def frompacked(space, w_args=None, w_kwds=None): @@ -473,24 +473,24 @@ self.w_starstararg) - - def _match_signature(self, w_firstarg, scope_w, signature, defaults_w=[], + + def _match_signature(self, w_firstarg, scope_w, signature, defaults=None, blindargs=0): self.combine_if_necessary() # _match_signature is destructive return Arguments._match_signature( self, w_firstarg, scope_w, signature, - defaults_w, blindargs) + defaults, blindargs) def unpack(self): self.combine_if_necessary() return Arguments.unpack(self) - def match_signature(self, signature, defaults_w): + def match_signature(self, signature, defaults): """Parse args and kwargs according to the signature of a code object, or raise an ArgErr in case of failure. 
""" - return self._parse(None, signature, defaults_w) + return self._parse(None, signature, defaults) def unmatch_signature(self, signature, data_w): """kind of inverse of match_signature""" @@ -513,10 +513,10 @@ for w_key in space.unpackiterable(data_w_starargarg): key = space.str_w(w_key) w_value = space.getitem(data_w_starargarg, w_key) - unfiltered_kwds_w[key] = w_value + unfiltered_kwds_w[key] = w_value cnt += 1 assert len(data_w) == cnt - + ndata_args_w = len(data_args_w) if ndata_args_w >= need_cnt: args_w = data_args_w[:need_cnt] @@ -532,19 +532,19 @@ for i in range(0, len(stararg_w)): args_w[i + datalen] = stararg_w[i] assert len(args_w) == need_cnt - + keywords = [] keywords_w = [] for key in need_kwds: keywords.append(key) keywords_w.append(unfiltered_kwds_w[key]) - + return ArgumentsForTranslation(self.space, args_w, keywords, keywords_w) @staticmethod def frompacked(space, w_args=None, w_kwds=None): raise NotImplementedError("go away") - + @staticmethod def fromshape(space, (shape_cnt,shape_keys,shape_star,shape_stst), data_w): args_w = data_w[:shape_cnt] @@ -596,23 +596,23 @@ # class ArgErr(Exception): - + def getmsg(self, fnname): raise NotImplementedError class ArgErrCount(ArgErr): def __init__(self, got_nargs, nkwds, expected_nargs, has_vararg, has_kwarg, - defaults_w, missing_args): + defaults, missing_args): self.expected_nargs = expected_nargs self.has_vararg = has_vararg self.has_kwarg = has_kwarg - - self.num_defaults = len(defaults_w) + + self.num_defaults = 0 if defaults is None else defaults.getlen() self.missing_args = missing_args self.num_args = got_nargs self.num_kwds = nkwds - + def getmsg(self, fnname): args = None #args_w, kwds_w = args.unpack() @@ -620,7 +620,7 @@ n = self.expected_nargs if n == 0: msg = "%s() takes no argument (%d given)" % ( - fnname, + fnname, nargs) else: defcount = self.num_defaults diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py 
+++ b/pypy/interpreter/astcompiler/ast.py @@ -3590,6 +3590,8 @@ try: obj = space.interp_w(operator, w_new_value) w_self.op = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'op', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4824,6 +4826,8 @@ try: obj = space.interp_w(boolop, w_new_value) w_self.op = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'op', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4911,6 +4915,8 @@ try: obj = space.interp_w(operator, w_new_value) w_self.op = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'op', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -4980,6 +4986,8 @@ try: obj = space.interp_w(unaryop, w_new_value) w_self.op = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'op', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6028,6 +6036,8 @@ try: obj = space.interp_w(expr_context, w_new_value) w_self.ctx = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'ctx', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6118,6 +6128,8 @@ try: obj = space.interp_w(expr_context, w_new_value) w_self.ctx = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'ctx', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6187,6 +6199,8 @@ try: obj = space.interp_w(expr_context, w_new_value) w_self.ctx = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'ctx', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6252,6 +6266,8 @@ try: obj = 
space.interp_w(expr_context, w_new_value) w_self.ctx = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'ctx', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -6318,6 +6334,8 @@ try: obj = space.interp_w(expr_context, w_new_value) w_self.ctx = obj.to_simple_int(space) + # need to save the original object too + w_self.setdictvalue(space, 'ctx', w_new_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -454,6 +454,9 @@ (field.type,), 2) self.emit("w_self.%s = obj.to_simple_int(space)" % (field.name,), 2) + self.emit("# need to save the original object too", 2) + self.emit("w_self.setdictvalue(space, '%s', w_new_value)" + % (field.name,), 2) else: config = (field.name, field.type, repr(field.opt)) self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" % diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -365,7 +365,11 @@ def setbuiltinmodule(self, importname): """NOT_RPYTHON. load a lazy pypy/module and put it into sys.modules""" - fullname = "pypy.module.%s" % importname + if '.' 
in importname: + fullname = importname + importname = fullname.rsplit('.', 1)[1] + else: + fullname = "pypy.module.%s" % importname Module = __import__(fullname, None, None, ["Module"]).Module @@ -428,6 +432,11 @@ if value and name not in modules: modules.append(name) + if self.config.objspace.extmodules: + for name in self.config.objspace.extmodules.split(','): + if name not in modules: + modules.append(name) + # a bit of custom logic: time2 or rctime take precedence over time # XXX this could probably be done as a "requires" in the config if ('time2' in modules or 'rctime' in modules) and 'time' in modules: @@ -745,7 +754,12 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - items = [] + # If we know the expected length we can preallocate. + if expected_length == -1: + items = [] + else: + items = [None] * expected_length + idx = 0 while True: try: w_item = self.next(w_iterator) @@ -753,19 +767,22 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and len(items) == expected_length: + if expected_length != -1 and idx == expected_length: raise OperationError(self.w_ValueError, self.wrap("too many values to unpack")) - items.append(w_item) - if expected_length != -1 and len(items) < expected_length: - i = len(items) - if i == 1: + if expected_length == -1: + items.append(w_item) + else: + items[idx] = w_item + idx += 1 + if expected_length != -1 and idx < expected_length: + if idx == 1: plural = "" else: plural = "s" raise OperationError(self.w_ValueError, self.wrap("need more than %d value%s to unpack" % - (i, plural))) + (idx, plural))) return items unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -21,6 +21,21 @@ assert not 
func.can_change_code return func.code +class Defaults(object): + _immutable_fields_ = ["items[*]"] + + def __init__(self, items): + self.items = items + + def getitems(self): + return jit.hint(self, promote=True).items + + def getitem(self, idx): + return self.getitems()[idx] + + def getlen(self): + return len(self.getitems()) + class Function(Wrappable): """A function is a code object captured with some environment: an object space, a dictionary of globals, default arguments, @@ -36,8 +51,7 @@ self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs_w = defs_w # list of w_default's - make_sure_not_resized(self.defs_w) + self.defs = Defaults(defs_w) # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -87,7 +101,7 @@ assert isinstance(code, gateway.BuiltinCode4) return code.fastcall_4(self.space, self, args_w[0], args_w[1], args_w[2], args_w[3]) - elif (nargs|PyCode.FLATPYCALL) == fast_natural_arity: + elif (nargs | PyCode.FLATPYCALL) == fast_natural_arity: assert isinstance(code, PyCode) if nargs < 5: new_frame = self.space.createframe(code, self.w_func_globals, @@ -129,15 +143,15 @@ return code.fastcall_4(self.space, self, frame.peekvalue(3), frame.peekvalue(2), frame.peekvalue(1), frame.peekvalue(0)) - elif (nargs|Code.FLATPYCALL) == fast_natural_arity: + elif (nargs | Code.FLATPYCALL) == fast_natural_arity: assert isinstance(code, PyCode) return self._flat_pycall(code, nargs, frame) - elif fast_natural_arity&Code.FLATPYCALL: - natural_arity = fast_natural_arity&0xff - if natural_arity > nargs >= natural_arity-len(self.defs_w): + elif fast_natural_arity & Code.FLATPYCALL: + natural_arity = fast_natural_arity & 0xff + if natural_arity > nargs >= natural_arity - self.defs.getlen(): assert isinstance(code, PyCode) return self._flat_pycall_defaults(code, nargs, frame, - natural_arity-nargs) + 
natural_arity - nargs) elif fast_natural_arity == Code.PASSTHROUGHARGS1 and nargs >= 1: assert isinstance(code, gateway.BuiltinCodePassThroughArguments1) w_obj = frame.peekvalue(nargs-1) @@ -167,12 +181,12 @@ w_arg = frame.peekvalue(nargs-1-i) new_frame.fastlocals_w[i] = w_arg - defs_w = self.defs_w - ndefs = len(defs_w) - start = ndefs-defs_to_load + defs = self.defs + ndefs = defs.getlen() + start = ndefs - defs_to_load i = nargs for j in xrange(start, ndefs): - new_frame.fastlocals_w[i] = defs_w[j] + new_frame.fastlocals_w[i] = defs.getitem(j) i += 1 return new_frame.run() @@ -182,8 +196,10 @@ return self.w_func_dict def setdict(self, space, w_dict): - if not space.is_true(space.isinstance( w_dict, space.w_dict )): - raise OperationError( space.w_TypeError, space.wrap("setting function's dictionary to a non-dict") ) + if not space.isinstance_w(w_dict, space.w_dict): + raise OperationError(space.w_TypeError, + space.wrap("setting function's dictionary to a non-dict") + ) self.w_func_dict = w_dict def descr_function__new__(space, w_subtype, w_code, w_globals, @@ -286,7 +302,7 @@ w(self.code), w_func_globals, w_closure, - nt(self.defs_w), + nt(self.defs.getitems()), w_func_dict, self.w_module, ] @@ -296,7 +312,7 @@ from pypy.interpreter.pycode import PyCode args_w = space.unpackiterable(w_args) try: - (w_name, w_doc, w_code, w_func_globals, w_closure, w_defs_w, + (w_name, w_doc, w_code, w_func_globals, w_closure, w_defs, w_func_dict, w_module) = args_w except ValueError: # wrong args @@ -321,25 +337,28 @@ if space.is_w(w_func_dict, space.w_None): w_func_dict = None self.w_func_dict = w_func_dict - self.defs_w = space.fixedview(w_defs_w) + self.defs = Defaults(space.fixedview(w_defs)) self.w_module = w_module def fget_func_defaults(self, space): - values_w = self.defs_w + values_w = self.defs.getitems() + # the `None in values_w` check here is to ensure that interp-level + # functions with a default of NoneNotWrapped do not get their defaults + # exposed at applevel 
if not values_w or None in values_w: return space.w_None return space.newtuple(values_w) def fset_func_defaults(self, space, w_defaults): if space.is_w(w_defaults, space.w_None): - self.defs_w = [] + self.defs = Defaults([]) return if not space.is_true(space.isinstance(w_defaults, space.w_tuple)): raise OperationError( space.w_TypeError, space.wrap("func_defaults must be set to a tuple object or None") ) - self.defs_w = space.fixedview(w_defaults) + self.defs = Defaults(space.fixedview(w_defaults)) def fdel_func_defaults(self, space): - self.defs_w = [] + self.defs = Defaults([]) def fget_func_doc(self, space): if self.w_doc is None: @@ -369,7 +388,7 @@ def fget___module__(self, space): if self.w_module is None: if self.w_func_globals is not None and not space.is_w(self.w_func_globals, space.w_None): - self.w_module = space.call_method( self.w_func_globals, "get", space.wrap("__name__") ) + self.w_module = space.call_method(self.w_func_globals, "get", space.wrap("__name__")) else: self.w_module = space.w_None return self.w_module @@ -601,7 +620,7 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs_w, func.closure, func.name) + func.defs.getitems(), func.closure, func.name) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -13,7 +13,7 @@ NoneNotWrapped = object() from pypy.tool.sourcetools import func_with_new_name -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, ClassMethod from pypy.interpreter.function import FunctionWithFixedCode @@ -25,7 +25,7 @@ from pypy.rlib import rstackovf from pypy.rlib.objectmodel import we_are_translated -# internal 
non-translatable parts: +# internal non-translatable parts: import py class SignatureBuilder(object): @@ -78,13 +78,13 @@ dispatch = self.dispatch for el in unwrap_spec: dispatch(el, *extra) - + class UnwrapSpecEmit(UnwrapSpecRecipe): def __init__(self): self.n = 0 self.miniglobals = {} - + def succ(self): n = self.n self.n += 1 @@ -94,7 +94,7 @@ name = obj.__name__ self.miniglobals[name] = obj return name - + #________________________________________________________________ class UnwrapSpec_Check(UnwrapSpecRecipe): @@ -147,7 +147,7 @@ "unwrapped %s argument %s of built-in function %r should " "not start with 'w_'" % (name, argname, self.func)) app_sig.append(argname) - + def visit__ObjSpace(self, el, app_sig): self.orig_arg() @@ -173,7 +173,7 @@ (argname, self.func)) assert app_sig.varargname is None,( "built-in function %r has conflicting rest args specs" % self.func) - app_sig.varargname = argname[:-2] + app_sig.varargname = argname[:-2] def visit_w_args(self, el, app_sig): argname = self.orig_arg() @@ -199,7 +199,7 @@ def scopenext(self): return "scope_w[%d]" % self.succ() - + def visit_function(self, (func, cls)): self.run_args.append("%s(%s)" % (self.use(func), self.scopenext())) @@ -207,7 +207,7 @@ def visit_self(self, typ): self.run_args.append("space.descr_self_interp_w(%s, %s)" % (self.use(typ), self.scopenext())) - + def visit__Wrappable(self, typ): self.run_args.append("space.interp_w(%s, %s)" % (self.use(typ), self.scopenext())) @@ -265,7 +265,7 @@ "unexpected: same spec, different run_args") return activation_factory_cls except KeyError: - parts = [] + parts = [] for el in unwrap_spec: if isinstance(el, tuple): parts.append(''.join([getattr(subel, '__name__', subel) @@ -276,7 +276,7 @@ #print label d = {} - source = """if 1: + source = """if 1: def _run(self, space, scope_w): return self.behavior(%s) \n""" % (', '.join(self.run_args),) @@ -326,7 +326,7 @@ self.finger += 1 if self.n > 4: raise FastFuncNotSupported - + def nextarg(self): arg = "w%d" % 
self.succ() self.args.append(arg) @@ -405,7 +405,7 @@ raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func - source = """if 1: + source = """if 1: def fastfunc_%s_%d(%s): return func(%s) \n""" % (func.__name__, narg, @@ -511,7 +511,7 @@ # 'w_args' for rest arguments passed as wrapped tuple # str,int,float: unwrap argument as such type # (function, cls) use function to check/unwrap argument of type cls - + # First extract the signature from the (CPython-level) code object from pypy.interpreter import pycode argnames, varargname, kwargname = pycode.cpython_code_signature(func.func_code) @@ -532,7 +532,7 @@ else: assert descrmismatch is None, ( "descrmismatch without a self-type specified") - + orig_sig = SignatureBuilder(func, argnames, varargname, kwargname) app_sig = SignatureBuilder(func) @@ -594,7 +594,7 @@ space = func.space activation = self.activation scope_w = args.parse_obj(w_obj, func.name, self.sig, - func.defs_w, self.minargs) + func.defs, self.minargs) try: w_result = activation._run(space, scope_w) except DescrMismatch: @@ -615,10 +615,10 @@ if not we_are_translated(): raise raise e - except KeyboardInterrupt: + except KeyboardInterrupt: raise OperationError(space.w_KeyboardInterrupt, - space.w_None) - except MemoryError: + space.w_None) + except MemoryError: raise OperationError(space.w_MemoryError, space.w_None) except rstackovf.StackOverflow, e: rstackovf.check_stack_overflow() @@ -668,7 +668,7 @@ class BuiltinCode0(BuiltinCode): _immutable_ = True fast_natural_arity = 0 - + def fastcall_0(self, space, w_func): try: w_result = self.fastfunc_0(space) @@ -684,7 +684,7 @@ class BuiltinCode1(BuiltinCode): _immutable_ = True fast_natural_arity = 1 - + def fastcall_1(self, space, w_func, w1): try: w_result = self.fastfunc_1(space, w1) @@ -702,7 +702,7 @@ class BuiltinCode2(BuiltinCode): _immutable_ = True fast_natural_arity = 2 - + def fastcall_2(self, space, w_func, w1, w2): try: w_result = self.fastfunc_2(space, w1, w2) @@ -720,7 
+720,7 @@ class BuiltinCode3(BuiltinCode): _immutable_ = True fast_natural_arity = 3 - + def fastcall_3(self, space, func, w1, w2, w3): try: w_result = self.fastfunc_3(space, w1, w2, w3) @@ -738,7 +738,7 @@ class BuiltinCode4(BuiltinCode): _immutable_ = True fast_natural_arity = 4 - + def fastcall_4(self, space, func, w1, w2, w3, w4): try: w_result = self.fastfunc_4(space, w1, w2, w3, w4) @@ -770,7 +770,7 @@ NOT_RPYTHON_ATTRIBUTES = ['_staticdefs'] instancecache = {} - + def __new__(cls, f, app_name=None, unwrap_spec = None, descrmismatch=None, as_classmethod=False): @@ -846,17 +846,17 @@ return fn -# -# the next gateways are to be used only for -# temporary/initialization purposes - -class interp2app_temp(interp2app): +# +# the next gateways are to be used only for +# temporary/initialization purposes + +class interp2app_temp(interp2app): "NOT_RPYTHON" - def getcache(self, space): + def getcache(self, space): return self.__dict__.setdefault(space, GatewayCache(space)) -# and now for something completely different ... +# and now for something completely different ... # class ApplevelClass: @@ -896,14 +896,14 @@ from pypy.interpreter.module import Module return Module(space, space.wrap(name), self.getwdict(space)) - def wget(self, space, name): - w_globals = self.getwdict(space) + def wget(self, space, name): + w_globals = self.getwdict(space) return space.getitem(w_globals, space.wrap(name)) def interphook(self, name): "NOT_RPYTHON" def appcaller(space, *args_w): - if not isinstance(space, ObjSpace): + if not isinstance(space, ObjSpace): raise TypeError("first argument must be a space instance.") # redirect if the space handles this specially # XXX can this be factored a bit less flow space dependently? 
@@ -932,7 +932,7 @@ args.arguments_w) return space.call_args(w_func, args) def get_function(space): - w_func = self.wget(space, name) + w_func = self.wget(space, name) return space.unwrap(w_func) appcaller = func_with_new_name(appcaller, name) appcaller.get_function = get_function @@ -1123,15 +1123,15 @@ myfunc = appdef('''myfunc(x, y): return x+y ''') - """ - if not isinstance(source, str): + """ + if not isinstance(source, str): source = py.std.inspect.getsource(source).lstrip() - while source.startswith('@py.test.mark.'): + while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function # object, we may ignore them assert '\n' in source source = source[source.find('\n') + 1:].lstrip() - assert source.startswith("def "), "can only transform functions" + assert source.startswith("def "), "can only transform functions" source = source[4:] p = source.find('(') assert p >= 0 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -86,7 +86,7 @@ self._init_flags() # Precompute what arguments need to be copied into cellvars self._args_as_cellvars = [] - + if self.co_cellvars: argcount = self.co_argcount assert argcount >= 0 # annotator hint @@ -146,7 +146,7 @@ def signature(self): return self._signature - + @classmethod def _from_code(cls, space, code, hidden_applevel=False, code_hook=None): """ Initialize the code object from a real (CPython) one. @@ -182,7 +182,7 @@ list(code.co_cellvars), hidden_applevel, cpython_magic) - + def _compute_flatcall(self): # Speed hack! 
self.fast_natural_arity = eval.Code.HOPELESS @@ -192,7 +192,7 @@ return if self.co_argcount > 0xff: return - + self.fast_natural_arity = eval.Code.FLATPYCALL | self.co_argcount def funcrun(self, func, args): @@ -204,7 +204,7 @@ fresh_virtualizable=True) args_matched = args.parse_into_scope(None, fresh_frame.fastlocals_w, func.name, - sig, func.defs_w) + sig, func.defs) fresh_frame.init_cells() return frame.run() @@ -214,10 +214,10 @@ sig = self._signature # speed hack fresh_frame = jit.hint(frame, access_directly=True, - fresh_virtualizable=True) + fresh_virtualizable=True) args_matched = args.parse_into_scope(w_obj, fresh_frame.fastlocals_w, func.name, - sig, func.defs_w) + sig, func.defs) fresh_frame.init_cells() return frame.run() @@ -269,7 +269,7 @@ def fget_co_consts(self, space): return space.newtuple(self.co_consts_w) - + def fget_co_names(self, space): return space.newtuple(self.co_names_w) @@ -280,7 +280,7 @@ return space.newtuple([space.wrap(name) for name in self.co_cellvars]) From noreply at buildbot.pypy.org Fri Sep 23 13:13:12 2011 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Sep 2011 13:13:12 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: fix nonsense Message-ID: <20110923111312.63DBF820D1@wyvern.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: list-strategies Changeset: r47498:fdb21a0533f2 Date: 2011-04-12 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/fdb21a0533f2/ Log: fix nonsense diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -768,7 +768,7 @@ if isinstance(w_iterable, W_ListObject): w_list.extend(w_iterable) elif isinstance(w_iterable, W_TupleObject): - w_list.extend(W_ListObject(space, w_iterable.wrappeditems[:]) + w_list.extend(W_ListObject(space, w_iterable.wrappeditems[:])) else: _init_from_iterable(space, w_list, w_iterable) @@ -783,8 +783,7 @@ if not e.match(space, space.w_StopIteration): 
raise break # done - #items_w.append(w_item) - w_list.append(w_item) + w_list.append(w_item) def len__List(space, w_list): result = w_list.length() From noreply at buildbot.pypy.org Fri Sep 23 13:13:13 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:13 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Fixed getslice bug in RangeListStrategy Message-ID: <20110923111313.9059F820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47499:8b2722432eef Date: 2011-04-20 14:19 +0200 http://bitbucket.org/pypy/pypy/changeset/8b2722432eef/ Log: Fixed getslice bug in RangeListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -514,6 +514,7 @@ return W_ListObject.from_storage_and_strategy(self.space, storage, self) else: subitems_w = [None] * length + # XXX wrap/unwrap for i in range(length): subitems_w[i] = w_list.getitem(start) start += step @@ -806,6 +807,13 @@ def getslice__List_ANY_ANY(space, w_list, w_start, w_stop): length = w_list.length() start, stop = normalize_simple_slice(space, length, w_start, w_stop) + + slicelength = stop - start + if slicelength == 0: + strategy = space.fromcache(EmptyListStrategy) + storage = strategy.cast_to_void_star(None) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + return w_list.getslice(start, stop, 1, stop - start) def setslice__List_ANY_ANY_List(space, w_list, w_start, w_stop, w_other): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -325,3 +325,8 @@ l1.setitem(0, self.space.wrap(5)) assert not self.space.eq_w(l1, l2) + def test_weird_rangelist_bug(self): + l = make_range_list(self.space, 1, 1, 3) + from pypy.objspace.std.listobject import 
getslice__List_ANY_ANY + # should not raise + assert getslice__List_ANY_ANY(self.space, l, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) From noreply at buildbot.pypy.org Fri Sep 23 13:13:14 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:14 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Fixed setslice on EmptyList to work with RPython Message-ID: <20110923111314.C4373820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47500:4c910a687b80 Date: 2011-04-21 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/4c910a687b80/ Log: Fixed setslice on EmptyList to work with RPython diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -185,6 +185,9 @@ def getitems_copy(self, w_list): raise NotImplementedError + def getstorage_copy(self, w_list): + return self.cast_to_void_star(self.getitems_copy(w_list)) + def append(self, w_list, w_item): raise NotImplementedError @@ -272,9 +275,8 @@ raise IndexError def setslice(self, w_list, start, step, slicelength, w_other): - items = w_other.getitems_copy() strategy = w_other.strategy - storage = strategy.cast_to_void_star(items) + storage = strategy.getstorage_copy(w_other) w_list.strategy = strategy w_list.lstorage = storage From noreply at buildbot.pypy.org Fri Sep 23 13:13:15 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:15 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: RPython wasn't satiesfied before Message-ID: <20110923111315.F024E820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47501:205bca6bc815 Date: 2011-04-21 12:02 +0200 http://bitbucket.org/pypy/pypy/changeset/205bca6bc815/ Log: RPython wasn't satiesfied before diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- 
a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -186,7 +186,7 @@ raise NotImplementedError def getstorage_copy(self, w_list): - return self.cast_to_void_star(self.getitems_copy(w_list)) + raise NotImplementedError def append(self, w_list, w_item): raise NotImplementedError @@ -253,6 +253,9 @@ def getitems_copy(self, w_list): return [] + def getstorage_copy(self, w_list): + return self.cast_to_void_star(self.getitems_copy(w_list)) + def append(self, w_list, w_item): w_list.__init__(self.space, [w_item]) @@ -340,6 +343,9 @@ def getitems(self, w_list): return self._getitems_range(w_list, True) + def getstorage_copy(self, w_list): + return self.cast_to_void_star(self.getitems_copy(w_list)) + getitems_copy = getitems @specialize.arg(2) @@ -504,6 +510,9 @@ def getitems_copy(self, w_list): return [self.wrap(item) for item in self.cast_from_void_star(w_list.lstorage)] + def getstorage_copy(self, w_list): + return self.cast_to_void_star(self.getitems_copy(w_list)) + getitems = getitems_copy def getslice(self, w_list, start, stop, step, length): From noreply at buildbot.pypy.org Fri Sep 23 13:13:17 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:17 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Fixed getstorage_copy again Message-ID: <20110923111317.29584820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47502:9b07ebaab89c Date: 2011-04-21 12:42 +0200 http://bitbucket.org/pypy/pypy/changeset/9b07ebaab89c/ Log: Fixed getstorage_copy again diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -254,7 +254,7 @@ return [] def getstorage_copy(self, w_list): - return self.cast_to_void_star(self.getitems_copy(w_list)) + return self.cast_to_void_star([]) def append(self, w_list, w_item): w_list.__init__(self.space, [w_item]) @@ -344,7 +344,8 @@ return 
self._getitems_range(w_list, True) def getstorage_copy(self, w_list): - return self.cast_to_void_star(self.getitems_copy(w_list)) + # tuple is unmutable + return w_list.lstorage getitems_copy = getitems @@ -511,7 +512,8 @@ return [self.wrap(item) for item in self.cast_from_void_star(w_list.lstorage)] def getstorage_copy(self, w_list): - return self.cast_to_void_star(self.getitems_copy(w_list)) + items = self.cast_from_void_star(w_list.lstorage)[:] + return self.cast_to_void_star(items) getitems = getitems_copy From noreply at buildbot.pypy.org Fri Sep 23 13:13:18 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:18 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: RPython does not throw IndexError when there is not try/catch around Message-ID: <20110923111318.58889820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47503:eca3aafc0a2e Date: 2011-04-25 16:34 +0200 http://bitbucket.org/pypy/pypy/changeset/eca3aafc0a2e/ Log: RPython does not throw IndexError when there is not try/catch around diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -568,7 +568,10 @@ l = self.cast_from_void_star(w_list.lstorage) if self.is_correct_type(w_item): - l[index] = self.unwrap(w_item) + try: + l[index] = self.unwrap(w_item) + except IndexError: + raise return w_list.switch_to_object_strategy() @@ -644,7 +647,10 @@ def deleteitem(self, w_list, index): l = self.cast_from_void_star(w_list.lstorage) - del l[index] + try: + del l[index] + except IndexError: + raise w_list.check_empty_strategy() def deleteslice(self, w_list, start, step, slicelength): @@ -683,7 +689,14 @@ def pop(self, w_list, index): l = self.cast_from_void_star(w_list.lstorage) - w_item = self.wrap(l.pop(index)) + # not sure if RPython raises IndexError on pop + # so check again here + try: + item = l.pop(index) + except IndexError: + 
raise + + w_item = self.wrap(item) w_list.check_empty_strategy() return w_item From noreply at buildbot.pypy.org Fri Sep 23 13:13:19 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:19 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: It would be clever to return the clone Message-ID: <20110923111319.86B1B820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47504:cb99ab592ac6 Date: 2011-04-26 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/cb99ab592ac6/ Log: It would be clever to return the clone diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -316,6 +316,7 @@ def clone(self, w_list): storage = w_list.lstorage # lstorage is tuple, no need to clone w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, self) + return w_clone def copy_into(self, w_list, w_other): w_other.strategy = self From noreply at buildbot.pypy.org Fri Sep 23 13:13:20 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:20 +0200 (CEST) Subject: [pypy-commit] pypy list-strategies: Added tests for adding lists (especially range lists) Message-ID: <20110923111320.B2C33820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: list-strategies Changeset: r47505:a5c9d09d47ae Date: 2011-04-26 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/a5c9d09d47ae/ Log: Added tests for adding lists (especially range lists) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -566,6 +566,16 @@ l1 += bar assert l1 == ('radd', bar, [1,2,3]) + def test_add_lists(self): + l1 = [1,2,3] + l2 = [4,5,6] + l3 = l1 + l2 + assert l3 == [1,2,3,4,5,6] + + l4 = range(3) + l5 = l4 + l2 + assert l5 == [0,1,2,4,5,6] + def test_imul(self): 
l = l0 = [4,3] l *= 2 diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -330,3 +330,11 @@ from pypy.objspace.std.listobject import getslice__List_ANY_ANY # should not raise assert getslice__List_ANY_ANY(self.space, l, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) + + + def test_add_to_rangelist(self): + l1 = make_range_list(self.space, 1, 1, 3) + l2 = W_ListObject(self.space, [self.space.wrap(4), self.space.wrap(5)]) + from pypy.objspace.std.listobject import add__List_List + l3 = add__List_List(self.space, l1, l2) + assert self.space.eq_w(l3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(4), self.space.wrap(5)])) From noreply at buildbot.pypy.org Fri Sep 23 13:13:39 2011 From: noreply at buildbot.pypy.org (l.diekmann) Date: Fri, 23 Sep 2011 13:13:39 +0200 (CEST) Subject: [pypy-commit] pypy default: merged default with pypy Message-ID: <20110923111339.9AB90820D1@wyvern.cs.uni-duesseldorf.de> Author: Lukas Diekmann Branch: Changeset: r47506:6abf02c2041c Date: 2011-04-26 19:26 +0200 http://bitbucket.org/pypy/pypy/changeset/6abf02c2041c/ Log: merged default with pypy diff too long, truncating to 10000 out of 589712 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ diff --git a/.hgsubstate b/.hgsubstate new file mode 100644 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -123,12 +123,12 
@@ by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -161,21 +161,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. - CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. - UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt diff --git a/lib-python/2.5.2/BaseHTTPServer.py b/lib-python/2.5.2/BaseHTTPServer.py deleted file mode 100644 --- a/lib-python/2.5.2/BaseHTTPServer.py +++ /dev/null @@ -1,578 +0,0 @@ -"""HTTP server base class. 
- -Note: the class in this module doesn't implement any HTTP request; see -SimpleHTTPServer for simple implementations of GET, HEAD and POST -(including CGI scripts). It does, however, optionally implement HTTP/1.1 -persistent connections, as of version 0.3. - -Contents: - -- BaseHTTPRequestHandler: HTTP request handler base class -- test: test function - -XXX To do: - -- log requests even later (to capture byte count) -- log user-agent header and other interesting goodies -- send error log to separate file -""" - - -# See also: -# -# HTTP Working Group T. Berners-Lee -# INTERNET-DRAFT R. T. Fielding -# H. Frystyk Nielsen -# Expires September 8, 1995 March 8, 1995 -# -# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt -# -# and -# -# Network Working Group R. Fielding -# Request for Comments: 2616 et al -# Obsoletes: 2068 June 1999 -# Category: Standards Track -# -# URL: http://www.faqs.org/rfcs/rfc2616.html - -# Log files -# --------- -# -# Here's a quote from the NCSA httpd docs about log file format. -# -# | The logfile format is as follows. Each line consists of: -# | -# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb -# | -# | host: Either the DNS name or the IP number of the remote client -# | rfc931: Any information returned by identd for this person, -# | - otherwise. -# | authuser: If user sent a userid for authentication, the user name, -# | - otherwise. -# | DD: Day -# | Mon: Month (calendar name) -# | YYYY: Year -# | hh: hour (24-hour format, the machine's timezone) -# | mm: minutes -# | ss: seconds -# | request: The first line of the HTTP request as sent by the client. -# | ddd: the status code returned by the server, - if not available. -# | bbbb: the total number of bytes sent, -# | *not including the HTTP/1.0 header*, - if not available -# | -# | You can determine the name of the file accessed through request. 
-# -# (Actually, the latter is only true if you know the server configuration -# at the time the request was made!) - -__version__ = "0.3" - -__all__ = ["HTTPServer", "BaseHTTPRequestHandler"] - -import sys -import time -import socket # For gethostbyaddr() -import mimetools -import SocketServer - -# Default error message -DEFAULT_ERROR_MESSAGE = """\ - -Error response - - -

    Error response

    -

    Error code %(code)d. -

    Message: %(message)s. -

    Error code explanation: %(code)s = %(explain)s. - -""" - -def _quote_html(html): - return html.replace("&", "&").replace("<", "<").replace(">", ">") - -class HTTPServer(SocketServer.TCPServer): - - allow_reuse_address = 1 # Seems to make sense in testing environment - - def server_bind(self): - """Override server_bind to store the server name.""" - SocketServer.TCPServer.server_bind(self) - host, port = self.socket.getsockname()[:2] - self.server_name = socket.getfqdn(host) - self.server_port = port - - -class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler): - - """HTTP request handler base class. - - The following explanation of HTTP serves to guide you through the - code as well as to expose any misunderstandings I may have about - HTTP (so you don't need to read the code to figure out I'm wrong - :-). - - HTTP (HyperText Transfer Protocol) is an extensible protocol on - top of a reliable stream transport (e.g. TCP/IP). The protocol - recognizes three parts to a request: - - 1. One line identifying the request type and path - 2. An optional set of RFC-822-style headers - 3. An optional data part - - The headers and data are separated by a blank line. - - The first line of the request has the form - - - - where is a (case-sensitive) keyword such as GET or POST, - is a string containing path information for the request, - and should be the string "HTTP/1.0" or "HTTP/1.1". - is encoded using the URL encoding scheme (using %xx to signify - the ASCII character with hex code xx). - - The specification specifies that lines are separated by CRLF but - for compatibility with the widest range of clients recommends - servers also handle LF. Similarly, whitespace in the request line - is treated sensibly (allowing multiple spaces between components - and allowing trailing whitespace). - - Similarly, for output, lines ought to be separated by CRLF pairs - but most clients grok LF characters just fine. 
- - If the first line of the request has the form - - - - (i.e. is left out) then this is assumed to be an HTTP - 0.9 request; this form has no optional headers and data part and - the reply consists of just the data. - - The reply form of the HTTP 1.x protocol again has three parts: - - 1. One line giving the response code - 2. An optional set of RFC-822-style headers - 3. The data - - Again, the headers and data are separated by a blank line. - - The response code line has the form - - - - where is the protocol version ("HTTP/1.0" or "HTTP/1.1"), - is a 3-digit response code indicating success or - failure of the request, and is an optional - human-readable string explaining what the response code means. - - This server parses the request and the headers, and then calls a - function specific to the request type (). Specifically, - a request SPAM will be handled by a method do_SPAM(). If no - such method exists the server sends an error response to the - client. If it exists, it is called with no arguments: - - do_SPAM() - - Note that the request name is case sensitive (i.e. SPAM and spam - are different requests). - - The various request details are stored in instance variables: - - - client_address is the client IP address in the form (host, - port); - - - command, path and version are the broken-down request line; - - - headers is an instance of mimetools.Message (or a derived - class) containing the header information; - - - rfile is a file object open for reading positioned at the - start of the optional input data part; - - - wfile is a file object open for writing. - - IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING! - - The first thing to be written must be the response line. Then - follow 0 or more header lines, then a blank line, and then the - actual data (if any). 
The meaning of the header lines depends on - the command executed by the server; in most cases, when data is - returned, there should be at least one header line of the form - - Content-type: / - - where and should be registered MIME types, - e.g. "text/html" or "text/plain". - - """ - - # The Python system version, truncated to its first component. - sys_version = "Python/" + sys.version.split()[0] - - # The server software version. You may want to override this. - # The format is multiple whitespace-separated strings, - # where each string is of the form name[/version]. - server_version = "BaseHTTP/" + __version__ - - def parse_request(self): - """Parse a request (internal). - - The request should be stored in self.raw_requestline; the results - are in self.command, self.path, self.request_version and - self.headers. - - Return True for success, False for failure; on failure, an - error is sent back. - - """ - self.command = None # set in case of error on the first line - self.request_version = version = "HTTP/0.9" # Default - self.close_connection = 1 - requestline = self.raw_requestline - if requestline[-2:] == '\r\n': - requestline = requestline[:-2] - elif requestline[-1:] == '\n': - requestline = requestline[:-1] - self.requestline = requestline - words = requestline.split() - if len(words) == 3: - [command, path, version] = words - if version[:5] != 'HTTP/': - self.send_error(400, "Bad request version (%r)" % version) - return False - try: - base_version_number = version.split('/', 1)[1] - version_number = base_version_number.split(".") - # RFC 2145 section 3.1 says there can be only one "." and - # - major and minor numbers MUST be treated as - # separate integers; - # - HTTP/2.4 is a lower version than HTTP/2.13, which in - # turn is lower than HTTP/12.3; - # - Leading zeros MUST be ignored by recipients. 
- if len(version_number) != 2: - raise ValueError - version_number = int(version_number[0]), int(version_number[1]) - except (ValueError, IndexError): - self.send_error(400, "Bad request version (%r)" % version) - return False - if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": - self.close_connection = 0 - if version_number >= (2, 0): - self.send_error(505, - "Invalid HTTP Version (%s)" % base_version_number) - return False - elif len(words) == 2: - [command, path] = words - self.close_connection = 1 - if command != 'GET': - self.send_error(400, - "Bad HTTP/0.9 request type (%r)" % command) - return False - elif not words: - return False - else: - self.send_error(400, "Bad request syntax (%r)" % requestline) - return False - self.command, self.path, self.request_version = command, path, version - - # Examine the headers and look for a Connection directive - self.headers = self.MessageClass(self.rfile, 0) - - conntype = self.headers.get('Connection', "") - if conntype.lower() == 'close': - self.close_connection = 1 - elif (conntype.lower() == 'keep-alive' and - self.protocol_version >= "HTTP/1.1"): - self.close_connection = 0 - return True - - def handle_one_request(self): - """Handle a single HTTP request. - - You normally don't need to override this method; see the class - __doc__ string for information on how to handle specific HTTP - commands such as GET and POST. 
- - """ - self.raw_requestline = self.rfile.readline() - if not self.raw_requestline: - self.close_connection = 1 - return - if not self.parse_request(): # An error code has been sent, just exit - return - mname = 'do_' + self.command - if not hasattr(self, mname): - self.send_error(501, "Unsupported method (%r)" % self.command) - return - method = getattr(self, mname) - method() - - def handle(self): - """Handle multiple requests if necessary.""" - self.close_connection = 1 - - self.handle_one_request() - while not self.close_connection: - self.handle_one_request() - - def send_error(self, code, message=None): - """Send and log an error reply. - - Arguments are the error code, and a detailed message. - The detailed message defaults to the short entry matching the - response code. - - This sends an error response (so it must be called before any - output has been generated), logs the error, and finally sends - a piece of HTML explaining the error to the user. - - """ - - try: - short, long = self.responses[code] - except KeyError: - short, long = '???', '???' - if message is None: - message = short - explain = long - self.log_error("code %d, message %s", code, message) - # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) - content = (self.error_message_format % - {'code': code, 'message': _quote_html(message), 'explain': explain}) - self.send_response(code, message) - self.send_header("Content-Type", "text/html") - self.send_header('Connection', 'close') - self.end_headers() - if self.command != 'HEAD' and code >= 200 and code not in (204, 304): - self.wfile.write(content) - - error_message_format = DEFAULT_ERROR_MESSAGE - - def send_response(self, code, message=None): - """Send the response header and log the response code. - - Also send two standard headers with the server software - version and the current date. 
- - """ - self.log_request(code) - if message is None: - if code in self.responses: - message = self.responses[code][0] - else: - message = '' - if self.request_version != 'HTTP/0.9': - self.wfile.write("%s %d %s\r\n" % - (self.protocol_version, code, message)) - # print (self.protocol_version, code, message) - self.send_header('Server', self.version_string()) - self.send_header('Date', self.date_time_string()) - - def send_header(self, keyword, value): - """Send a MIME header.""" - if self.request_version != 'HTTP/0.9': - self.wfile.write("%s: %s\r\n" % (keyword, value)) - - if keyword.lower() == 'connection': - if value.lower() == 'close': - self.close_connection = 1 - elif value.lower() == 'keep-alive': - self.close_connection = 0 - - def end_headers(self): - """Send the blank line ending the MIME headers.""" - if self.request_version != 'HTTP/0.9': - self.wfile.write("\r\n") - - def log_request(self, code='-', size='-'): - """Log an accepted request. - - This is called by send_response(). - - """ - - self.log_message('"%s" %s %s', - self.requestline, str(code), str(size)) - - def log_error(self, *args): - """Log an error. - - This is called when a request cannot be fulfilled. By - default it passes the message on to log_message(). - - Arguments are the same as for log_message(). - - XXX This should go to the separate error log. - - """ - - self.log_message(*args) - - def log_message(self, format, *args): - """Log an arbitrary message. - - This is used by all other logging functions. Override - it if you have specific logging wishes. - - The first argument, FORMAT, is a format string for the - message to be logged. If the format string contains - any % escapes requiring parameters, they should be - specified as subsequent arguments (it's just like - printf!). - - The client host and current date/time are prefixed to - every message. 
- - """ - - sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), - self.log_date_time_string(), - format%args)) - - def version_string(self): - """Return the server software version string.""" - return self.server_version + ' ' + self.sys_version - - def date_time_string(self, timestamp=None): - """Return the current date and time formatted for a message header.""" - if timestamp is None: - timestamp = time.time() - year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) - s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( - self.weekdayname[wd], - day, self.monthname[month], year, - hh, mm, ss) - return s - - def log_date_time_string(self): - """Return the current time formatted for logging.""" - now = time.time() - year, month, day, hh, mm, ss, x, y, z = time.localtime(now) - s = "%02d/%3s/%04d %02d:%02d:%02d" % ( - day, self.monthname[month], year, hh, mm, ss) - return s - - weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - - def address_string(self): - """Return the client address formatted for logging. - - This version looks up the full hostname using gethostbyaddr(), - and tries to find a name that contains at least one dot. - - """ - - host, port = self.client_address[:2] - return socket.getfqdn(host) - - # Essentially static class variables - - # The version of the HTTP protocol we support. - # Set this to HTTP/1.1 to enable automatic keepalive - protocol_version = "HTTP/1.0" - - # The Message-like class used to parse headers - MessageClass = mimetools.Message - - # Table mapping response codes to messages; entries have the - # form {code: (shortmessage, longmessage)}. - # See RFC 2616. 
- responses = { - 100: ('Continue', 'Request received, please continue'), - 101: ('Switching Protocols', - 'Switching to new protocol; obey Upgrade header'), - - 200: ('OK', 'Request fulfilled, document follows'), - 201: ('Created', 'Document created, URL follows'), - 202: ('Accepted', - 'Request accepted, processing continues off-line'), - 203: ('Non-Authoritative Information', 'Request fulfilled from cache'), - 204: ('No Content', 'Request fulfilled, nothing follows'), - 205: ('Reset Content', 'Clear input form for further input.'), - 206: ('Partial Content', 'Partial content follows.'), - - 300: ('Multiple Choices', - 'Object has several resources -- see URI list'), - 301: ('Moved Permanently', 'Object moved permanently -- see URI list'), - 302: ('Found', 'Object moved temporarily -- see URI list'), - 303: ('See Other', 'Object moved -- see Method and URL list'), - 304: ('Not Modified', - 'Document has not changed since given time'), - 305: ('Use Proxy', - 'You must use proxy specified in Location to access this ' - 'resource.'), - 307: ('Temporary Redirect', - 'Object moved temporarily -- see URI list'), - - 400: ('Bad Request', - 'Bad request syntax or unsupported method'), - 401: ('Unauthorized', - 'No permission -- see authorization schemes'), - 402: ('Payment Required', - 'No payment -- see charging schemes'), - 403: ('Forbidden', - 'Request forbidden -- authorization will not help'), - 404: ('Not Found', 'Nothing matches the given URI'), - 405: ('Method Not Allowed', - 'Specified method is invalid for this server.'), - 406: ('Not Acceptable', 'URI not available in preferred format.'), - 407: ('Proxy Authentication Required', 'You must authenticate with ' - 'this proxy before proceeding.'), - 408: ('Request Timeout', 'Request timed out; try again later.'), - 409: ('Conflict', 'Request conflict.'), - 410: ('Gone', - 'URI no longer exists and has been permanently removed.'), - 411: ('Length Required', 'Client must specify Content-Length.'), - 412: 
('Precondition Failed', 'Precondition in headers is false.'), - 413: ('Request Entity Too Large', 'Entity is too large.'), - 414: ('Request-URI Too Long', 'URI is too long.'), - 415: ('Unsupported Media Type', 'Entity body in unsupported format.'), - 416: ('Requested Range Not Satisfiable', - 'Cannot satisfy request range.'), - 417: ('Expectation Failed', - 'Expect condition could not be satisfied.'), - - 500: ('Internal Server Error', 'Server got itself in trouble'), - 501: ('Not Implemented', - 'Server does not support this operation'), - 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'), - 503: ('Service Unavailable', - 'The server cannot process the request due to a high load'), - 504: ('Gateway Timeout', - 'The gateway server did not receive a timely response'), - 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'), - } - - -def test(HandlerClass = BaseHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0"): - """Test the HTTP request handler class. - - This runs an HTTP server on port 8000 (or the first command line - argument). - - """ - - if sys.argv[1:]: - port = int(sys.argv[1]) - else: - port = 8000 - server_address = ('', port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print "Serving HTTP on", sa[0], "port", sa[1], "..." - httpd.serve_forever() - - -if __name__ == '__main__': - test() diff --git a/lib-python/2.5.2/Bastion.py b/lib-python/2.5.2/Bastion.py deleted file mode 100644 --- a/lib-python/2.5.2/Bastion.py +++ /dev/null @@ -1,177 +0,0 @@ -"""Bastionification utility. - -A bastion (for another object -- the 'original') is an object that has -the same methods as the original but does not give access to its -instance variables. Bastions have a number of uses, but the most -obvious one is to provide code executing in restricted mode with a -safe interface to an object implemented in unrestricted mode. 
- -The bastionification routine has an optional second argument which is -a filter function. Only those methods for which the filter method -(called with the method name as argument) returns true are accessible. -The default filter method returns true unless the method name begins -with an underscore. - -There are a number of possible implementations of bastions. We use a -'lazy' approach where the bastion's __getattr__() discipline does all -the work for a particular method the first time it is used. This is -usually fastest, especially if the user doesn't call all available -methods. The retrieved methods are stored as instance variables of -the bastion, so the overhead is only occurred on the first use of each -method. - -Detail: the bastion class has a __repr__() discipline which includes -the repr() of the original object. This is precomputed when the -bastion is created. - -""" - -__all__ = ["BastionClass", "Bastion"] - -from types import MethodType - - -class BastionClass: - - """Helper class used by the Bastion() function. - - You could subclass this and pass the subclass as the bastionclass - argument to the Bastion() function, as long as the constructor has - the same signature (a get() function and a name for the object). - - """ - - def __init__(self, get, name): - """Constructor. - - Arguments: - - get - a function that gets the attribute value (by name) - name - a human-readable name for the original object - (suggestion: use repr(object)) - - """ - self._get_ = get - self._name_ = name - - def __repr__(self): - """Return a representation string. - - This includes the name passed in to the constructor, so that - if you print the bastion during debugging, at least you have - some idea of what it is. - - """ - return "" % self._name_ - - def __getattr__(self, name): - """Get an as-yet undefined attribute value. - - This calls the get() function that was passed to the - constructor. 
The result is stored as an instance variable so - that the next time the same attribute is requested, - __getattr__() won't be invoked. - - If the get() function raises an exception, this is simply - passed on -- exceptions are not cached. - - """ - attribute = self._get_(name) - self.__dict__[name] = attribute - return attribute - - -def Bastion(object, filter = lambda name: name[:1] != '_', - name=None, bastionclass=BastionClass): - """Create a bastion for an object, using an optional filter. - - See the Bastion module's documentation for background. - - Arguments: - - object - the original object - filter - a predicate that decides whether a function name is OK; - by default all names are OK that don't start with '_' - name - the name of the object; default repr(object) - bastionclass - class used to create the bastion; default BastionClass - - """ - - raise RuntimeError, "This code is not secure in Python 2.2 and later" - - # Note: we define *two* ad-hoc functions here, get1 and get2. - # Both are intended to be called in the same way: get(name). - # It is clear that the real work (getting the attribute - # from the object and calling the filter) is done in get1. - # Why can't we pass get1 to the bastion? Because the user - # would be able to override the filter argument! With get2, - # overriding the default argument is no security loophole: - # all it does is call it. - # Also notice that we can't place the object and filter as - # instance variables on the bastion object itself, since - # the user has full access to all instance variables! - - def get1(name, object=object, filter=filter): - """Internal function for Bastion(). See source comments.""" - if filter(name): - attribute = getattr(object, name) - if type(attribute) == MethodType: - return attribute - raise AttributeError, name - - def get2(name, get1=get1): - """Internal function for Bastion(). 
See source comments.""" - return get1(name) - - if name is None: - name = repr(object) - return bastionclass(get2, name) - - -def _test(): - """Test the Bastion() function.""" - class Original: - def __init__(self): - self.sum = 0 - def add(self, n): - self._add(n) - def _add(self, n): - self.sum = self.sum + n - def total(self): - return self.sum - o = Original() - b = Bastion(o) - testcode = """if 1: - b.add(81) - b.add(18) - print "b.total() =", b.total() - try: - print "b.sum =", b.sum, - except: - print "inaccessible" - else: - print "accessible" - try: - print "b._add =", b._add, - except: - print "inaccessible" - else: - print "accessible" - try: - print "b._get_.func_defaults =", map(type, b._get_.func_defaults), - except: - print "inaccessible" - else: - print "accessible" - \n""" - exec testcode - print '='*20, "Using rexec:", '='*20 - import rexec - r = rexec.RExec() - m = r.add_module('__main__') - m.b = b - r.r_exec(testcode) - - -if __name__ == '__main__': - _test() diff --git a/lib-python/2.5.2/CGIHTTPServer.py b/lib-python/2.5.2/CGIHTTPServer.py deleted file mode 100644 --- a/lib-python/2.5.2/CGIHTTPServer.py +++ /dev/null @@ -1,362 +0,0 @@ -"""CGI-savvy HTTP Server. - -This module builds on SimpleHTTPServer by implementing GET and POST -requests to cgi-bin scripts. - -If the os.fork() function is not present (e.g. on Windows), -os.popen2() is used as a fallback, with slightly altered semantics; if -that function is not present either (e.g. on Macintosh), only Python -scripts are supported, and they are executed by the current process. - -In all cases, the implementation is intentionally naive -- all -requests are executed sychronously. - -SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL --- it may execute arbitrary Python code or external programs. - -Note that status code 200 is sent prior to execution of a CGI script, so -scripts cannot send other status codes such as 302 (redirect). 
-""" - - -__version__ = "0.4" - -__all__ = ["CGIHTTPRequestHandler"] - -import os -import sys -import urllib -import BaseHTTPServer -import SimpleHTTPServer -import select - - -class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): - - """Complete HTTP server with GET, HEAD and POST commands. - - GET and HEAD also support running CGI scripts. - - The POST command is *only* implemented for CGI scripts. - - """ - - # Determine platform specifics - have_fork = hasattr(os, 'fork') - have_popen2 = hasattr(os, 'popen2') - have_popen3 = hasattr(os, 'popen3') - - # Make rfile unbuffered -- we need to read one line and then pass - # the rest to a subprocess, so we can't use buffered input. - rbufsize = 0 - - def do_POST(self): - """Serve a POST request. - - This is only implemented for CGI scripts. - - """ - - if self.is_cgi(): - self.run_cgi() - else: - self.send_error(501, "Can only POST to CGI scripts") - - def send_head(self): - """Version of send_head that support CGI scripts""" - if self.is_cgi(): - return self.run_cgi() - else: - return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self) - - def is_cgi(self): - """Test whether self.path corresponds to a CGI script. - - Return a tuple (dir, rest) if self.path requires running a - CGI script, None if not. Note that rest begins with a - slash if it is not empty. - - The default implementation tests whether the path - begins with one of the strings in the list - self.cgi_directories (and the next character is a '/' - or the end of the string). 
- - """ - - path = self.path - - for x in self.cgi_directories: - i = len(x) - if path[:i] == x and (not path[i:] or path[i] == '/'): - self.cgi_info = path[:i], path[i+1:] - return True - return False - - cgi_directories = ['/cgi-bin', '/htbin'] - - def is_executable(self, path): - """Test whether argument path is an executable file.""" - return executable(path) - - def is_python(self, path): - """Test whether argument path is a Python script.""" - head, tail = os.path.splitext(path) - return tail.lower() in (".py", ".pyw") - - def run_cgi(self): - """Execute a CGI script.""" - path = self.path - dir, rest = self.cgi_info - - i = path.find('/', len(dir) + 1) - while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] - - scriptdir = self.translate_path(nextdir) - if os.path.isdir(scriptdir): - dir, rest = nextdir, nextrest - i = path.find('/', len(dir) + 1) - else: - break - - # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' - - # dissect the part after the directory name into a script name & - # a possible additional path, to be stored in PATH_INFO. 
- i = rest.find('/') - if i >= 0: - script, rest = rest[:i], rest[i:] - else: - script, rest = rest, '' - - scriptname = dir + '/' + script - scriptfile = self.translate_path(scriptname) - if not os.path.exists(scriptfile): - self.send_error(404, "No such CGI script (%r)" % scriptname) - return - if not os.path.isfile(scriptfile): - self.send_error(403, "CGI script is not a plain file (%r)" % - scriptname) - return - ispy = self.is_python(scriptname) - if not ispy: - if not (self.have_fork or self.have_popen2 or self.have_popen3): - self.send_error(403, "CGI script is not a Python script (%r)" % - scriptname) - return - if not self.is_executable(scriptfile): - self.send_error(403, "CGI script is not executable (%r)" % - scriptname) - return - - # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html - # XXX Much of the following could be prepared ahead of time! - env = {} - env['SERVER_SOFTWARE'] = self.version_string() - env['SERVER_NAME'] = self.server.server_name - env['GATEWAY_INTERFACE'] = 'CGI/1.1' - env['SERVER_PROTOCOL'] = self.protocol_version - env['SERVER_PORT'] = str(self.server.server_port) - env['REQUEST_METHOD'] = self.command - uqrest = urllib.unquote(rest) - env['PATH_INFO'] = uqrest - env['PATH_TRANSLATED'] = self.translate_path(uqrest) - env['SCRIPT_NAME'] = scriptname - if query: - env['QUERY_STRING'] = query - host = self.address_string() - if host != self.client_address[0]: - env['REMOTE_HOST'] = host - env['REMOTE_ADDR'] = self.client_address[0] - authorization = self.headers.getheader("authorization") - if authorization: - authorization = authorization.split() - if len(authorization) == 2: - import base64, binascii - env['AUTH_TYPE'] = authorization[0] - if authorization[0].lower() == "basic": - try: - authorization = base64.decodestring(authorization[1]) - except binascii.Error: - pass - else: - authorization = authorization.split(':') - if len(authorization) == 2: - env['REMOTE_USER'] = authorization[0] - # XXX REMOTE_IDENT - if 
self.headers.typeheader is None: - env['CONTENT_TYPE'] = self.headers.type - else: - env['CONTENT_TYPE'] = self.headers.typeheader - length = self.headers.getheader('content-length') - if length: - env['CONTENT_LENGTH'] = length - accept = [] - for line in self.headers.getallmatchingheaders('accept'): - if line[:1] in "\t\n\r ": - accept.append(line.strip()) - else: - accept = accept + line[7:].split(',') - env['HTTP_ACCEPT'] = ','.join(accept) - ua = self.headers.getheader('user-agent') - if ua: - env['HTTP_USER_AGENT'] = ua - co = filter(None, self.headers.getheaders('cookie')) - if co: - env['HTTP_COOKIE'] = ', '.join(co) - # XXX Other HTTP_* headers - # Since we're setting the env in the parent, provide empty - # values to override previously set values - for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', - 'HTTP_USER_AGENT', 'HTTP_COOKIE'): - env.setdefault(k, "") - os.environ.update(env) - - self.send_response(200, "Script output follows") - - decoded_query = query.replace('+', ' ') - - if self.have_fork: - # Unix -- fork as we should - args = [script] - if '=' not in decoded_query: - args.append(decoded_query) - nobody = nobody_uid() - self.wfile.flush() # Always flush before forking - pid = os.fork() - if pid != 0: - # Parent - pid, sts = os.waitpid(pid, 0) - # throw away additional data [see bug #427345] - while select.select([self.rfile], [], [], 0)[0]: - if not self.rfile.read(1): - break - if sts: - self.log_error("CGI script exit status %#x", sts) - return - # Child - try: - try: - os.setuid(nobody) - except os.error: - pass - os.dup2(self.rfile.fileno(), 0) - os.dup2(self.wfile.fileno(), 1) - os.execve(scriptfile, args, os.environ) - except: - self.server.handle_error(self.request, self.client_address) - os._exit(127) - - elif self.have_popen2 or self.have_popen3: - # Windows -- use popen2 or popen3 to create a subprocess - import shutil - if self.have_popen3: - popenx = os.popen3 - else: - popenx = os.popen2 - cmdline = scriptfile - if 
self.is_python(scriptfile): - interp = sys.executable - if interp.lower().endswith("w.exe"): - # On Windows, use python.exe, not pythonw.exe - interp = interp[:-5] + interp[-4:] - cmdline = "%s -u %s" % (interp, cmdline) - if '=' not in query and '"' not in query: - cmdline = '%s "%s"' % (cmdline, query) - self.log_message("command: %s", cmdline) - try: - nbytes = int(length) - except (TypeError, ValueError): - nbytes = 0 - files = popenx(cmdline, 'b') - fi = files[0] - fo = files[1] - if self.have_popen3: - fe = files[2] - if self.command.lower() == "post" and nbytes > 0: - data = self.rfile.read(nbytes) - fi.write(data) - # throw away additional data [see bug #427345] - while select.select([self.rfile._sock], [], [], 0)[0]: - if not self.rfile._sock.recv(1): - break - fi.close() - shutil.copyfileobj(fo, self.wfile) - if self.have_popen3: - errors = fe.read() - fe.close() - if errors: - self.log_error('%s', errors) - sts = fo.close() - if sts: - self.log_error("CGI script exit status %#x", sts) - else: - self.log_message("CGI script exited OK") - - else: - # Other O.S. 
-- execute script in this process - save_argv = sys.argv - save_stdin = sys.stdin - save_stdout = sys.stdout - save_stderr = sys.stderr - try: - save_cwd = os.getcwd() - try: - sys.argv = [scriptfile] - if '=' not in decoded_query: - sys.argv.append(decoded_query) - sys.stdout = self.wfile - sys.stdin = self.rfile - execfile(scriptfile, {"__name__": "__main__"}) - finally: - sys.argv = save_argv - sys.stdin = save_stdin - sys.stdout = save_stdout - sys.stderr = save_stderr - os.chdir(save_cwd) - except SystemExit, sts: - self.log_error("CGI script exit status %s", str(sts)) - else: - self.log_message("CGI script exited OK") - - -nobody = None - -def nobody_uid(): - """Internal routine to get nobody's uid""" - global nobody - if nobody: - return nobody - try: - import pwd - except ImportError: - return -1 - try: - nobody = pwd.getpwnam('nobody')[2] - except KeyError: - nobody = 1 + max(map(lambda x: x[2], pwd.getpwall())) - return nobody - - -def executable(path): - """Test for executable file.""" - try: - st = os.stat(path) - except os.error: - return False - return st.st_mode & 0111 != 0 - - -def test(HandlerClass = CGIHTTPRequestHandler, - ServerClass = BaseHTTPServer.HTTPServer): - SimpleHTTPServer.test(HandlerClass, ServerClass) - - -if __name__ == '__main__': - test() diff --git a/lib-python/2.5.2/ConfigParser.py b/lib-python/2.5.2/ConfigParser.py deleted file mode 100644 --- a/lib-python/2.5.2/ConfigParser.py +++ /dev/null @@ -1,640 +0,0 @@ -"""Configuration file parser. - -A setup file consists of sections, lead by a "[section]" header, -and followed by "name: value" entries, with continuations and such in -the style of RFC 822. - -The option values can contain format strings which refer to other values in -the same section, or values in a special [DEFAULT] section. - -For example: - - something: %(dir)s/whatever - -would resolve the "%(dir)s" to the value of dir. All reference -expansions are done late, on demand. 
- -Intrinsic defaults can be specified by passing them into the -ConfigParser constructor as a dictionary. - -class: - -ConfigParser -- responsible for parsing a list of - configuration files, and managing the parsed database. - - methods: - - __init__(defaults=None) - create the parser and specify a dictionary of intrinsic defaults. The - keys must be strings, the values must be appropriate for %()s string - interpolation. Note that `__name__' is always an intrinsic default; - its value is the section's name. - - sections() - return all the configuration section names, sans DEFAULT - - has_section(section) - return whether the given section exists - - has_option(section, option) - return whether the given option exists in the given section - - options(section) - return list of configuration options for the named section - - read(filenames) - read and parse the list of named configuration files, given by - name. A single filename is also allowed. Non-existing files - are ignored. Return list of successfully read files. - - readfp(fp, filename=None) - read and parse one configuration file, given as a file object. - The filename defaults to fp.name; it is only used in error - messages (if fp has no `name' attribute, the string `' is used). - - get(section, option, raw=False, vars=None) - return a string value for the named option. All % interpolations are - expanded in the return values, based on the defaults passed into the - constructor and the DEFAULT section. Additional substitutions may be - provided using the `vars' argument, which must be a dictionary whose - contents override any pre-existing defaults. - - getint(section, options) - like get(), but convert value to an integer - - getfloat(section, options) - like get(), but convert value to a float - - getboolean(section, options) - like get(), but convert value to a boolean (currently case - insensitively defined as 0, false, no, off for False, and 1, true, - yes, on for True). Returns False or True. 
- - items(section, raw=False, vars=None) - return a list of tuples with (name, value) for each option - in the section. - - remove_section(section) - remove the given file section and all its options - - remove_option(section, option) - remove the given option from the given section - - set(section, option, value) - set the given option - - write(fp) - write the configuration state in .ini format -""" - -import re - -__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError", - "InterpolationError", "InterpolationDepthError", - "InterpolationSyntaxError", "ParsingError", - "MissingSectionHeaderError", - "ConfigParser", "SafeConfigParser", "RawConfigParser", - "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] - -DEFAULTSECT = "DEFAULT" - -MAX_INTERPOLATION_DEPTH = 10 - - - -# exception classes -class Error(Exception): - """Base class for ConfigParser exceptions.""" - - def __init__(self, msg=''): - self.message = msg - Exception.__init__(self, msg) - - def __repr__(self): - return self.message - - __str__ = __repr__ - -class NoSectionError(Error): - """Raised when no section matches a requested option.""" - - def __init__(self, section): - Error.__init__(self, 'No section: %r' % (section,)) - self.section = section - -class DuplicateSectionError(Error): - """Raised when a section is multiply-created.""" - - def __init__(self, section): - Error.__init__(self, "Section %r already exists" % section) - self.section = section - -class NoOptionError(Error): - """A requested option was not found.""" - - def __init__(self, option, section): - Error.__init__(self, "No option %r in section: %r" % - (option, section)) - self.option = option - self.section = section - -class InterpolationError(Error): - """Base class for interpolation-related exceptions.""" - - def __init__(self, option, section, msg): - Error.__init__(self, msg) - self.option = option - self.section = section - -class InterpolationMissingOptionError(InterpolationError): - """A string substitution required a 
setting which was not available.""" - - def __init__(self, option, section, rawval, reference): - msg = ("Bad value substitution:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\tkey : %s\n" - "\trawval : %s\n" - % (section, option, reference, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.reference = reference - -class InterpolationSyntaxError(InterpolationError): - """Raised when the source text into which substitutions are made - does not conform to the required syntax.""" - -class InterpolationDepthError(InterpolationError): - """Raised when substitutions are nested too deeply.""" - - def __init__(self, option, section, rawval): - msg = ("Value interpolation too deeply recursive:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\trawval : %s\n" - % (section, option, rawval)) - InterpolationError.__init__(self, option, section, msg) - -class ParsingError(Error): - """Raised when a configuration file does not follow legal syntax.""" - - def __init__(self, filename): - Error.__init__(self, 'File contains parsing errors: %s' % filename) - self.filename = filename - self.errors = [] - - def append(self, lineno, line): - self.errors.append((lineno, line)) - self.message += '\n\t[line %2d]: %s' % (lineno, line) - -class MissingSectionHeaderError(ParsingError): - """Raised when a key-value pair is found before any section header.""" - - def __init__(self, filename, lineno, line): - Error.__init__( - self, - 'File contains no section headers.\nfile: %s, line: %d\n%r' % - (filename, lineno, line)) - self.filename = filename - self.lineno = lineno - self.line = line - - - -class RawConfigParser: - def __init__(self, defaults=None): - self._sections = {} - self._defaults = {} - if defaults: - for key, value in defaults.items(): - self._defaults[self.optionxform(key)] = value - - def defaults(self): - return self._defaults - - def sections(self): - """Return a list of section names, excluding [DEFAULT]""" - # self._sections will never have 
[DEFAULT] in it - return self._sections.keys() - - def add_section(self, section): - """Create a new section in the configuration. - - Raise DuplicateSectionError if a section by the specified name - already exists. - """ - if section in self._sections: - raise DuplicateSectionError(section) - self._sections[section] = {} - - def has_section(self, section): - """Indicate whether the named section is present in the configuration. - - The DEFAULT section is not acknowledged. - """ - return section in self._sections - - def options(self, section): - """Return a list of option names for the given section name.""" - try: - opts = self._sections[section].copy() - except KeyError: - raise NoSectionError(section) - opts.update(self._defaults) - if '__name__' in opts: - del opts['__name__'] - return opts.keys() - - def read(self, filenames): - """Read and parse a filename or a list of filenames. - - Files that cannot be opened are silently ignored; this is - designed so that you can specify a list of potential - configuration file locations (e.g. current directory, user's - home directory, systemwide directory), and all existing - configuration files in the list will be read. A single - filename may also be given. - - Return list of successfully read files. - """ - if isinstance(filenames, basestring): - filenames = [filenames] - read_ok = [] - for filename in filenames: - try: - fp = open(filename) - except IOError: - continue - self._read(fp, filename) - fp.close() - read_ok.append(filename) - return read_ok - - def readfp(self, fp, filename=None): - """Like read() but the argument must be a file-like object. - - The `fp' argument must have a `readline' method. Optional - second argument is the `filename', which if not given, is - taken from fp.name. If fp has no `name' attribute, `' is - used. 
- - """ - if filename is None: - try: - filename = fp.name - except AttributeError: - filename = '' - self._read(fp, filename) - - def get(self, section, option): - opt = self.optionxform(option) - if section not in self._sections: - if section != DEFAULTSECT: - raise NoSectionError(section) - if opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - elif opt in self._sections[section]: - return self._sections[section][opt] - elif opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - - def items(self, section): - try: - d2 = self._sections[section] - except KeyError: - if section != DEFAULTSECT: - raise NoSectionError(section) - d2 = {} - d = self._defaults.copy() - d.update(d2) - if "__name__" in d: - del d["__name__"] - return d.items() - - def _get(self, section, conv, option): - return conv(self.get(section, option)) - - def getint(self, section, option): - return self._get(section, int, option) - - def getfloat(self, section, option): - return self._get(section, float, option) - - _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, - '0': False, 'no': False, 'false': False, 'off': False} - - def getboolean(self, section, option): - v = self.get(section, option) - if v.lower() not in self._boolean_states: - raise ValueError, 'Not a boolean: %s' % v - return self._boolean_states[v.lower()] - - def optionxform(self, optionstr): - return optionstr.lower() - - def has_option(self, section, option): - """Check for the existence of a given option in a given section.""" - if not section or section == DEFAULTSECT: - option = self.optionxform(option) - return option in self._defaults - elif section not in self._sections: - return False - else: - option = self.optionxform(option) - return (option in self._sections[section] - or option in self._defaults) - - def set(self, section, option, value): - """Set an option.""" - if not section or section == DEFAULTSECT: - 
sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - sectdict[self.optionxform(option)] = value - - def write(self, fp): - """Write an .ini-format representation of the configuration state.""" - if self._defaults: - fp.write("[%s]\n" % DEFAULTSECT) - for (key, value) in self._defaults.items(): - fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t'))) - fp.write("\n") - for section in self._sections: - fp.write("[%s]\n" % section) - for (key, value) in self._sections[section].items(): - if key != "__name__": - fp.write("%s = %s\n" % - (key, str(value).replace('\n', '\n\t'))) - fp.write("\n") - - def remove_option(self, section, option): - """Remove an option.""" - if not section or section == DEFAULTSECT: - sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - option = self.optionxform(option) - existed = option in sectdict - if existed: - del sectdict[option] - return existed - - def remove_section(self, section): - """Remove a file section.""" - existed = section in self._sections - if existed: - del self._sections[section] - return existed - - # - # Regular expressions for parsing section headers and options. - # - SECTCRE = re.compile( - r'\[' # [ - r'(?P

    [^]]+)' # very permissive! - r'\]' # ] - ) - OPTCRE = re.compile( - r'(?P